FORMUS3IC_LAS3 / embb
Commit 579bdb09, authored Nov 06, 2015 by Christian Kern
Merge branch 'development' into embb18_cmake_error_on_doxygen_warning

Parents: a01fc717, b88f17ae
Showing 39 changed files with 1006 additions and 1663 deletions (+1006 −1663)
CHANGELOG.md  +0 −31
CMakeLists.txt  +2 −11
README.md  +3 −5
algorithms_cpp/include/embb/algorithms/invoke.h  +19 −10
algorithms_cpp/test/invoke_test.cc  +1 −20
base_c/src/condition_variable.c  +2 −2
base_c/src/internal/thread_index.c  +1 −16
base_c/test/condition_var_test.cc  +7 −7
base_c/test/time_test.cc  +0 −17
base_c/test/time_test.h  +1 −6
base_cpp/include/embb/base/atomic.h  +1 −1
base_cpp/include/embb/base/internal/atomic/atomic_base.h  +2 −1
base_cpp/include/embb/base/internal/atomic/atomic_pointer.h  +4 −4
base_cpp/include/embb/base/internal/mutex-inl.h  +2 −3
base_cpp/include/embb/base/mutex.h  +2 −2
base_cpp/test/mutex_test.cc  +6 −14
containers_cpp/include/embb/containers/internal/hazard_pointer-inl.h  +357 −331
containers_cpp/include/embb/containers/internal/hazard_pointer.h  +415 −202
containers_cpp/include/embb/containers/internal/lock_free_mpmc_queue-inl.h  +12 −11
containers_cpp/include/embb/containers/internal/lock_free_stack-inl.h  +8 −7
containers_cpp/include/embb/containers/internal/lock_free_tree_value_pool-inl.h  +22 −61
containers_cpp/include/embb/containers/internal/object_pool-inl.h  +11 −14
containers_cpp/include/embb/containers/internal/wait_free_array_value_pool-inl.h  +8 −30
containers_cpp/include/embb/containers/lock_free_mpmc_queue.h  +7 −18
containers_cpp/include/embb/containers/lock_free_stack.h  +5 −17
containers_cpp/include/embb/containers/lock_free_tree_value_pool.h  +7 −22
containers_cpp/include/embb/containers/object_pool.h  +7 −13
containers_cpp/include/embb/containers/wait_free_array_value_pool.h  +14 −55
containers_cpp/test/hazard_pointer_test.cc  +47 −416
containers_cpp/test/hazard_pointer_test.h  +16 −96
containers_cpp/test/main.cc  +0 −2
dataflow_cpp/test/dataflow_cpp_test_simple.cc  +4 −2
mtapi_c/src/embb_mtapi_id_pool_t.c  +2 −2
mtapi_c/test/embb_mtapi_test_id_pool.cc  +0 −120
mtapi_c/test/embb_mtapi_test_id_pool.h  +0 −78
mtapi_c/test/main.cc  +0 −6
mtapi_cpp/CMakeLists.txt  +4 −0
tasks_cpp/CMakeLists.txt  +3 −0
tasks_cpp/test/tasks_cpp_test_task.cc  +4 −10
CHANGELOG.md  View file @ 579bdb09

   Embedded Multicore Building Blocks (EMB²)
   =========================================

- Version 0.3.1
- -------------

- ### Features:
- - None

- ### Changes and improvements:
- - Removed one function argument from algorithms::Invoke
- - Added "explicit" specifier to base type constructor of Atomic<BaseType*>
- - Added "const" qualifier to dereference operator and member access operator of AtomicPointer<>
- - Changed AtomicBase<>::CompareAndSwap to atomically return expected value
- - Replaced constant in dataflow_cpp_test_simple.cc with corresponding macro
- - Added initialization of atomic variable in hazard_pointer_test.cc to avoid warning with GCC 5.1
- - Changed initial value of allocated_object_from_different_thread
- - Added tests for ID Pool and check for memory leaks
- - Updated unit test for the UniqueLock::Swap

- ### Bug fixes:
- - Fixed implementation of ID pool (provided fewer elements than specified by capacity)
- - Fixed unsigned overflow bug in timed wait function of condition variables
- - Fixed implementation of UniqueLock::Swap

- ### Build system:
- - Improved CMake output for automatic initialization option
- - Fixed cpplint and unsigned/signed warnings

- ### Documentation:
- - Fixed documentation of UniqueLock class
- - Updated README file

   Version 0.3.0
   -------------
   ...
CMakeLists.txt  View file @ 579bdb09

@@ -28,7 +28,7 @@ cmake_minimum_required (VERSION 2.8.9)

   # Version number
   set(EMBB_BASE_VERSION_MAJOR 0)
   set(EMBB_BASE_VERSION_MINOR 3)
- set(EMBB_BASE_VERSION_PATCH 1)
+ set(EMBB_BASE_VERSION_PATCH 0)

   # Fix compilation for CMake versions >= 3.1
   #
...
@@ -59,9 +59,7 @@ IF(NOT OpenCL_FOUND)

   MESSAGE(STATUS "OpenCL is not there, will build without MTAPI OpenCL Plugin.")
   ENDIF()

- # give the user the possibility, to append compiler flags
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_CMAKE_CXX_FLAGS}")
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${EXTRA_CMAKE_C_FLAGS}")

   if(NOT CMAKE_BUILD_TYPE)
     set(CMAKE_BUILD_TYPE "Release" CACHE STRING
...
@@ -102,13 +100,6 @@ else()

   endif()
   message("   (set with command line option -DWARNINGS_ARE_ERRORS=ON/OFF)")
- if(USE_AUTOMATIC_INITIALIZATION STREQUAL ON)
-   message("-- MTAPI/Tasks automatic initialization enabled (default)")
- else()
-   message("-- MTAPI/Tasks automatic initialization disabled")
- endif()
- message("   (set with command line option -DUSE_AUTOMATIC_INITIALIZATION=ON/OFF)")

   include(CMakeCommon/SetCompilerFlags.cmake)
   SetGNUCompilerFlags(compiler_libs compiler_flags)
   SetVisualStudioCompilerFlags(compiler_libs compiler_flags)
...
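The removed messages refer to the USE_AUTOMATIC_INITIALIZATION build option. When EMB² is built with automatic initialization disabled, the task runtime has to be brought up and torn down explicitly. A minimal sketch, assuming the embb::tasks::Node interface from tasks_cpp; the domain and node ids are application-defined placeholders:

    #include <embb/tasks/tasks.h>

    static const int kDomainId = 1;  // application-defined MTAPI domain id
    static const int kNodeId = 1;    // application-defined MTAPI node id

    int main() {
      // With -DUSE_AUTOMATIC_INITIALIZATION=OFF, initialize before any
      // task is created, and finalize before the process exits.
      embb::tasks::Node::Initialize(kDomainId, kNodeId);
      // ... create and run tasks here ...
      embb::tasks::Node::Finalize();
      return 0;
    }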
README.md  View file @ 579bdb09

@@ -270,8 +270,8 @@ If you want to use the C++ functionalities of EMB², you have to link the

   following libraries (names will be different on Windows and on Linux) in the
   given order:

-   embb_dataflow_cpp, embb_algorithms_cpp, embb_containers_cpp,
-   embb_mtapi_cpp, embb_mtapi_c, embb_base_cpp, embb_base_c
+   embb_base, embb_base_cpp, embb_mtapi_c, embb_mtapi_cpp, embb_containers_cpp,
+   embb_algorithms_cpp, embb_dataflow_cpp

   The C++ header files can be included as follows:
...
@@ -284,7 +284,7 @@ The C++ header files can be included as follows:

   The following libraries have to be linked in the given order:

-   embb_mtapi_c, embb_base_c
+   embb_base_c, mtapi_c

   The C header files can be included as follows:
...
@@ -323,8 +323,6 @@ Known Bugs and Limitations

   is bounded by a predefined but modifiable constant (see functions
   embb_thread_get_max_count() / embb_thread_set_max_count() and class
   embb::base::Thread).
- - While MTAPI fully supports heterogeneous systems, the algorithms and
-   dataflow components are currently limited to homogeneous systems.

   Development and Contribution
...
algorithms_cpp/include/embb/algorithms/invoke.h  View file @ 579bdb09

@@ -49,37 +49,33 @@ typedef embb::base::Function<void> InvokeFunctionType;

   #ifdef DOXYGEN
   /**
-  * Spawns two to ten function objects at once and runs them in parallel.
+  * Spawns one to ten function objects at once and runs them in parallel.
    *
    * Blocks until all of them are done.
    *
    * \ingroup CPP_ALGORITHMS_INVOKE
    */
- template<typename Function1, typename Function2, ...>
+ template<typename Function1, ...>
   void Invoke(
     Function1 func1,
     /**< [in] First function object to invoke */
-   Function2 func2,
-   /**< [in] Second function object to invoke */
     ...);

   /**
-  * Spawns two to ten function objects at once and runs them in parallel using the
+  * Spawns one to ten function objects at once and runs them in parallel using the
    * given embb::mtapi::ExecutionPolicy.
    *
    * Blocks until all of them are done.
    *
    * \ingroup CPP_ALGORITHMS_INVOKE
    */
- template<typename Function1, typename Function2, ...>
+ template<typename Function1, ...>
   void Invoke(
     Function1 func1,
     /**< [in] Function object to invoke */
-   Function2 func2,
-   /**< [in] Second function object to invoke */
     ...,
-   const embb::tasks::ExecutionPolicy& policy
-   /**< [in] embb::tasks::ExecutionPolicy to use */
+   const embb::mtapi::ExecutionPolicy& policy
+   /**< [in] embb::mtapi::ExecutionPolicy to use */
   );
   #else // DOXYGEN
...
@@ -122,6 +118,13 @@ class TaskWrapper {

   };
   } // namespace internal

+ template<typename Function1>
+ void Invoke(
+   Function1 func1,
+   const embb::tasks::ExecutionPolicy& policy) {
+   internal::TaskWrapper<Function1> wrap1(func1, policy);
+ }

   template<typename Function1, typename Function2>
   void Invoke(
     Function1 func1,
...
@@ -287,6 +290,12 @@ template<typename Function1, typename Function2, typename Function3,

   internal::TaskWrapper<Function10> wrap10(func10, policy);
   }

+ template<typename Function1>
+ void Invoke(Function1 func1) {
+   Invoke(func1, embb::tasks::ExecutionPolicy());
+ }

   template<typename Function1, typename Function2>
   void Invoke(
     Function1 func1,
...
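A minimal usage sketch of the overloads this file gains: the plain single-function Invoke and its policy-taking variant. WorkA and WorkB are placeholder functions; the call shapes mirror those in invoke_test.cc below.

    #include <embb/algorithms/invoke.h>
    #include <embb/tasks/tasks.h>

    void WorkA() { /* ... */ }
    void WorkB() { /* ... */ }

    void Example() {
      // Single function object: runs WorkA as a task, blocks until done.
      embb::algorithms::Invoke(&WorkA);

      // Two function objects executed in parallel under an explicit policy.
      embb::tasks::ExecutionPolicy policy;
      embb::algorithms::Invoke(&WorkA, &WorkB, policy);
    }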
algorithms_cpp/test/invoke_test.cc  View file @ 579bdb09

@@ -44,6 +44,7 @@ static void Invocable10() {}

   void InvokeTest::Test() {
     using embb::algorithms::Invoke;
+   Invoke(&Invocable1);
     Invoke(&Invocable1, &Invocable2);
     Invoke(&Invocable1, &Invocable2, &Invocable3);
     Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4);
...
@@ -60,24 +61,4 @@ void InvokeTest::Test() {

          &Invocable6, &Invocable7, &Invocable8, &Invocable9);
   Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
          &Invocable6, &Invocable7, &Invocable8, &Invocable9, &Invocable10);
- embb::tasks::ExecutionPolicy policy;
- Invoke(&Invocable1, &Invocable2, policy);
- Invoke(&Invocable1, &Invocable2, &Invocable3, policy);
- Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, policy);
- Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
-        policy);
- Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
-        &Invocable6, policy);
- Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
-        &Invocable6, &Invocable7, policy);
- Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
-        &Invocable6, &Invocable7, &Invocable8, policy);
- Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
-        &Invocable6, &Invocable7, &Invocable8, &Invocable9, policy);
- Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
-        &Invocable6, &Invocable7, &Invocable8, &Invocable9, &Invocable10,
-        policy);
   }
base_c/src/condition_variable.c  View file @ 579bdb09

@@ -83,8 +83,8 @@ int embb_condition_wait_until(embb_condition_t* condition_var,

   embb_time_t now;
   embb_time_now(&now);
   /* Check if absolute timepoint (in milliseconds) still is in the future */
- if ((time->seconds * 1000 + time->nanoseconds / 1000000)
-     > (now.seconds * 1000 + now.nanoseconds / 1000000)) {
+ if (time->seconds * 1000 + time->nanoseconds / 1000000
+     - now.seconds * 1000 - now.nanoseconds / 1000000 > 0) {
     /* Convert to (unsigned type) milliseconds and round up */
     DWORD time_diff = (DWORD)(
       time->seconds * 1000 + time->nanoseconds / 1000000
...
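The two conditions differ in more than style: with unsigned millisecond counts, the subtraction form can wrap around, which is the "unsigned overflow bug in timed wait" named in the changelog above. A self-contained sketch of the pitfall (plain C++, made-up values):

    #include <cstdint>
    #include <iostream>

    int main() {
      // With unsigned operands, "a - b > 0" is not equivalent to "a > b":
      // if b > a, the subtraction wraps around to a huge positive value.
      uint64_t deadline_ms = 1000;  // absolute timepoint, already in the past
      uint64_t now_ms = 2000;

      std::cout << (deadline_ms - now_ms > 0) << '\n';  // prints 1 (wrap-around)
      std::cout << (deadline_ms > now_ms) << '\n';      // prints 0 (correct)
      return 0;
    }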
base_c/src/internal/thread_index.c  View file @ 579bdb09

@@ -128,20 +128,6 @@ void embb_internal_thread_index_set_max(unsigned int max) {

   *embb_max_number_thread_indices() = max;
   }

- /**
-  * \pre the calling thread is the only active thread
-  *
-  * \post the thread indices count and calling thread index is reset
-  */
   void embb_internal_thread_index_reset() {
- /** This function is only called in tests, usually when all other threads
-  * except the main thread have terminated. However, the main thread still has
-  * potentially stored its old index value in its thread local storage,
-  * which might be assigned additionally to another thread (as the counter is
-  * reset), which may lead to hard to detect bugs. Therefore, reset the thread
-  * local thread id here.
-  */
- embb_internal_thread_index_var = UINT_MAX;
   embb_counter_init(embb_thread_index_counter());
   }
\ No newline at end of file
base_c/test/condition_var_test.cc  View file @ 579bdb09

@@ -38,7 +38,7 @@ ConditionVarTest::ConditionVarTest()

   embb_condition_init(&cond_wait_);
   embb_mutex_init(&mutex_cond_wait_, EMBB_MUTEX_PLAIN);
- CreateUnit("Timed wait timeouts")
+ CreateUnit("Timed wait timouts")
     .Add(&ConditionVarTest::TestTimedWaitTimeouts, this);
   if (num_threads_ >= 2) {
     CreateUnit("Condition Notify Test")
...
@@ -64,10 +64,10 @@ void ConditionVarTest::TestNotify() {

   while (embb_counter_get(&counter_)
          < static_cast<unsigned int>(num_threads_ - 1))
- {} // All threads entered critical section
+ {} // all threads entered critical section
   embb_mutex_lock(&mutex_cond_notify_);
   embb_mutex_unlock(&mutex_cond_notify_);
- // All threads called wait on the condition (even last thread)
+ // All threads called wait on the condition (Even last thread)
   embb_counter_init(&counter_);
...
@@ -75,7 +75,7 @@ void ConditionVarTest::TestNotify() {

   embb_mutex_lock(&mutex_cond_wait_);
   embb_condition_wait_for(&cond_wait_, &mutex_cond_wait_, &duration);
   while (embb_counter_get(&counter_) == 0)
- {} // If test hangs here, signalling has not succeeded
+ {} // if hangs here signal has not succeded
   PT_ASSERT_EQ_MSG(embb_counter_get(&counter_), static_cast<unsigned int>(1),
                    "Only one thread notified");
...
@@ -85,7 +85,7 @@ void ConditionVarTest::TestNotify() {

   while (embb_counter_get(&counter_) !=
          static_cast<unsigned int>(num_threads_ - 1))
- {} // If test hangs here, not all threads were notified
+ {} // If this hangs then not all threads were notified.
   embb_mutex_unlock(&mutex_cond_wait_);
   embb_mutex_destroy(&mutex_cond_wait_);
...
@@ -105,13 +105,13 @@ void ConditionVarTest::TestTimedWaitTimeouts() {

   embb_time_t time;
   embb_duration_t duration = EMBB_DURATION_INIT;
- // Wait for "now" tests already passed time point
+ // Wait for now tests already passed time point
   embb_time_now(&time);
   embb_mutex_lock(&mutex);
   int status = embb_condition_wait_until(&cond, &mutex, &time);
   PT_EXPECT_EQ(status, EMBB_TIMEDOUT);
- // Wait for a future time point
+ // Wait for a future timepoint
   status = embb_duration_set_milliseconds(&duration, 1);
   PT_EXPECT_EQ(status, EMBB_SUCCESS);
   status = embb_time_in(&time, &duration);  // Time now
...
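The pattern exercised by TestTimedWaitTimeouts can be reproduced standalone: waiting until an absolute timepoint that has already passed must report a timeout instead of blocking. A hedged sketch using the base_c API shown in the hunks above (header paths assumed):

    #include <embb/base/c/condition_variable.h>
    #include <embb/base/c/errors.h>
    #include <embb/base/c/mutex.h>
    #include <embb/base/c/time.h>
    #include <cassert>

    int main() {
      embb_mutex_t mutex;
      embb_condition_t cond;
      embb_mutex_init(&mutex, EMBB_MUTEX_PLAIN);
      embb_condition_init(&cond);

      // An absolute timepoint that is already in the past.
      embb_time_t now;
      embb_time_now(&now);
      embb_mutex_lock(&mutex);
      int status = embb_condition_wait_until(&cond, &mutex, &now);
      assert(status == EMBB_TIMEDOUT);
      embb_mutex_unlock(&mutex);

      embb_condition_destroy(&cond);
      embb_mutex_destroy(&mutex);
      return 0;
    }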
base_c/test/time_test.cc  View file @ 579bdb09

@@ -36,9 +36,6 @@ namespace test {

   TimeTest::TimeTest() {
     CreateUnit("Time in duration").Add(&TimeTest::TestTimeInDuration, this);
-   CreateUnit("Monotonicity").Add(
-     &TimeTest::TestMonotonicity, this,
-     1, partest::TestSuite::GetDefaultNumIterations() * 10);
   }

   void TimeTest::TestTimeInDuration() {
...
@@ -51,20 +48,6 @@ void TimeTest::TestTimeInDuration() {

     PT_EXPECT_EQ(status, EMBB_SUCCESS);
   }

- void TimeTest::TestMonotonicity() {
-   embb_time_t first;
-   embb_time_t second;
-   int status1 = embb_time_in(&first, embb_duration_zero());
-   int status2 = embb_time_in(&second, embb_duration_zero());
-   PT_EXPECT_EQ(status1, EMBB_SUCCESS);
-   PT_EXPECT_EQ(status2, EMBB_SUCCESS);
-   unsigned long long first_abs = first.seconds * 1000 +
-     first.nanoseconds / 1000000;
-   unsigned long long second_abs = second.seconds * 1000 +
-     second.nanoseconds / 1000000;
-   PT_EXPECT_GE(second_abs, first_abs);
- }

   } // namespace test
   } // namespace base
   } // namespace embb
base_c/test/time_test.h  View file @ 579bdb09

@@ -42,14 +42,9 @@ class TimeTest : public partest::TestCase {

   private:
     /**
-    * Tests time-in-duration method.
+    * Tests time in duration method.
      */
     void TestTimeInDuration();

-   /**
-    * Tests that succeedingly taken times are monotonously increasing.
-    */
-   void TestMonotonicity();
   };

   } // namespace test
...
base_cpp/include/embb/base/atomic.h  View file @ 579bdb09

@@ -478,7 +478,7 @@ class Atomic<BaseType*> : public embb::base::internal::atomic::

   public:
     Atomic() : embb::base::internal::atomic::
       AtomicPointer<BaseType, ptrdiff_t, sizeof(BaseType*)>() {}

- explicit Atomic(BaseType* p) : embb::base::internal::atomic::
+ Atomic(BaseType* p) : embb::base::internal::atomic::
     AtomicPointer<BaseType, ptrdiff_t, sizeof(BaseType*)>(p) {}

   BaseType* operator=(BaseType* p) {
...
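This hunk toggles the explicit specifier that the changelog entry "Added 'explicit' specifier to base type constructor of Atomic<BaseType*>" refers to. A small sketch of what the specifier changes; the function is hypothetical:

    #include <embb/base/atomic.h>

    void Example() {
      int value = 42;

      // Direct initialization works with or without the explicit specifier.
      embb::base::Atomic<int*> a(&value);

      // Copy initialization compiles only while the constructor is
      // non-explicit; with "explicit Atomic(BaseType*)" the line below
      // becomes a compile error, preventing implicit conversions.
      embb::base::Atomic<int*> b = &value;

      (void)a; (void)b;
    }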
base_cpp/include/embb/base/internal/atomic/atomic_base.h  View file @ 579bdb09

@@ -177,7 +177,8 @@ CompareAndSwap(BaseType& expected, BaseType desired) {

     compare_and_swap(&AtomicValue, &native_expected, native_desired)) != 0
     ? true : false;

- memcpy(&expected, &native_expected, sizeof(expected));
+ if (!return_val)
+   expected = Load();

   return return_val;
   }
...
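The two bodies differ in what a failed CompareAndSwap leaves in expected: the memcpy variant copies back the value observed by the CAS operation itself, while the Load() variant refreshes expected with a separate load that is not atomic with the CAS. A hedged sketch of the retry loop that relies on this behavior, using the method names from this file:

    #include <embb/base/atomic.h>

    // Typical lock-free update loop: on CAS failure, "expected" should hold
    // the value that made the CAS fail, so the loop can retry immediately.
    // If the implementation refreshes "expected" via a separate Load(), the
    // value may stem from a later point in time than the failed CAS.
    void AddOne(embb::base::Atomic<int>& counter) {
      int expected = counter.Load();
      while (!counter.CompareAndSwap(expected, expected + 1)) {
        // "expected" was updated by CompareAndSwap; retry with it.
      }
    }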
base_cpp/include/embb/base/internal/atomic/atomic_pointer.h  View file @ 579bdb09

@@ -65,8 +65,8 @@ class AtomicPointer : public AtomicArithmetic<BaseType*, DifferenceType, S> {

   bool IsPointer() const;

   // The methods below are documented in atomic.h
- BaseType* operator->() const;
- BaseType& operator*() const;
+ BaseType* operator->();
+ BaseType& operator*();
   };

   template<typename BaseType, typename DifferenceType, size_t S>
...
@@ -93,13 +93,13 @@ inline bool AtomicPointer<BaseType, DifferenceType, S>::

   template<typename BaseType, typename DifferenceType, size_t S>
   inline BaseType* AtomicPointer<BaseType, DifferenceType, S>::
- operator->() const {
+ operator->() {
     return this->Load();
   }

   template<typename BaseType, typename DifferenceType, size_t S>
   inline BaseType& AtomicPointer<BaseType, DifferenceType, S>::
- operator*() const {
+ operator*() {
     return *(this->Load());
   }
...
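A short sketch of what the const qualifier on operator-> and operator* buys: read access through a const reference to the atomic pointer. Node is a hypothetical payload type; the line compiles only with the const-qualified variant of operator->:

    #include <embb/base/atomic.h>

    struct Node { int value; };

    // Reading through the atomic pointer does not modify the atomic itself,
    // so with const-qualified operator-> this compiles even when the Atomic
    // is only reachable via a const reference.
    int ReadValue(const embb::base::Atomic<Node*>& ptr) {
      return ptr->value;  // requires "BaseType* operator->() const"
    }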
base_cpp/include/embb/base/internal/mutex-inl.h  View file @ 579bdb09

@@ -28,7 +28,6 @@

   #define EMBB_BASE_INTERNAL_MUTEX_INL_H_

   #include <cassert>
- #include <algorithm>

   namespace embb {
   namespace base {
...
@@ -96,8 +95,8 @@ void UniqueLock<Mutex>::Unlock() {

   template<typename Mutex>
   void UniqueLock<Mutex>::Swap(UniqueLock<Mutex>& other) {
-   std::swap(mutex_, other.mutex_);
-   std::swap(locked_, other.locked_);
+   locked_ = other.locked_;
+   mutex_ = other.Release();
   }

   template<typename Mutex>
...
base_cpp/include/embb/base/mutex.h  View file @ 579bdb09

@@ -439,11 +439,11 @@ class UniqueLock {

   void Unlock();

   /**
-  * Exchanges ownership of the wrapped mutex with another lock.
+  * Transfers ownership of a mutex to this lock.
    */
   void Swap(
     UniqueLock<Mutex>& other
-   /**< [IN/OUT] The lock to exchange ownership with */
+   /**< [IN/OUT] Lock from which ownership shall be transferred */
   );

   /**
...
base_cpp/test/mutex_test.cc  View file @ 579bdb09

@@ -191,21 +191,13 @@ void MutexTest::TestUniqueLock() {

   }

   { // Test lock swapping
-   UniqueLock<> lock1(mutex_);
+   UniqueLock<> lock1;
+   UniqueLock<> lock2(mutex_);
+   PT_EXPECT_EQ(lock1.OwnsLock(), false);
+   PT_EXPECT_EQ(lock2.OwnsLock(), true);
+   lock1.Swap(lock2);
     PT_EXPECT_EQ(lock1.OwnsLock(), true);
-   PT_EXPECT_EQ(lock2.OwnsLock(), false);
-   {
-     UniqueLock<> lock2;
-     PT_EXPECT_EQ(lock2.OwnsLock(), false);
-     lock1.Swap(lock2);
-     PT_EXPECT_EQ(lock1.OwnsLock(), false);
-     PT_EXPECT_EQ(lock2.OwnsLock(), true);
-   }
-   // At this point, "lock2" was destroyed and "mutex_" must be unlocked.
-   UniqueLock<> lock3(mutex_, embb::base::try_lock);
-   PT_EXPECT_EQ(lock3.OwnsLock(), true);
   }
   }
...
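Putting the three mutex hunks together, a hedged sketch of Swap under the transfer semantics documented above; types are as declared in mutex.h:

    #include <embb/base/mutex.h>

    void Example(embb::base::Mutex& mutex) {
      embb::base::UniqueLock<> lock1;         // owns nothing yet
      embb::base::UniqueLock<> lock2(mutex);  // locks the mutex

      // Per the updated documentation, Swap transfers ownership of the
      // mutex held by "lock2" to "lock1".
      lock1.Swap(lock2);
      // lock1.OwnsLock() is now true; the mutex is unlocked again when
      // lock1 goes out of scope.
    }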
containers_cpp/include/embb/containers/internal/hazard_pointer-inl.h  View file @ 579bdb09

@@ -30,360 +30,386 @@

   namespace embb {
   namespace containers {
   namespace internal {

- // Visual Studio is complaining, that the return in the last line of this
- // function is not reachable. This is true, as long as exceptions are enabled.
- // Otherwise, the exception becomes an assertion and with disabling assertions,
- // the code becomes reachable. So, disabling this warning.
- #ifdef EMBB_PLATFORM_COMPILER_MSVC
- #pragma warning(push)
- #pragma warning(disable:4702)
- #endif
- template<typename GuardType>
- unsigned int HazardPointer<GuardType>::GetObjectLocalThreadIndex() {
-   // first, get the EMBB native thread id.
-   unsigned int embb_thread_index;
-   int return_val = embb_internal_thread_index(&embb_thread_index);
-   if (return_val != EMBB_SUCCESS) {
-     EMBB_THROW(embb::base::ErrorException, "Could not get thread id");
-   }
-   // iterate over the mappings array
-   for (unsigned int i = 0; i != max_accessors_count_; ++i) {
-     // end of mappings? then we need to write our id
-     if (thread_id_mapping_[i] == -1) {
-       // try to CAS the initial value with out thread id
-       int expected = -1;
-       if (thread_id_mapping_[i].CompareAndSwap(expected,
-         static_cast<int>(embb_thread_index))) {
-         //successful, return our mapping
-         return i;
-       }
-     }
-     if (thread_id_mapping_[i] == static_cast<int>(embb_thread_index)) {
-       // found our mapping!
-       return i;
-     }
-   }
-   // when we reach this point, we have too many accessors
-   // (no mapping possible)
-   EMBB_THROW(embb::base::ErrorException, "Too many accessors");
-   return 0;
- }
- #ifdef EMBB_PLATFORM_COMPILER_MSVC
- #pragma warning(pop)
- #endif
- template<typename GuardType>
- void HazardPointer<GuardType>::RemoveGuard(int guard_position) {
-   const unsigned int my_thread_id = GetObjectLocalThreadIndex();
-   // check invariants...
-   assert(guard_position < max_guards_per_thread_);
-   assert(my_thread_id < max_accessors_count_);
-   // set guard
-   guards_[guard_position * max_accessors_count_ + my_thread_id] =
-     undefined_guard_;
- }
- template<typename GuardType>
- HazardPointer<GuardType>::HazardPointer(
-   embb::base::Function<void, GuardType> freeGuardCallback,
-   GuardType undefined_guard, int guardsPerThread, int accessors) :
-   max_accessors_count_(accessors < 0 ?
-     embb::base::Thread::GetThreadsMaxCount() : accessors),
-   undefined_guard_(undefined_guard),
-   max_guards_per_thread_(guardsPerThread),
-   release_object_callback_(freeGuardCallback),
-   thread_id_mapping_(static_cast<embb::base::Atomic<int>*>(
-     embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int>) *
-       max_accessors_count_))),
-   guards_(static_cast<embb::base::Atomic<GuardType>*>(
-     embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<GuardType>) *
-       max_guards_per_thread_ * max_accessors_count_))),
-   thread_local_retired_lists_temp_(static_cast<GuardType*>(
-     embb::base::Allocation::Allocate(sizeof(GuardType) *
-       max_guards_per_thread_ * max_accessors_count_ *
-       max_accessors_count_))),
-   thread_local_retired_lists_(static_cast<GuardType*>(
-     embb::base::Allocation::Allocate(sizeof(GuardType) *
-       max_guards_per_thread_ * max_accessors_count_ *
-       max_accessors_count_))) {
-   const unsigned int count_guards =
-     max_guards_per_thread_ * max_accessors_count_;
-   const unsigned int count_ret_elements =
-     count_guards * max_accessors_count_;
-   for (unsigned int i = 0; i != max_accessors_count_; ++i) {
-     //in-place new for each cell
-     new (&thread_id_mapping_[i]) embb::base::Atomic<int>(-1);
-   }
-   for (unsigned int i = 0; i != count_guards; ++i) {
-     //in-place new for each cell
-     new (&guards_[i]) embb::base::Atomic<GuardType>(undefined_guard);
-   }
-   for (unsigned int i = 0; i != count_ret_elements; ++i) {
-     //in-place new for each cell
-     new (&thread_local_retired_lists_temp_[i]) GuardType(undefined_guard);
-   }
-   for (unsigned int i = 0; i != count_ret_elements; ++i) {
-     //in-place new for each cell
-     new (&thread_local_retired_lists_[i]) GuardType(undefined_guard);
-   }
- }
- template<typename GuardType>
- HazardPointer<GuardType>::~HazardPointer() {
-   const unsigned int count_guards =
-     max_guards_per_thread_ * max_accessors_count_;
-   const unsigned int count_ret_elements =
-     count_guards * max_accessors_count_;
-   // Release references from all retired lists. Note that for this to work,
-   // the data structure using hazard pointer has still to be active... So
-   // first, the hazard pointer class shall be destructed, then the memory
-   // management class (e.g. some pool). Otherwise, the hazard pointer class
-   // would try to return memory to an already destructed memory manager.
-   for (unsigned int i = 0; i != count_ret_elements; ++i) {
-     GuardType pointerToFree = thread_local_retired_lists_[i];
-     if (pointerToFree == undefined_guard_) {
-       break;
-     }
-     release_object_callback_(pointerToFree);
-   }
-   for (unsigned int i = 0; i != max_accessors_count_; ++i) {
-     thread_id_mapping_[i].~Atomic();
-   }
-   embb::base::Allocation::Free(thread_id_mapping_);
-   for (unsigned int i = 0; i != count_guards; ++i) {
-     guards_[i].~Atomic();
-   }
-   embb::base::Allocation::Free(guards_);
-   for (unsigned int i = 0; i != count_ret_elements; ++i) {
-     thread_local_retired_lists_temp_[i].~GuardType();
-   }
-   embb::base::Allocation::Free(thread_local_retired_lists_temp_);
-   for (unsigned int i = 0; i != count_ret_elements; ++i) {
-     thread_local_retired_lists_[i].~GuardType();
-   }
-   embb::base::Allocation::Free(thread_local_retired_lists_);
- }
- template<typename GuardType>
- void HazardPointer<GuardType>::Guard(int guardPosition,
-   GuardType guardedElement) {
-   const unsigned int my_thread_id = GetObjectLocalThreadIndex();
-   // check invariants...
-   assert(guardPosition < max_guards_per_thread_);
-   assert(my_thread_id < max_accessors_count_);
-   // set guard
-   guards_[guardPosition * max_accessors_count_ + my_thread_id] =
-     guardedElement;
- }
- template<typename GuardType>
- size_t HazardPointer<GuardType>::ComputeMaximumRetiredObjectCount(
-   size_t guardsPerThread, int accessors) {
-   unsigned int accessorCount = (accessors == -1 ?
-     embb::base::Thread::GetThreadsMaxCount() :
-     accessors);
-   return static_cast<size_t>(
-     guardsPerThread * accessorCount * accessorCount);
- }
- /**
-  * Remark: it might be faster to just swap pointers for temp retired list and
-  * retired list. However, with the current implementation (one array for all
-  * retired and retired temp lists, respectively) this is not possible. This is
-  * not changed until this copying accounts for a performance problem. The
-  * copying is not the bottleneck currently.
-  */
- template<typename GuardType>
- void HazardPointer<GuardType>::CopyRetiredList(GuardType* sourceList,
-   GuardType* targetList, unsigned int retiredListSize,
-   GuardType undefinedGuard) {
-   bool done = false;
-   for (unsigned int ii = 0; ii != retiredListSize; ++ii) {
-     if (!done) {
-       GuardType guardToCopy = sourceList[ii];
-       if (guardToCopy == undefinedGuard) {
-         done = true;
-         if (targetList[ii] == undefinedGuard) {
-           // end of target list
-           break;
-         }
-       }
-       targetList[ii] = guardToCopy;
-     } else {
-       // we copied the whole source list, remaining values in the target
-       // have to be zeroed.
-       if (targetList[ii] == undefinedGuard) {
-         // end of target list
-         break;
-       } else {
-         targetList[ii] = undefinedGuard;
-       }
-     }
-   }
- }
- template<typename GuardType>
- void HazardPointer<GuardType>::UpdateRetiredList(GuardType* retired_list,
-   GuardType* updated_retired_list, unsigned int retired_list_size,
-   GuardType guarded_element, GuardType considered_hazard,
-   GuardType undefined_guard) {
-   // no hazard set here
-   if (considered_hazard == undefined_guard)
-     return;
-   // if this hazard is currently in the union of
-   // threadLocalRetiredLists and pointerToRetire, but not yet in
-   // threadLocalRetiredListsTemp, add it to that list
-   bool contained_in_union = false;
-   // first iterate over our retired list
-   for (unsigned int i = 0; i != retired_list_size; ++i) {
-     // when reaching 0, we can stop iterating (end of the "list")
-     if (retired_list[i] == 0)
-       break;
-     // the hazard is contained in the retired list... it shall go
-     // into the temp list, if not already there
-     if (retired_list[i] == considered_hazard) {
-       contained_in_union = true;
-       break;
-     }
-   }
-   // the union also contains pointerToRetire
-   if (!contained_in_union) {
-     contained_in_union = (considered_hazard == guarded_element);
-   }
-   // add the pointer to temp. retired list, if not already there
-   if (contained_in_union) {
-     for (unsigned int ii = 0; ii != retired_list_size; ++ii) {
-       // is it already there?
-       if (updated_retired_list[ii] == considered_hazard)
-         break;
-       // end of the list
-       if (updated_retired_list[ii] == undefined_guard) {
-         // add hazard
-         updated_retired_list[ii] = considered_hazard;
-         // we are done here...
-         break;
-       }
-     }
-   }
- }
- template<typename GuardType>
- void HazardPointer<GuardType>::EnqueueForDeletion(GuardType toRetire) {
-   unsigned int my_thread_id = GetObjectLocalThreadIndex();
-   // check for invariant
-   assert(my_thread_id < max_accessors_count_);
-   const unsigned int retired_list_size = max_accessors_count_ *
-     max_guards_per_thread_;
-   const unsigned int count_guards = max_accessors_count_ *
-     max_guards_per_thread_;
-   GuardType* retired_list =
-     &thread_local_retired_lists_[my_thread_id * retired_list_size];
-   GuardType* retired_list_temp =
-     &thread_local_retired_lists_temp_[my_thread_id * retired_list_size];
-   // wipe my temp. retired list...
-   for (unsigned int i = 0; i < retired_list_size; ++i) {
-     // the list is filled always from left to right, so occurring the first
-     // undefinedGuard, the remaining ones are also undefinedGuard...
-     if (retired_list_temp[i] == undefined_guard_)
-       break;
-     retired_list_temp[i] = undefined_guard_;
-   }
-   // we test each hazard if it is in the union of retiredList and
-   // guardedElement. If it is, it goes into the new retired list...
-   for (unsigned int i = 0; i != count_guards; ++i) {
-     // consider each current active guard
-     GuardType considered_hazard = guards_[i].Load();
-     UpdateRetiredList(retired_list, retired_list_temp, retired_list_size,
-       toRetire, considered_hazard, undefined_guard_);
-   }
-   int retired_list_size_signed = static_cast<int>(retired_list_size);
-   assert(retired_list_size_signed >= 0);
-   // now we created a a new retired list... the elements that are "removed"
-   // from the old retired list can be safely deleted now...
-   for (int i = -1; i != retired_list_size_signed; ++i) {
-     // we iterate over the current retired list... -1 is used as dummy element
-     // in the iteration, to also iterate over the pointerToRetire, which is
-     // logically also part of the current retired list...
-     // end of the list, stop iterating
-     if (i >= 0 && retired_list[i] == undefined_guard_)
-       break;
-     GuardType to_check_if_in_new_list = undefined_guard_;
-     to_check_if_in_new_list = (i == -1 ? toRetire : retired_list[i]);
-     // still in the new retired list?
-     bool still_in_list = false;
-     for (unsigned int ii = 0; ii != retired_list_size; ++ii) {
-       // end of list
-       if (retired_list_temp[ii] == undefined_guard_)
-         break;
-       if (to_check_if_in_new_list == retired_list_temp[ii]) {
-         // still in list, cannot delete element!
-         still_in_list = true;
-         break;
-       }
-     }
-     if (!still_in_list) {
-       this->release_object_callback_(to_check_if_in_new_list);
-     }
-   }
-   // copy the updated retired list (temp) to the retired list...
-   CopyRetiredList(retired_list_temp, retired_list, retired_list_size,
-     undefined_guard_);
- }

+ template<typename ElementT>
+ FixedSizeList<ElementT>::FixedSizeList(size_t max_size) :
+   max_size(max_size),
+   size(0) {
+   elementsArray = static_cast<ElementT*>(
+     embb::base::Allocation::Allocate(sizeof(ElementT) *
+       max_size));
+ }
+ template<typename ElementT>
+ inline size_t FixedSizeList<ElementT>::GetSize() const {
+   return size;
+ }
+ template<typename ElementT>
+ inline size_t FixedSizeList<ElementT>::GetMaxSize() const {
+   return max_size;
+ }
+ template<typename ElementT>
+ inline void FixedSizeList<ElementT>::clear() {
+   size = 0;
+ }
+ template<typename ElementT>
+ typename FixedSizeList<ElementT>::iterator
+ FixedSizeList<ElementT>::begin() const {
+   return &elementsArray[0];
+ }
+ template<typename ElementT>
+ typename FixedSizeList<ElementT>::iterator
+ FixedSizeList<ElementT>::end() const {
+   return &elementsArray[size];
+ }
+ template<typename ElementT>
+ FixedSizeList<ElementT>&
+ FixedSizeList<ElementT>::operator=(const FixedSizeList& other) {
+   size = 0;
+   if (max_size < other.size) {
+     EMBB_THROW(embb::base::ErrorException, "Copy target to small");
+   }
+   for (const_iterator it = other.begin(); it != other.end(); ++it) {
+     PushBack(*it);
+   }
+   return *this;
+ }
+ template<typename ElementT>
+ bool FixedSizeList<ElementT>::PushBack(ElementT const el) {
+   if (size + 1 > max_size) {
+     return false;
+   }
+   elementsArray[size] = el;
+   size++;
+   return true;
+ }
+ template<typename ElementT>
+ FixedSizeList<ElementT>::~FixedSizeList() {
+   embb::base::Allocation::Free(elementsArray);
+ }
+ template<typename GuardType>
+ bool HazardPointerThreadEntry<GuardType>::IsActive() {
+   return is_active;
+ }
+ template<typename GuardType>
+ bool HazardPointerThreadEntry<GuardType>::TryReserve() {
+   bool expected = false;
+   return is_active.CompareAndSwap(expected, true);
+ }
+ template<typename GuardType>
+ void HazardPointerThreadEntry<GuardType>::Deactivate() {
+   is_active = false;
+ }
+ template<typename GuardType>
+ size_t HazardPointerThreadEntry<GuardType>::GetRetiredCounter() {
+   return retired_list.GetSize();
+ }
+ template<typename GuardType>
+ FixedSizeList<GuardType>&
+ HazardPointerThreadEntry<GuardType>::GetRetired() {
+   return retired_list;
+ }
+ template<typename GuardType>
+ FixedSizeList<GuardType>&
+ HazardPointerThreadEntry<GuardType>::GetRetiredTemp() {
+   return retired_list_temp;
+ }
+ template<typename GuardType>
+ FixedSizeList<GuardType>&
+ HazardPointerThreadEntry<GuardType>::GetHazardTemp() {
+   return hazard_pointer_list_temp;
+ }
+ template<typename GuardType>
+ void HazardPointerThreadEntry<GuardType>::SetRetired(
+   internal::FixedSizeList<GuardType> const& retired_list) {
+   this->retired_list = retired_list;
+ }
+ template<typename GuardType>
+ HazardPointerThreadEntry<GuardType>::HazardPointerThreadEntry(
+   GuardType undefined_guard, int guards_per_thread,
+   size_t max_size_retired_list) :
+ #ifdef EMBB_DEBUG
+   who_is_scanning(-1),
+ #endif
+   undefined_guard(undefined_guard),
+   guards_per_thread(guards_per_thread),
+   max_size_retired_list(max_size_retired_list),
+   // initially, each potential thread is active... if that is not the case
+   // another thread could call "HelpScan", and block this thread in making
+   // progress.
+   // Still, threads can be leave the hazard pointer processing (deactivation),
+   // but this can only be done once, i.e., this is not revertable...
+   is_active(1),
+   retired_list(max_size_retired_list),
+   retired_list_temp(max_size_retired_list),
+   hazard_pointer_list_temp(embb::base::Thread::GetThreadsMaxCount() *
+     guards_per_thread) {
+   // Initialize guarded pointer list
+   guarded_pointers = static_cast<embb::base::Atomic<GuardType>*>(
+     embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<GuardType>)
+       * guards_per_thread));
+   for (int i = 0; i != guards_per_thread; ++i) {
+     new (static_cast<void*>(&guarded_pointers[i]))
+       embb::base::Atomic<GuardType>(undefined_guard);
+   }
+ }
+ template<typename GuardType>
+ HazardPointerThreadEntry<GuardType>::~HazardPointerThreadEntry() {
+   for (int i = 0; i != guards_per_thread; ++i) {
+     guarded_pointers[i].~Atomic();
+   }
+   embb::base::Allocation::Free(guarded_pointers);
+ }
+ template<typename GuardType>
+ GuardType HazardPointerThreadEntry<GuardType>::GetGuard(int pos) const {
+   return guarded_pointers[pos];
+ }
+ template<typename GuardType>
+ void HazardPointerThreadEntry<GuardType>::AddRetired(
+   GuardType pointerToGuard) {
+   retired_list.PushBack(pointerToGuard);
+ }
+ template<typename GuardType>
+ void HazardPointerThreadEntry<GuardType>::GuardPointer(int guardNumber,
+   GuardType pointerToGuard) {
+   guarded_pointers[guardNumber] = pointerToGuard;
+ }
+ template<typename GuardType>
+ void HazardPointerThreadEntry<GuardType>::SetActive(bool active) {
+   is_active = active;
+ }
+ template<typename GuardType>
+ unsigned int HazardPointer<GuardType>::GetCurrentThreadIndex() {
+   unsigned int thread_index;
+   int return_val = embb_internal_thread_index(&thread_index);
+   if (return_val != EMBB_SUCCESS)
+     EMBB_THROW(embb::base::ErrorException, "Could not get thread id!");
+   return thread_index;
+ }
+ template<typename GuardType>
+ bool HazardPointer<GuardType>::IsThresholdExceeded() {
+   double retiredCounterLocThread = static_cast<double>(
+     GetHazardPointerElementForCurrentThread().GetRetiredCounter());
+   return (retiredCounterLocThread >=
+     RETIRE_THRESHOLD *
+     static_cast<double>(active_hazard_pointer) *
+     static_cast<double>(guards_per_thread));
+ }
+ template<typename GuardType>
+ size_t HazardPointer<GuardType>::GetActiveHazardPointers() {
+   return active_hazard_pointer;
+ }
+ template<typename GuardType>
+ typename HazardPointer<GuardType>::HazardPointerThreadEntry_t&
+ HazardPointer<GuardType>::GetHazardPointerElementForCurrentThread() {
+   // For each thread, there is a slot in the hazard pointer array.
+   // Initially, the active flag of a hazard pointer entry is false.
+   // Only the respective thread changes the flag from true to false.
+   // This means that the current thread tells that he is about to
+   // stop operating, and the others are responsible for his retired
+   // list.
+   return hazard_pointer_thread_entry_array[GetCurrentThreadIndex()];
+ }
+ template<typename GuardType>
+ void HazardPointer<GuardType>::HelpScan() {
+   // This is a little bit different than in the paper. In the paper,
+   // the retired nodes from other threads are added to our retired list.
+   // To be able to give a bound on memory consumption, we execute scan
+   // for those threads, without moving elements. The effect shall be
+   // the same.
+   for (size_t i = 0; i != hazard_pointers; ++i) {
+     // Try to find non active lists...
+     if (!hazard_pointer_thread_entry_array[i].IsActive() &&
+       hazard_pointer_thread_entry_array[i].TryReserve()) {
+       // Here: grab retired things, first check if there are any...
+       if (hazard_pointer_thread_entry_array[i].GetRetiredCounter() > 0) {
+         Scan(&hazard_pointer_thread_entry_array[i]);
+       }
+       // We are done, mark it as deactivated again
+       hazard_pointer_thread_entry_array[i].Deactivate();
+     }
+   }
+ }
+ template<typename GuardType>
+ void HazardPointer<GuardType>::Scan(
+   HazardPointerThreadEntry_t* currentHazardPointerEntry) {
+ #ifdef EMBB_DEBUG
+   // scan should only be executed by one thread at a time, otherwise we have
+   // a bug... this assertions checks that
+   int expected = -1;
+   if (!currentHazardPointerEntry->GetScanningThread().CompareAndSwap(
+     expected, static_cast<int>(GetCurrentThreadIndex()))) {
+     assert(false);
+   }
+ #endif
+   // In this function, we compute the intersection between local retired
+   // pointers and all hazard pointers. This intersection cannot be deleted and
+   // forms the new local retired pointers list.
+   // It is assumed that the union of all retired pointers contains no two
+   // pointers with the same value. However, the union of all hazard guards
+   // might.
+   // Here, we store the temporary hazard pointers. We have to store them,
+   // as iterating multiple time over them might be expensive, as this
+   // atomic array is shared between threads.
+   currentHazardPointerEntry->GetHazardTemp().clear();
+   // Get all active hazard pointers!
+   for (unsigned int i = 0; i != hazard_pointers; ++i) {
+     // Only consider guards of active threads
+     if (hazard_pointer_thread_entry_array[i].IsActive()) {
+       // For each guard in an hazard pointer entry
+       for (int pos = 0; pos != guards_per_thread; ++pos) {
+         GuardType guard = hazard_pointer_thread_entry_array[i].GetGuard(pos);
+         // UndefinedGuard means not guarded
+         if (guard == undefined_guard)
+           continue;
+         currentHazardPointerEntry->GetHazardTemp().PushBack(guard);
+       }
+     }
+   }
+   currentHazardPointerEntry->GetRetiredTemp().clear();
+   // Sort them, we will do a binary search on each entry from the retired list
+   std::sort(
+     currentHazardPointerEntry->GetHazardTemp().begin(),
+     currentHazardPointerEntry->GetHazardTemp().end());
+   for (EMBB_CONTAINERS_CPP_DEPENDANT_TYPENAME
+     FixedSizeList<GuardType>::iterator
+     it = currentHazardPointerEntry->GetRetired().begin();
+     it != currentHazardPointerEntry->GetRetired().end(); ++it) {
+     if (false == ::std::binary_search(
+       currentHazardPointerEntry->GetHazardTemp().begin(),
+       currentHazardPointerEntry->GetHazardTemp().end(), *it)) {
+       this->free_guard_callback(*it);
+     } else {
+       currentHazardPointerEntry->GetRetiredTemp().PushBack(*it);
+     }
+   }
+   currentHazardPointerEntry->SetRetired(
+     currentHazardPointerEntry->GetRetiredTemp());
+ #ifdef EMBB_DEBUG
+   currentHazardPointerEntry->GetScanningThread().Store(-1);
+ #endif
+ }
+ template<typename GuardType>
+ size_t HazardPointer<GuardType>::GetRetiredListMaxSize() const {
+   return static_cast<size_t>(RETIRE_THRESHOLD *
+     static_cast<double>(embb::base::Thread::GetThreadsMaxCount()) *
+     static_cast<double>(guards_per_thread)) + 1;
+ }
+ template<typename GuardType>
+ HazardPointer<GuardType>::HazardPointer(
+   embb::base::Function<void, GuardType> free_guard_callback,
+   GuardType undefined_guard, int guards_per_thread) :
+   undefined_guard(undefined_guard),
+   guards_per_thread(guards_per_thread),
+   //initially, all potential hazard pointers are active...
+   active_hazard_pointer(embb::base::Thread::GetThreadsMaxCount()),
+   free_guard_callback(free_guard_callback) {
+   hazard_pointers = embb::base::Thread::GetThreadsMaxCount();
+   hazard_pointer_thread_entry_array =
+     static_cast<HazardPointerThreadEntry_t*>(
+       embb::base::Allocation::Allocate(sizeof(HazardPointerThreadEntry_t)
+         * hazard_pointers));
+   for (size_t i = 0; i != hazard_pointers; ++i) {
+     new (static_cast<void*>(&(hazard_pointer_thread_entry_array[i])))
+       HazardPointerThreadEntry_t(undefined_guard, guards_per_thread,
+         GetRetiredListMaxSize());
+   }
+ }
+ template<typename GuardType>
+ HazardPointer<GuardType>::~HazardPointer() {
+   for (size_t i = 0; i != hazard_pointers; ++i) {
+     hazard_pointer_thread_entry_array[i].~HazardPointerThreadEntry_t();
+   }
+   embb::base::Allocation::Free(static_cast<void*>
+     (hazard_pointer_thread_entry_array));
+ }
+ template<typename GuardType>
+ void HazardPointer<GuardType>::DeactivateCurrentThread() {
+   HazardPointerThreadEntry_t* current_thread_entry =
+     &hazard_pointer_thread_entry_array[GetCurrentThreadIndex()];
+   // Deactivating a non-active hazard pointer entry has no effect!
+   if (!current_thread_entry->IsActive()) {
+     return;
+   } else {
+     current_thread_entry->SetActive(false);
+     active_hazard_pointer--;
+   }
+ }
+ template<typename GuardType>
+ void HazardPointer<GuardType>::GuardPointer(int guardPosition,
+   GuardType guardedElement) {
+   GetHazardPointerElementForCurrentThread().GuardPointer(
+     guardPosition, guardedElement);
+ }
+ template<typename GuardType>
+ void HazardPointer<GuardType>::EnqueuePointerForDeletion(
+   GuardType guardedElement) {
+   GetHazardPointerElementForCurrentThread().AddRetired(guardedElement);
+   if (IsThresholdExceeded()) {
+     HazardPointerThreadEntry_t* currentHazardPointerEntry =
+       &GetHazardPointerElementForCurrentThread();
+     Scan(currentHazardPointerEntry);
+     // Help deactivated threads to clean their retired nodes.
+     HelpScan();
+   }
+ }
+ template<typename GuardType>
+ const double embb::containers::internal::HazardPointer<GuardType>::
+   RETIRE_THRESHOLD = 1.25f;

   } // namespace internal
   } // namespace containers
   } // namespace embb
...
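A hedged usage sketch of the interface this hunk (re-)adds, based solely on the methods shown above. HazardPointer is an internal class, and Node, FreeNode, and the embb::base::MakeFunction helper are assumptions for illustration:

    #include <embb/base/function.h>
    #include <embb/containers/internal/hazard_pointer.h>

    // Hypothetical node type and free routine; a retired pointer is handed
    // back through the callback once no thread holds a guard on it.
    struct Node { Node* next; };

    void FreeNode(Node* node) { delete node; }

    void Sketch() {
      embb::containers::internal::HazardPointer<Node*> hp(
        embb::base::MakeFunction(FreeNode),  // release callback
        NULL,                                // undefined guard value
        1);                                  // guards per thread

      Node* node = new Node();
      // Guard-and-check: guard the pointer, then re-check that it is still
      // the one published by the data structure before dereferencing.
      hp.GuardPointer(0, node);
      // ... use *node safely ...
      hp.GuardPointer(0, NULL);            // drop the guard
      hp.EnqueuePointerForDeletion(node);  // retire; freed when unguarded
    }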
containers_cpp/include/embb/containers/internal/hazard_pointer.h
View file @
579bdb09
...
@@ -40,274 +40,487 @@
...
@@ -40,274 +40,487 @@
#define EMBB_CONTAINERS_CPP_DEPENDANT_TYPENAME typename
#define EMBB_CONTAINERS_CPP_DEPENDANT_TYPENAME typename
#endif
#endif
// forward declaration for white-box test, used in friend declaration of
// HazardPointer class.
namespace
embb
{
namespace
containers
{
namespace
test
{
class
HazardPointerTest2
;
}
}
}
namespace
embb
{
namespace
embb
{
namespace
containers
{
namespace
containers
{
namespace
internal
{
namespace
internal
{
/**
/**
* This class contains a hazard pointer implementation following publication:
* A list with fixed size, implemented as an array. Replaces std::vector that
*
* was used in previous hazard pointer implementation.
* Maged M. Michael. "Hazard pointers: Safe memory reclamation for lock-free
* objects." IEEE Transactions on Parallel and Distributed Systems, 15.6 (2004)
* : 491-504.
*
*
* Hazard pointers are a wait-free memory reclamation scheme for lock-free
* Provides iterators, so we can apply algorithms from the STL.
* algorithms. Loosely speaking, they act as garbage collector. The release of
* objects contained within the memory, managed by the hazard pointer class, is
* intercepted and possibly delayed to avoid concurrency bugs.
*
*
* Before accessing an object, threads announce their intention to do so (i.e.
* \tparam ElementT Type of the elements contained in the list.
* the intention to dereference the respective pointer) to the hazard pointer
*/
* class. This is called guarding. From now on, the hazard pointer class will
template
<
typename
ElementT
>
* prohibit the release or reuse of the guarded object. This is necessary, to
class
FixedSizeList
{
* assure that the object is not released or reused while it is accessed and to
private
:
* assure that it has not unnoticed changed (effectively avoiding the ABA
/**
* problem).
* Capacity of the list
*
*/
* Note that after guarding an object, a consecutive check that the object (i.e.
size_t
max_size
;
* its pointer) is still valid is necessary; the object release could already
* have been started when guarding the object. Guarding is repeated, until this
/**
* check eventually succeeds. Note that this "guard-and-check" loop makes the
* Size of the list
* usage of the hazard pointer class lock-free, even though its implementation
*/
* is wait-free.
size_t
size
;
*
* Internally, guarding is realized by providing each thread slots, where
/**
* pointers can be placed that should not be freed (so called guards). When
* Pointer to the array containing the list
* trying to release an object, it is checked if the object's pointer is
*/
* guarded, and if so this object is not released, but instead put into a
ElementT
*
elementsArray
;
* retired list for later release, when all guards for this object have been
* removed.
/**
*
* Copy constructor not implemented. Would require dynamic memory allocation.
* In contrast to the original implementation, our implementation consumes only
*/
* fixed-size memory. Note that the number of threads accessing the hazard
FixedSizeList
(
* pointer object accounts quadratic for the memory consumption: managed objects
const
FixedSizeList
&
* are provided from outside and the number of accessors accounts quadric for
/**< [IN] Other list */
);
* the minimum count of those objects.
public
:
/**
* Definition of an iterator
*/
typedef
ElementT
*
iterator
;
/**
* Definition of a const iterator
*/
typedef
const
ElementT
*
const_iterator
;
/**
* Constructor, initializes list with given capacity
*/
FixedSizeList
(
size_t
max_size
/**< [IN] Capacity of the list */
);
/**
* Gets the current size of the list
*
* \return Size of the list
*/
inline
size_t
GetSize
()
const
;
/**
* Gets the capacity of the list
*
* \return The capacity of the list
*/
inline
size_t
GetMaxSize
()
const
;
/**
* Removes all elements from the list without changing the capacity
*/
inline
void
clear
();
/**
* Iterator pointing to the first element
*
* \return Begin iterator
*/
iterator
begin
()
const
;
/**
* Iterator pointing beyond the last element
*
* \return End iterator
*/
iterator
end
()
const
;
/**
* Copies the elements of another list to this list. The capacity of
* this list has to be greater than or equal to the size of the other list.
*/
FixedSizeList
&
operator
=
(
const
FixedSizeList
&
other
/**< [IN] Other list */
);
/**
* Appends an element to the end of the list
*
* \return \c false if the operation was not successful because the list is
* full, otherwise \c true.
*/
bool
PushBack
(
ElementT
const
el
/**< [IN] Element to append to the list */
);
/**
* Destructs the list.
*/
~
FixedSizeList
();
};
Removed (class documentation of the reworked HazardPointer, continued, followed by the class itself):

- * Also in contrast to the original implementation, we do not provide a
- * HelpScan functionality, which would give threads the possibility to stop
- * participating in the garbage collection: other threads would help to clean
- * up the objects protected by the exiting thread. The reason is that the
- * only use case would be a crashing thread that no longer participates;
- * however, as a thread has to signal its exit itself, this cannot be
- * realized anyway. In the end, it is still guaranteed that all memory is
- * properly returned (in the destructor).
- *
- * Additionally, the original implementation holds a threshold that
- * determines when objects shall be freed. In this implementation, we free
- * whenever it is possible to do so, as we want to keep the memory footprint
- * as low as possible. We also do not see a performance drop in the current
- * algorithms that use hazard pointers when no threshold is used.
- *
- * \tparam GuardType the type of the guards, usually the pointer type of the
- *         objects to protect
- */
-template< typename GuardType >
-class HazardPointer {
- public:
-  /**
-   * The user of the hazard pointer class has to provide the memory that is
-   * managed here, and has to take into account that the release of memory
-   * might be delayed. On top of the guaranteed count of objects, the user
-   * therefore has to provide the additional count of objects that can
-   * (worst case) be contained in the retired lists and are thus not released
-   * yet. The size sum of all retired lists is guardsPerThread *
-   * accessorCount * accessorCount, which is computed using this function.
-   * The result tells the user how many objects to allocate in addition to
-   * the guaranteed count.
-   *
-   * \waitfree
-   */
-  static size_t ComputeMaximumRetiredObjectCount(
-    size_t guardsPerThread,
-    /**< [IN] The count of guards per thread */
-    int accessors = -1
-    /**< [IN] Number of accessors, i.e., how many threads will access this
-              hazard pointer object. The default value -1 allows the maximum
-              number of threads as defined with
-              \c embb::base::Thread::GetThreadsMaxCount() */
-  );
-  /**
-   * Initializes the hazard pointer object
-   *
-   * \notthreadsafe
-   *
-   * \memory We dynamically allocate (sizeof(Atomic<int>) * accessors) +
-   *         (sizeof(Atomic<GuardType>) * guards_per_thread * accessors) +
-   *         (2*sizeof(GuardType) * guards_per_thread * accessors^2). The
-   *         last addend is the dominant one, as the accessor count accounts
-   *         quadratically for it.
-   */
-  HazardPointer(
-    embb::base::Function<void, GuardType> free_guard_callback,
-    /**< [IN] Callback to the function that shall be called when a retired
-              guard can be deleted */
-    GuardType undefined_guard,
-    /**< [IN] The guard value denoting "not guarded" */
-    int guards_per_thread,
-    /**< [IN] Number of guards per thread */
-    int accessors = -1
-    /**< [IN] Number of accessors, see above */
-  );
-  /**
-   * Deallocates internal data structures. Additionally releases all objects
-   * currently held in the retired lists, using the release functor passed in
-   * the constructor.
-   *
-   * \notthreadsafe
-   */
-  ~HazardPointer();
-  /**
-   * Guards \c to_guard. If the guarded element is passed to
-   * \c EnqueueForDeletion, it is prevented from release from now on. The
-   * user must check that \c EnqueueForDeletion has not been called on
-   * \c to_guard before the guarding took effect.
-   *
-   * \waitfree
-   */
-  void Guard(
-    int guard_position,
-    /**< [IN] Position to place the guard */
-    GuardType to_guard
-    /**< [IN] Element to guard */
-  );
-  /**
-   * Enqueues the guarded element for deletion. If it is not guarded, it is
-   * deleted immediately; if it is guarded, it is added to a thread-local
-   * retired list and deleted in a subsequent call to \c EnqueueForDeletion,
-   * when no guard is placed on it anymore.
-   */
-  void EnqueueForDeletion(
-    GuardType guarded_element
-    /**< [IN] Element to logically delete */
-  );
-  /**
-   * Explicitly removes the guard from the thread-local slot.
-   *
-   * \waitfree
-   */
-  void RemoveGuard(int guard_position);
-
- private:
-  /** HazardPointerTest2 is a white-box test needing access to private
-      members of this class, so it is declared as friend */
-  friend class embb::containers::test::HazardPointerTest2;
-  /** Maximum number of accessors (threads) for this instance. A thread that
-      has accessed this object once is permanently counted as accessor, even
-      if it no longer participates. If too many threads access this object,
-      an exception is thrown. */
-  unsigned int max_accessors_count_;
-  /** The guard value denoting "not guarded" */
-  GuardType undefined_guard_;
-  /** The maximal count of guards that can be set per thread */
-  int max_guards_per_thread_;
-  /** The functor that is called to release an object when it is safe to do
-      so, i.e., when no thread accesses the object anymore */
-  embb::base::Function<void, GuardType> release_object_callback_;
-  /** Mapping from EMBB thread ids to hazard pointer thread ids, which are
-      in range [0;accessor_count-1]. The position of an EMBB thread id in
-      this array determines the respective hazard pointer thread id. */
-  embb::base::Atomic<int>* thread_id_mapping_;
-  /** The hazard pointer guards, represented as a single array. Each thread
-      owns a fixed set of slots (guardsPerThread) within this array. */
-  embb::base::Atomic<GuardType>* guards_;
-  /** Temporary copy, \see thread_local_retired_lists_ */
-  GuardType* thread_local_retired_lists_temp_;
-  /** A list of lists, represented as a single array. Each thread maintains
-      a list of retired pointers: objects that are logically released but
-      not freed yet because some thread placed a guard on them. */
-  GuardType* thread_local_retired_lists_;
-  /** Each thread is assigned a thread index (starting with 0); gets the
-      index of the current thread. Note that this is not the global EMBB
-      index but a hazard pointer object local one. The user is free to
-      define fewer accessors than the default thread count, which avoids
-      wasting memory, since the accessor count accounts quadratically for
-      the memory consumption.
-
-      @return current (hazard pointer object local) thread index */
-  unsigned int GetObjectLocalThreadIndex();
-  /** Copies retired list \c source_list to retired list \c target_list */
-  static void CopyRetiredList(
-    GuardType* source_list,
-    /**< [IN] The source retired list */
-    GuardType* target_list,
-    /**< [IN] The target retired list */
-    unsigned int single_retired_list_size,
-    /**< [IN] The size of a thread-local retired list */
-    GuardType undefined_guard
-    /**< [IN] The undefined guard (usually the NULL pointer) */
-  );
-  /** Updates a retired list with respect to the currently considered
-      hazard */
-  static void UpdateRetiredList(
-    GuardType* retired_list,
-    /**< [IN] The old retired list */
-    GuardType* updated_retired_list,
-    /**< [IN] The updated retired list */
-    unsigned int retired_list_size,
-    /**< [IN] The size of a thread-local retired list */
-    GuardType to_retire,
-    /**< [IN] The element to retire */
-    GuardType considered_hazard,
-    /**< [IN] The currently considered hazard */
-    GuardType undefined_guard
-    /**< [IN] The undefined guard (usually the NULL pointer) */
-  );
-};

Added (the restored HazardPointerThreadEntry):

+/**
+ * Hazard pointer entry for a single thread. Holds the actual guards that
+ * determine if the current thread is about to use the guarded pointer.
+ * Guarded pointers are protected and not deleted.
+ *
+ * Moreover, the retired list for this thread is contained. It determines
+ * the pointers that have been allocated from this thread, but are not used
+ * anymore by this thread. However, another thread could have a guard on
+ * such a pointer, so it cannot be deleted immediately.
+ *
+ * For the scan operation, the intersection of the guarded pointers from all
+ * threads and the retired list has to be computed. For this computation, we
+ * need thread-local temporary lists, which are also contained here.
+ *
+ * \tparam GuardType The type of guard, usually a pointer.
+ */
+template< typename GuardType >
+class HazardPointerThreadEntry {
+#ifdef EMBB_DEBUG
+
+ public:
+  embb::base::Atomic<int>& GetScanningThread() {
+    return who_is_scanning;
+  }
+
+ private:
+  embb::base::Atomic<int> who_is_scanning;
+#endif
+
+ private:
+  /** Value of the undefined guard (means that no guard is set) */
+  GuardType undefined_guard;
+  /** The number of guards per thread; determines the size of the guard
+      array */
+  int guards_per_thread;
+  /** The capacity of the retired list. It is determined by the number of
+      guards, the retire threshold, and the maximum number of threads. */
+  size_t max_size_retired_list;
+  /** Set to true if the current thread is active. Used by a thread to
+      signal that it is leaving; if a thread has left, the other threads are
+      responsible for cleaning up its retired list. */
+  embb::base::Atomic<bool> is_active;
+  /** The guarded pointers of this thread, of size \c guards_per_thread */
+  embb::base::Atomic<GuardType>* guarded_pointers;
+  /** The retired list of this thread: pointers that shall be released when
+      no thread holds a guard on them anymore */
+  FixedSizeList<GuardType> retired_list;
+  /** Temporary retired list with the same capacity as \c retired_list; used
+      to compute the intersection of all guards and the retired list */
+  FixedSizeList<GuardType> retired_list_temp;
+  /** Temporary guards list, used to compute the intersection of all guards
+      and the \c retired_list */
+  FixedSizeList<GuardType> hazard_pointer_list_temp;
+  /** HazardPointerThreadEntry shall not be copied */
+  HazardPointerThreadEntry(const HazardPointerThreadEntry&);
+  /** HazardPointerThreadEntry shall not be assigned */
+  HazardPointerThreadEntry& operator=(const HazardPointerThreadEntry&);
+
+ public:
+  /**
+   * Checks if the current thread is active (with respect to participating
+   * in hazard pointer management).
+   *
+   * \return \c true if the current thread is active, otherwise \c false.
+   */
+  bool IsActive();
+  /**
+   * Tries to set the active flag to true (atomically). Used as a lock when
+   * the current thread is not active anymore and another thread helps to
+   * clean up its hazard pointers.
+   *
+   * \return \c true if this thread succeeded in setting the active flag,
+   *         otherwise \c false.
+   */
+  bool TryReserve();
+  /** Deactivates the current thread by atomically setting the active flag
+      to false */
+  void Deactivate();
+  /**
+   * Gets the count of currently retired pointers of the current thread.
+   *
+   * \return Count of currently retired pointers
+   */
+  size_t GetRetiredCounter();
+  /** \return Reference to \c retired_list */
+  FixedSizeList<GuardType>& GetRetired();
+  /** \return Reference to \c retired_list_temp */
+  FixedSizeList<GuardType>& GetRetiredTemp();
+  /** \return Reference to \c hazard_pointer_list_temp */
+  FixedSizeList<GuardType>& GetHazardTemp();
+  /** Sets the retired list */
+  void SetRetired(
+    embb::containers::internal::FixedSizeList<GuardType> const& retired_list
+    /**< [IN] Retired list */
+  );
+  /** Constructor */
+  HazardPointerThreadEntry(
+    GuardType undefined_guard,
+    /**< [IN] Value of the undefined guard (e.g. NULL) */
+    int guards_per_thread,
+    /**< [IN] Number of guards per thread */
+    size_t max_size_retired_list
+    /**< [IN] The capacity of the retired list(s) */
+  );
+  /** Destructor, deallocates the lists */
+  ~HazardPointerThreadEntry();
+  /** Gets the guard at the specified position; positions are numbered,
+      beginning with 0 */
+  GuardType GetGuard(
+    int pos
+    /**< [IN] Position of the guard */
+  ) const;
+  /** Adds a pointer to the retired list */
+  void AddRetired(
+    GuardType pointerToGuard
+    /**< [IN] Guard to retire */
+  );
+  /** Guards a pointer */
+  void GuardPointer(
+    int guardNumber,
+    /**< [IN] Position of the guard */
+    GuardType pointerToGuard
+    /**< [IN] Pointer to guard */
+  );
+  /** Sets the current thread active, i.e., announces that the thread
+      participates in managing hazard pointers */
+  void SetActive(
+    bool active
+    /**< [IN] \c true for active, \c false for inactive */
+  );
+};

Added (the restored HazardPointer):

+/**
+ * HazardPointer implementation as presented in:
+ *
+ * Maged M. Michael. "Hazard pointers: Safe memory reclamation for lock-free
+ * objects." IEEE Transactions on Parallel and Distributed Systems, 15.6
+ * (2004): 491-504.
+ *
+ * In contrast to the original implementation, our implementation only uses
+ * fixed-size memory. There is a safe upper limit; hazard pointers are
+ * guaranteed not to consume more memory. Memory is allocated solely at
+ * initialization.
+ *
+ * Hazard pointers solve the ABA problem for lock-free algorithms. Before
+ * accessing a pointer, threads announce that they want to access this
+ * pointer and then check if the pointer is still valid. This announcement
+ * is done by placing a guard. It is guaranteed that the pointer is not
+ * reused until all threads remove their guards to this pointer. Objects
+ * these pointers point to can therefore not be deleted directly. Instead,
+ * the pointers are put into a list for later deletion (retired list).
+ * Regularly, this list is processed to check which pointers can be deleted.
+ * If a pointer can be deleted, a callback function provided by the user is
+ * called. The user can then, e.g., free the respective object, so that the
+ * pointer can be safely reused.
+ */
+template< typename GuardType >
+class HazardPointer {
+ private:
+  /** Concrete hazard pointer entry type */
+  typedef HazardPointerThreadEntry<GuardType> HazardPointerThreadEntry_t;
+  /** The guard value denoting "not guarding" */
+  GuardType undefined_guard;
+  /** The capacity of the retired list (a safe upper bound for the retired
+      list size) */
+  int retired_list_max_size;
+  /** Guards that can be set per thread */
+  int guards_per_thread;
+  /** Array of HazardPointerThreadEntry elements; each thread is assigned
+      to one */
+  HazardPointerThreadEntry_t* hazard_pointer_thread_entry_array;
+  /** The threshold that determines at which size of the retired list the
+      contained pointers are tried to be deleted */
+  static const double RETIRE_THRESHOLD;
+  /** Each thread is assigned a thread index (starting with 0); gets the
+      index of the current thread */
+  static unsigned int GetCurrentThreadIndex();
+  /** The number of hazard pointers currently active */
+  size_t active_hazard_pointer;
+  /** Count of all hazard pointers */
+  size_t hazard_pointers;
+  /** The callback that is triggered when a retired guard can be freed;
+      usually, the user will call a free here */
+  embb::base::Function<void, GuardType> free_guard_callback;
+  /**
+   * Checks if the current size of the retired list exceeds the threshold,
+   * so that each retired guard is checked for being non-hazardous.
+   *
+   * \return \c true if the threshold is exceeded, otherwise \c false.
+   */
+  bool IsThresholdExceeded();
+  /**
+   * Gets the number of hazard pointers currently active.
+   *
+   * \return Number of active hazard pointers
+   */
+  size_t GetActiveHazardPointers();
+  /**
+   * Gets the hazard pointer entry for the current thread.
+   *
+   * \return Hazard pointer entry for the current thread
+   */
+  HazardPointerThreadEntry_t& GetHazardPointerElementForCurrentThread();
+  /**
+   * Threads might stop participating in hazard pointer management. This
+   * method helps those threads by processing their retired lists.
+   */
+  void HelpScan();
+  /**
+   * Checks the retired list of a hazard pointer entry for elements that can
+   * be freed, and executes the delete callback for those elements.
+   */
+  void Scan(
+    HazardPointerThreadEntry_t* currentHazardPointerEntry
+    /**< [IN] Hazard pointer entry whose retired list shall be checked for
+              deletable elements */
+  );
+
+ public:
+  /**
+   * Gets the capacity of one retired list
+   *
+   * \waitfree
+   */
+  size_t GetRetiredListMaxSize() const;
+  /**
+   * Initializes the hazard pointer object
+   *
+   * \notthreadsafe
+   *
+   * \memory
+   * - Let \c t be the number of maximal threads determined by EMBB
+   * - Let \c g be the number of guards per thread
+   * - Let \c x be 1.25*t*g + 1
+   *
+   * We dynamically allocate \c x*(3*t+1) elements of size \c sizeof(void*).
+   */
+  HazardPointer(
+    embb::base::Function<void, GuardType> free_guard_callback,
+    /**< [IN] Callback to the function that shall be called when a retired
+              guard can be deleted */
+    GuardType undefined_guard,
+    /**< [IN] The guard value denoting "not guarded" */
+    int guards_per_thread
+    /**< [IN] Number of guards per thread */
+  );
+  /**
+   * Deallocates the lists for hazard pointer management. Note that no
+   * objects currently in the retired lists are deleted; this is the
+   * responsibility of the user. Usually, HazardPointer manages pointers of
+   * an object pool. After destructing HazardPointer, the object pool is
+   * deleted, so that everything is properly cleaned up.
+   */
+  ~HazardPointer();
+  /**
+   * Announces that the current thread stops participating in hazard pointer
+   * management. The other threads now take care of its retired list.
+   *
+   * \waitfree
+   */
+  void DeactivateCurrentThread();
+  /** Guards \c guardedElement with the guard at position
+      \c guardPosition */
+  void GuardPointer(
+    int guardPosition,
+    GuardType guardedElement
+  );
+  /**
+   * Enqueues a pointer for deletion. It is added to the retired list and
+   * deleted when no thread accesses it anymore.
+   */
+  void EnqueuePointerForDeletion(
+    GuardType guardedElement
+  );
+};
 }  // namespace internal
 }  // namespace containers
...
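All the call-site hunks below follow the same guard-then-revalidate protocol. A condensed sketch of that protocol, assuming the restored interface above (the surrounding types are illustrative):

#include <embb/base/atomic.h>
#include <embb/containers/internal/hazard_pointer.h>

template< typename Node >
Node* AcquireGuarded(
  embb::base::Atomic<Node*>& shared,
  embb::containers::internal::HazardPointer<Node*>& hp) {
  for (;;) {
    Node* cached = shared;          // 1. read the shared pointer
    hp.GuardPointer(0, cached);     // 2. announce the intent to use it
    if (cached == shared) {         // 3. re-check: if unchanged, the node was
      return cached;                //    not retired before the guard became
    }                               //    visible, so it is safe to dereference
  }
}

A node obtained this way is later handed to EnqueuePointerForDeletion(), which defers the free callback until no thread guards it anymore.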
containers_cpp/include/embb/containers/internal/lock_free_mpmc_queue-inl.h
View file @
579bdb09
...
@@ -77,12 +77,7 @@ LockFreeMPMCQueue<Type, ValuePool>::~LockFreeMPMCQueue() {
 template< typename Type, typename ValuePool >
 LockFreeMPMCQueue<Type, ValuePool>::LockFreeMPMCQueue(size_t capacity) :
 capacity(capacity),
-// Object pool, size with respect to the maximum number of retired nodes not
-// eligible for reuse. +1 for dummy node.
-objectPool(
-  MPMCQueueNodeHazardPointer_t::ComputeMaximumRetiredObjectCount(2) +
-  capacity + 1),
 // Disable "this is used in base member initializer" warning.
 // We explicitly want this.
 #ifdef EMBB_PLATFORM_COMPILER_MSVC
...
@@ -94,7 +89,13 @@ delete_pointer_callback(*this,
 #ifdef EMBB_PLATFORM_COMPILER_MSVC
 #pragma warning(pop)
 #endif
-hazardPointer(delete_pointer_callback, NULL, 2) {
+hazardPointer(delete_pointer_callback, NULL, 2),
+// Object pool, size with respect to the maximum number of retired nodes not
+// eligible for reuse. +1 for dummy node.
+objectPool(
+  hazardPointer.GetRetiredListMaxSize() *
+  embb::base::Thread::GetThreadsMaxCount() +
+  capacity + 1) {
   // Allocate dummy node to reduce the number of special cases to consider.
   internal::LockFreeMPMCQueueNode<Type>* dummyNode = objectPool.Allocate();
   // Initially, head and tail point to the dummy node.
...
@@ -119,7 +120,7 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryEnqueue(Type const& element) {
   for (;;) {
     my_tail = tail;
-    hazardPointer.Guard(0, my_tail);
+    hazardPointer.GuardPointer(0, my_tail);
     // Check if pointer is still valid after guarding.
     if (my_tail != tail) {
...
@@ -162,12 +163,12 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryDequeue(Type & element) {
   Type data;
   for (;;) {
     my_head = head;
-    hazardPointer.Guard(0, my_head);
+    hazardPointer.GuardPointer(0, my_head);
     if (my_head != head) continue;
     my_tail = tail;
     my_next = my_head->GetNext();
-    hazardPointer.Guard(1, my_next);
+    hazardPointer.GuardPointer(1, my_next);
     if (head != my_head) continue;
     if (my_next == NULL)
...
@@ -186,7 +187,7 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryDequeue(Type & element) {
     break;
   }
-  hazardPointer.EnqueueForDeletion(my_head);
+  hazardPointer.EnqueuePointerForDeletion(my_head);
   element = data;
   return true;
 }
...
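Reading the two constructor hunks together, the node headroom of the queue's object pool can be estimated. A rough sketch, assuming the retired-list bound x = 1.25*t*g + 1 stated in the restored \memory note above (the helper name is illustrative):

#include <cstddef>

std::size_t QueueNodeDemand(std::size_t t,         // max threads (EMBB)
                            std::size_t g,         // guards per thread (2)
                            std::size_t capacity) {
  // Capacity of one retired list: 1.25 * t * g + 1, in integer arithmetic.
  std::size_t retired_list_max_size = (t * g) + (t * g) / 4 + 1;
  return retired_list_max_size * t   // worst case parked in retired lists
       + capacity                    // live queue elements
       + 1;                          // the dummy node
}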
containers_cpp/include/embb/containers/internal/lock_free_stack-inl.h
View file @
579bdb09
...
@@ -81,12 +81,13 @@ capacity(capacity),
 #ifdef EMBB_PLATFORM_COMPILER_MSVC
 #pragma warning(pop)
 #endif
+hazardPointer(delete_pointer_callback, NULL, 1),
 // Object pool, size with respect to the maximum number of retired nodes not
 // eligible for reuse:
 objectPool(
-  StackNodeHazardPointer_t::ComputeMaximumRetiredObjectCount(1) +
-  capacity),
-hazardPointer(delete_pointer_callback, NULL, 1) {
+  hazardPointer.GetRetiredListMaxSize() *
+  embb::base::Thread::GetThreadsMaxCount() +
+  capacity) {
 }
 template< typename Type, typename ValuePool >
...
@@ -127,7 +128,7 @@ bool LockFreeStack< Type, ValuePool >::TryPop(Type & element) {
     return false;
   // Guard top_cached
-  hazardPointer.Guard(0, top_cached);
+  hazardPointer.GuardPointer(0, top_cached);
   // Check if top is still top. If this is the case, it has not been
   // retired yet (because before retiring that thing, the retiring thread
...
@@ -143,16 +144,16 @@ bool LockFreeStack< Type, ValuePool >::TryPop(Type & element) {
       break;
     } else {
       // We continue with the next and can unguard top_cached
-      hazardPointer.Guard(0, NULL);
+      hazardPointer.GuardPointer(0, NULL);
     }
   }
   Type data = top_cached->GetElement();
   // We don't need to read from this reference anymore, unguard it
-  hazardPointer.Guard(0, NULL);
+  hazardPointer.GuardPointer(0, NULL);
-  hazardPointer.EnqueueForDeletion(top_cached);
+  hazardPointer.EnqueuePointerForDeletion(top_cached);
   element = data;
   return true;
...
containers_cpp/include/embb/containers/internal/lock_free_tree_value_pool-inl.h
View file @
579bdb09
...
@@ -42,7 +42,7 @@ template<typename Type, Type Undefined, class PoolAllocator,
 class TreeAllocator >
 bool LockFreeTreeValuePool<Type, Undefined, PoolAllocator, TreeAllocator>::
 IsLeaf(int node) {
-  if (node >= size_ - 1 && node <= 2 * size_ - 1) {
+  if (node >= size - 1 && node <= 2 * size - 1) {
     return true;
   }
   return false;
...
@@ -52,7 +52,7 @@ template<typename Type, Type Undefined, class PoolAllocator,
 class TreeAllocator >
 bool LockFreeTreeValuePool<Type, Undefined, PoolAllocator, TreeAllocator>::
 IsValid(int node) {
-  return (node >= 0 && node <= 2 * size_ - 1);
+  return (node >= 0 && node <= 2 * size - 1);
 }
 template<typename Type, Type Undefined, class PoolAllocator,
...
@@ -77,14 +77,14 @@ template<typename T, T Undefined, class PoolAllocator, class TreeAllocator >
 int LockFreeTreeValuePool<T, Undefined, PoolAllocator, TreeAllocator>::
 NodeIndexToPoolIndex(int node) {
   assert(IsLeaf(node));
-  return(node - (size_ - 1));
+  return(node - (size - 1));
 }
 template<typename Type, Type Undefined, class PoolAllocator,
 class TreeAllocator >
 int LockFreeTreeValuePool<Type, Undefined, PoolAllocator, TreeAllocator>::
 PoolIndexToNodeIndex(int index) {
-  int node = index + (size_ - 1);
+  int node = index + (size - 1);
   assert(IsLeaf(node));
   return node;
 }
...
@@ -100,7 +100,7 @@ template<typename T, T Undefined, class PoolAllocator, class TreeAllocator >
 int LockFreeTreeValuePool<T, Undefined, PoolAllocator, TreeAllocator>::
 GetParentNode(int node) {
   int parent = (node - 1) / 2;
-  assert(parent >= 0 && parent < size_ - 1);
+  assert(parent >= 0 && parent < size - 1);
   return parent;
 }
...
@@ -112,11 +112,11 @@ allocate_rec(int node, Type& element) {
   if (IsLeaf(node)) {
     int pool_index = NodeIndexToPoolIndex(node);
-    Type expected = pool_[pool_index];
+    Type expected = pool[pool_index];
     if (expected == Undefined)
       return -1;
-    if (pool_[pool_index].CompareAndSwap(expected, Undefined)) {
+    if (pool[pool_index].CompareAndSwap(expected, Undefined)) {
       element = expected;
       return pool_index;
     }
...
@@ -131,11 +131,11 @@ allocate_rec(int node, Type& element) {
   // atomically decrement the value in the node if the result is greater than
   // or equal to zero. This cannot be done atomically.
   do {
-    current = tree_[node];
+    current = tree[node];
     desired = current - 1;
     if (desired < 0)
       return -1;
-  } while (!tree_[node].CompareAndSwap(current, desired));
+  } while (!tree[node].CompareAndSwap(current, desired));
   int leftResult = allocate_rec(GetLeftChildIndex(node), element);
   if (leftResult != -1) {
...
@@ -156,7 +156,7 @@ Fill(int node, int elementsToStore, int power2Value) {
   if (IsLeaf(node))
     return;
-  tree_[node] = elementsToStore;
+  tree[node] = elementsToStore;
   int postPower2Value = power2Value >> 1;
...
@@ -188,14 +188,14 @@ Free(Type element, int index) {
   assert(element != Undefined);
   // Put the element back
-  pool_[index].Store(element);
-  assert(index >= 0 && index < size_);
+  pool[index].Store(element);
+  assert(index >= 0 && index < size);
   int node = PoolIndexToNodeIndex(index);
   while (!IsRoot(node)) {
     node = GetParentNode(node);
-    tree_[node].FetchAndAdd(1);
+    tree[node].FetchAndAdd(1);
   }
 }
...
@@ -205,76 +205,37 @@ template< typename ForwardIterator >
 LockFreeTreeValuePool<Type, Undefined, PoolAllocator, TreeAllocator>::
 LockFreeTreeValuePool(ForwardIterator first, ForwardIterator last) {
   // Number of elements to store
-  real_size_ = static_cast<int>(::std::distance(first, last));
+  real_size = static_cast<int>(::std::distance(first, last));
   // Let k be smallest number so that real_size <= 2^k, size = 2^k
-  size_ = GetSmallestPowerByTwoValue(real_size_);
+  size = GetSmallestPowerByTwoValue(real_size);
   // Size of binary tree without the leaves
-  tree_size_ = size_ - 1;
-  // make sure signed values are not negative
-  assert(tree_size_ >= 0);
-  assert(real_size_ >= 0);
-  size_t tree_size_unsigned = static_cast<size_t>(tree_size_);
-  size_t real_size_unsigned = static_cast<size_t>(real_size_);
+  tree_size = size - 1;
   // Pool stores elements of type T
-  pool_ = pool_allocator_.allocate(real_size_unsigned);
-  // invoke inplace new for each pool element
-  for (size_t i = 0; i != real_size_unsigned; ++i) {
-    new (&pool_[i]) embb::base::Atomic<Type>();
-  }
+  pool = poolAllocator.allocate(static_cast<size_t>(real_size));
   // Tree holds the counter of not allocated elements
-  tree_ = tree_allocator_.allocate(tree_size_unsigned);
-  // invoke inplace new for each tree element
-  for (size_t i = 0; i != tree_size_unsigned; ++i) {
-    new (&tree_[i]) embb::base::Atomic<int>();
-  }
+  tree = treeAllocator.allocate(static_cast<size_t>(tree_size));
   int i = 0;
   // Store the elements from the range
   for (ForwardIterator curIter(first); curIter != last; ++curIter) {
-    pool_[i++] = *curIter;
+    pool[i++] = *curIter;
   }
   // Initialize the binary tree without leaves (counters)
-  Fill(0, static_cast<int>(::std::distance(first, last)), size_);
+  Fill(0, static_cast<int>(::std::distance(first, last)), size);
 }
 template<typename Type, Type Undefined, class PoolAllocator,
 class TreeAllocator >
 LockFreeTreeValuePool<Type, Undefined, PoolAllocator, TreeAllocator>::
 ~LockFreeTreeValuePool() {
-  size_t tree_size_unsigned = static_cast<size_t>(tree_size_);
-  size_t real_size_unsigned = static_cast<size_t>(real_size_);
-  // invoke destructor for each pool element
-  for (size_t i = 0; i != real_size_unsigned; ++i) {
-    pool_[i].~Atomic();
-  }
-  pool_allocator_.deallocate(pool_, real_size_unsigned);
-  // invoke destructor for each tree element
-  for (size_t i = 0; i != tree_size_unsigned; ++i) {
-    tree_[i].~Atomic();
-  }
-  tree_allocator_.deallocate(tree_, tree_size_unsigned);
+  poolAllocator.deallocate(pool, static_cast<size_t>(real_size));
+  treeAllocator.deallocate(tree, static_cast<size_t>(tree_size));
 }
-template<typename Type, Type Undefined, class PoolAllocator,
-class TreeAllocator >
-size_t LockFreeTreeValuePool<Type, Undefined, PoolAllocator, TreeAllocator>::
-GetMinimumElementCountForGuaranteedCapacity(size_t capacity) {
-  // for this value pool, this is just capacity...
-  return capacity;
-}
 }  // namespace containers
...
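The index arithmetic above is easier to see with concrete numbers. A small worked example, assuming size = 8 (so the inner tree has tree_size = size - 1 = 7 counter nodes and the leaves start at array index 7):

#include <cassert>

void TreeIndexExample() {
  int size = 8;
  int node = 9;                        // a leaf node of the complete tree
  int pool_index = node - (size - 1);  // NodeIndexToPoolIndex: 9 - 7 = 2
  assert(pool_index == 2);
  int parent = (node - 1) / 2;         // GetParentNode: (9 - 1) / 2 = 4
  assert(parent >= 0 && parent < size - 1);
}

Each inner node counts the free elements below it; Allocate decrements counters on the way down, and Free increments them on the way back up to the root.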
containers_cpp/include/embb/containers/internal/object_pool-inl.h
View file @
579bdb09
...
@@ -83,8 +83,7 @@ ReturningTrueIterator::operator!=(const self_type& rhs) {
 template<class Type, typename ValuePool, class ObjectAllocator>
 bool ObjectPool<Type, ValuePool, ObjectAllocator>::
 IsContained(const Type &obj) const {
-  if ((&obj < &objects_array_[0]) ||
-      (&obj > &objects_array_[value_pool_size_ - 1])) {
+  if ((&obj < &objects[0]) ||
+      (&obj > &objects[capacity - 1])) {
     return false;
   } else {
     return true;
...
@@ -95,17 +94,17 @@ template<class Type, typename ValuePool, class ObjectAllocator>
 int ObjectPool<Type, ValuePool, ObjectAllocator>::
 GetIndexOfObject(const Type &obj) const {
   assert(IsContained(obj));
-  return(static_cast<int>(&obj - &objects_array_[0]));
+  return(static_cast<int>(&obj - &objects[0]));
 }
 template<class Type, typename ValuePool, class ObjectAllocator>
 Type* ObjectPool<Type, ValuePool, ObjectAllocator>::AllocateRaw() {
   bool val;
-  int allocated_index = value_pool_.Allocate(val);
+  int allocated_index = p.Allocate(val);
   if (allocated_index == -1) {
     return NULL;
   } else {
-    Type* ret_pointer = &(objects_array_[allocated_index]);
+    Type* ret_pointer = &(objects[allocated_index]);
     return ret_pointer;
   }
...
@@ -113,17 +112,15 @@ Type* ObjectPool<Type, ValuePool, ObjectAllocator>::AllocateRaw() {
 template<class Type, typename ValuePool, class ObjectAllocator>
 size_t ObjectPool<Type, ValuePool, ObjectAllocator>::GetCapacity() {
-  return capacity_;
+  return capacity;
 }
 template<class Type, typename ValuePool, class ObjectAllocator>
 ObjectPool<Type, ValuePool, ObjectAllocator>::ObjectPool(size_t capacity) :
-  capacity_(capacity),
-  value_pool_size_(
-    ValuePool::GetMinimumElementCountForGuaranteedCapacity(capacity)),
-  value_pool_(ReturningTrueIterator(0), ReturningTrueIterator(
-    value_pool_size_)),
-  objects_array_(object_allocator_.allocate(value_pool_size_)) {
+  capacity(capacity),
+  p(ReturningTrueIterator(0), ReturningTrueIterator(capacity)) {
+  // Allocate the objects (without construction, just get the memory)
+  objects = objectAllocator.allocate(capacity);
 }
 template<class Type, typename ValuePool, class ObjectAllocator>
...
@@ -131,7 +128,7 @@ void ObjectPool<Type, ValuePool, ObjectAllocator>::Free(Type* obj) {
   int index = GetIndexOfObject(*obj);
   obj->~Type();
-  value_pool_.Free(true, index);
+  p.Free(true, index);
 }
 template<class Type, typename ValuePool, class ObjectAllocator>
...
@@ -192,7 +189,7 @@ Type* ObjectPool<Type, ValuePool, ObjectAllocator>::Allocate(
 template<class Type, typename ValuePool, class ObjectAllocator>
 ObjectPool<Type, ValuePool, ObjectAllocator>::~ObjectPool() {
   // Deallocate the objects
-  object_allocator_.deallocate(objects_array_, value_pool_size_);
+  objectAllocator.deallocate(objects, capacity);
 }
 }  // namespace containers
 }  // namespace embb
...
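For orientation, a minimal use of the object pool's public interface (post-merge member names; the element type and values are illustrative):

#include <embb/containers/object_pool.h>

void ObjectPoolSketch() {
  embb::containers::ObjectPool<int> pool(64);  // up to 64 live objects
  int* obj = pool.Allocate();                  // constructs an element in place
  if (obj != NULL) {
    *obj = 42;
    pool.Free(obj);                            // destructs it, returns the slot
  }
}

The pool itself only hands out slots; the underlying value pool (member p above) tracks which indices are free.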
containers_cpp/include/embb/containers/internal/wait_free_array_value_pool-inl.h
View file @
579bdb09
...
@@ -35,21 +35,21 @@ Free(Type element, int index) {
   assert(element != Undefined);
   // Just put back the element
-  pool_array_[index].Store(element);
+  pool[index].Store(element);
 }
 template<typename Type, Type Undefined, class Allocator>
 int WaitFreeArrayValuePool<Type, Undefined, Allocator>::
 Allocate(Type & element) {
-  for (int i = 0; i != size_; ++i) {
+  for (int i = 0; i != size; ++i) {
     Type expected;
     // If the memory cell is not available, go ahead
-    if (Undefined == (expected = pool_array_[i].Load()))
+    if (Undefined == (expected = pool[i].Load()))
       continue;
     // Try to get the memory cell
-    if (pool_array_[i].CompareAndSwap(expected, Undefined)) {
+    if (pool[i].CompareAndSwap(expected, Undefined)) {
       // When the CAS was successful, this element is ours
       element = expected;
       return i;
...
@@ -64,45 +64,23 @@ WaitFreeArrayValuePool<Type, Undefined, Allocator>::
 WaitFreeArrayValuePool(ForwardIterator first, ForwardIterator last) {
   size_t dist = static_cast<size_t>(std::distance(first, last));
-  size_ = static_cast<int>(dist);
-  // conversion may result in negative number. check!
-  assert(size_ >= 0);
+  size = static_cast<int>(dist);
   // Use the allocator to allocate an array of size dist
-  pool_array_ = allocator_.allocate(dist);
-  // invoke inplace new for each pool element
-  for (size_t i = 0; i != dist; ++i) {
-    new (&pool_array_[i]) embb::base::Atomic<Type>();
-  }
+  pool = allocator.allocate(dist);
   int i = 0;
   // Store the elements of the range
   for (ForwardIterator curIter(first); curIter != last; ++curIter) {
-    pool_array_[i++] = *curIter;
+    pool[i++] = *curIter;
   }
 }
 template<typename Type, Type Undefined, class Allocator>
 WaitFreeArrayValuePool<Type, Undefined, Allocator>::
 ~WaitFreeArrayValuePool() {
-  // invoke destructor for each pool element
-  for (int i = 0; i != size_; ++i) {
-    pool_array_[i].~Atomic();
-  }
-  // free memory
-  allocator_.deallocate(pool_array_, static_cast<size_t>(size_));
+  allocator.deallocate(pool, (size_t)size);
 }
-template<typename Type, Type Undefined, class Allocator>
-size_t WaitFreeArrayValuePool<Type, Undefined, Allocator>::
-GetMinimumElementCountForGuaranteedCapacity(size_t capacity) {
-  // for this value pool, this is just capacity...
-  return capacity;
-}
 }  // namespace containers
 }  // namespace embb
...
containers_cpp/include/embb/containers/lock_free_mpmc_queue.h
View file @
579bdb09
...
@@ -113,17 +113,8 @@ class LockFreeMPMCQueue {
  * least as many elements, maybe more.
  */
 size_t capacity;
-/**
- * The object pool, used for lock-free memory allocation.
- *
- * Warning: the objectPool has to be initialized before the hazardPointer
- * object, to be sure that the hazardPointer object is destructed before the
- * pool, as the hazardPointer object might return elements to the pool in its
- * destructor. So the ordering of the members objectPool and hazardPointer is
- * important here!
- */
-ObjectPool< internal::LockFreeMPMCQueueNode<Type>, ValuePool > objectPool;
+// Do not change the ordering of class local variables.
+// Important for initialization.
 /**
  * Callback to the method that is called by hazard pointers if a pointer is
...
@@ -133,17 +124,15 @@ class LockFreeMPMCQueue {
 delete_pointer_callback;
 /**
- * Definition of the used hazard pointer type
+ * The hazard pointer object, used for memory management.
  */
-typedef embb::containers::internal::HazardPointer
-  < internal::LockFreeMPMCQueueNode<Type>* >
-  MPMCQueueNodeHazardPointer_t;
-/**
- * The hazard pointer object, used for memory management.
- */
-MPMCQueueNodeHazardPointer_t hazardPointer;
+embb::containers::internal::HazardPointer
+  < internal::LockFreeMPMCQueueNode<Type>* > hazardPointer;
+/**
+ * The object pool, used for lock-free memory allocation.
+ */
+ObjectPool< internal::LockFreeMPMCQueueNode<Type>, ValuePool > objectPool;
 /**
  * Atomic pointer to the head node of the queue
...
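The "Do not change the ordering" comment above is load-bearing: C++ initializes members in declaration order, not in initializer-list order, and the objectPool initializer reads from hazardPointer. A self-contained toy illustrating the pattern (stand-in types, not the EMBB classes):

#include <cstddef>

struct HazardPointerT {                       // stand-in
  std::size_t GetRetiredListMaxSize() const { return 11; }
};
struct ObjectPoolT {                          // stand-in
  explicit ObjectPoolT(std::size_t n) : size(n) {}
  std::size_t size;
};

class QueueSketch {
  // hazardPointer is declared first, so it is fully constructed before
  // objectPool's initializer calls GetRetiredListMaxSize() on it.
  HazardPointerT hazardPointer;
  ObjectPoolT objectPool;

 public:
  explicit QueueSketch(std::size_t capacity)
    : hazardPointer(),
      objectPool(hazardPointer.GetRetiredListMaxSize() * 4 + capacity + 1) {}
};

Reversing the declarations would make objectPool read from a not-yet-constructed member, which is undefined behavior.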
containers_cpp/include/embb/containers/lock_free_stack.h
View file @
579bdb09
...
@@ -187,6 +187,11 @@ class LockFreeStack {
 delete_pointer_callback;
 /**
+ * The hazard pointer object, used for memory management.
+ */
+internal::HazardPointer< internal::LockFreeStackNode<Type>* > hazardPointer;
+/**
  * The callback function, used to cleanup non-hazardous pointers.
  * \see delete_pointer_callback
  */
...
@@ -194,27 +199,10 @@ class LockFreeStack {
 /**
  * The object pool, used for lock-free memory allocation.
- *
- * Warning: the objectPool has to be initialized before the hazardPointer
- * object, to be sure that the hazardPointer object is destructed before the
- * pool, as the hazardPointer object might return elements to the pool in its
- * destructor. So the ordering of the members objectPool and hazardPointer is
- * important here!
  */
 ObjectPool< internal::LockFreeStackNode<Type>, ValuePool > objectPool;
-/**
- * Definition of the used hazard pointer type
- */
-typedef internal::HazardPointer< internal::LockFreeStackNode<Type>* >
-  StackNodeHazardPointer_t;
-/**
- * The hazard pointer object, used for memory management.
- */
-StackNodeHazardPointer_t hazardPointer;
 /**
  * Atomic pointer to the top node of the stack (element that is popped next)
  */
 embb::base::Atomic<internal::LockFreeStackNode<Type>*> top;
...
containers_cpp/include/embb/containers/lock_free_tree_value_pool.h
View file @
579bdb09
...
@@ -123,25 +123,22 @@ class LockFreeTreeValuePool {
 LockFreeTreeValuePool& operator=(const LockFreeTreeValuePool&);
 // See algorithm description above
-int size_;
+int size;
 // See algorithm description above
-int tree_size_;
+int tree_size;
 // See algorithm description above
-int real_size_;
+int real_size;
 // The tree above the pool
-embb::base::Atomic<int>* tree_;
+embb::base::Atomic<int>* tree;
 // The actual pool
-embb::base::Atomic<Type>* pool_;
+embb::base::Atomic<Type>* pool;
 // respective allocator
-PoolAllocator pool_allocator_;
-// respective allocator
-TreeAllocator tree_allocator_;
+PoolAllocator poolAllocator;
+TreeAllocator treeAllocator;
 /**
  * Computes smallest power of two fitting the specified value
...
@@ -281,18 +278,6 @@ class LockFreeTreeValuePool {
 );
 /**
- * Due to concurrency effects, a pool might provide fewer elements than it
- * manages. However, usually one wants to guarantee a minimal capacity. The
- * count of elements that must be given to the pool in order to guarantee
- * \c capacity elements is computed using this function.
- *
- * \return count of indices the pool has to be initialized with
- */
-static size_t GetMinimumElementCountForGuaranteedCapacity(
-  size_t capacity
-  /**< [IN] count of indices that shall be guaranteed */
-);
-/**
  * Destructs the pool.
  *
  * \notthreadsafe
...
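For orientation, a minimal round trip through the tree pool's public interface (values are illustrative; internally the pool rounds its size up to the next power of two, cf. GetSmallestPowerByTwoValue above):

#include <embb/containers/lock_free_tree_value_pool.h>

void TreePoolSketch() {
  int values[4] = {10, 20, 30, 40};
  embb::containers::LockFreeTreeValuePool<int, -1>
    pool(values, values + 4);            // -1 is the "undefined" marker
  int element;
  int index = pool.Allocate(element);    // index >= 0 on success, -1 if empty
  if (index != -1) {
    pool.Free(element, index);           // return exactly what was taken
  }
}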
containers_cpp/include/embb/containers/object_pool.h
View file @
579bdb09
...
@@ -35,6 +35,7 @@
 namespace embb {
 namespace containers {
 /**
  * \defgroup CPP_CONTAINERS_POOLS Pools
  * Concurrent pools
...
@@ -61,29 +62,22 @@ class ObjectPool {
 /**
  * Allocator used to allocate elements of the object pool
  */
-ObjectAllocator object_allocator_;
+ObjectAllocator objectAllocator;
 /**
- * Capacity of the object pool
+ * Array holding the allocated object
  */
-size_t capacity_;
+Type* objects;
 /**
- * The size of the underlying value pool. This is also the size of the object
- * array in this class. It is assumed that the value pool manages indices in
- * range [0;value_pool_size_-1].
+ * Capacity of the object pool
  */
-size_t value_pool_size_;
+size_t capacity;
 /**
  * Underlying value pool
  */
-ValuePool value_pool_;
-/**
- * Array holding the allocated object
- */
-Type* objects_array_;
+ValuePool p;
 /**
  * Helper providing a virtual iterator that just returns true in each
...
containers_cpp/include/embb/containers/wait_free_array_value_pool.h
View file @
579bdb09
...
@@ -39,30 +39,12 @@ namespace containers {
  * \ingroup CPP_CONCEPT
  * \{
  * \par Description
- * A value pool is a multi-set of elements, where each element has a unique,
- * continuous (starting with 0) index. The elements cannot be modified and are
- * given at construction time by providing first/last iterators.
- *
- * \par
- * A value pool provides two primary operations: \c Allocate and \c Free.
- * \c Allocate allocates an element/index "pair" (index via return value,
- * element via reference parameter) from the pool, and \c Free returns an
- * element/index pair to the pool. To guarantee linearizability, \c element is
- * not allowed to be modified between \c Allocate and \c Free. It is only
- * allowed to free elements that have previously been allocated. The
- * \c Allocate function does not guarantee an order on which indices are
- * allocated. The count of elements that can be allocated with \c Allocate
- * might be smaller than the count of elements the pool is initialized with.
- * This might be because of implementation details and respective concurrency
- * effects: for example, if indices are managed within a queue, one has to
- * protect queue elements from concurrency effects (reuse and access). As long
- * as a thread potentially accesses a node (and with that an index), the
- * respective index cannot be given out to the user, even if it is logically
- * not part of the pool anymore. However, the user might want a guaranteed
- * number of available indices. Therefore, the static
- * \c GetMinimumElementCountForGuaranteedCapacity method is used: the user
- * passes the count of indices that shall be guaranteed by the pool, and the
- * method returns the count of indices the pool has to be initialized with in
- * order to guarantee this count.
+ * A value pool is a fixed-size multiset of elements, where each element has a
+ * unique index. The elements cannot be modified and are given at construction
+ * time (by providing first/last iterators). A value pool provides two
+ * operations: \c Allocate and \c Free. \c Allocate removes an element from
+ * the pool, and \c Free returns an element to the pool. It is only allowed to
+ * free elements that have previously been allocated.
  *
  * \par Requirements
  * - Let \c Pool be the pool class
...
@@ -72,7 +54,6 @@ namespace containers {
  * - Let \c i, j be forward iterators supporting \c std::distance.
  * - Let \c c be an object of type \c Type&
  * - Let \c e be a value of type \c int
- * - Let \c f be a value of type \c int
  *
  * \par Valid Expressions
...
@@ -91,7 +72,7 @@ namespace containers {
  * the bottom element. The bottom element cannot be stored in the pool, it
  * is exclusively used to mark empty cells. The pool initially contains
  * \c std::distance(i, j) elements which are copied during construction from
- * the range \c [i, j]. A concrete class satisfying the value pool concept
+ * the range \c [i, j). A concrete class satisfying the value pool concept
  * might provide additional template parameters for specifying allocators.
  * </td>
  * </tr>
...
@@ -99,10 +80,9 @@ namespace containers {
  * <td>\code{.cpp} Allocate(c) \endcode</td>
  * <td>\c int</td>
  * <td>
- * Allocates an element/index "pair" from the pool. Returns -1, if no
- * element is available, i.e., the pool is empty. Otherwise, returns the
- * index of the element in the pool. The value of the pool element is
- * written into parameter reference \c c.
+ * Gets an element from the pool. Returns -1, if no element is available,
+ * i.e., the pool is empty. Otherwise, returns the index of the element in
+ * the pool. The value of the pool element is written into reference \c c.
  * </td>
  * </tr>
 * <tr>
...
@@ -113,15 +93,6 @@ namespace containers {
  * \c Allocate. For each allocated element, \c Free must be called exactly
  * once.</td>
  * </tr>
- * <tr>
- * <td>\code{.cpp} GetMinimumElementCountForGuaranteedCapacity(f)
- * \endcode</td>
- * <td>\c void</td>
- * <td>Static method, returns the count of indices the user has to
- * initialize the pool with in order to guarantee a count of \c f elements
- * (irrespective of concurrency effects).
- * </td>
- * </tr>
 * </table>
 *
 * \}
...
@@ -145,10 +116,10 @@ template<typename Type,
 class Allocator = embb::base::Allocator< embb::base::Atomic<Type> > >
 class WaitFreeArrayValuePool {
  private:
-  int size_;
-  embb::base::Atomic<Type>* pool_array_;
+  int size;
+  embb::base::Atomic<Type>* pool;
   WaitFreeArrayValuePool();
-  Allocator allocator_;
+  Allocator allocator;
   // Prevent copy-construction
   WaitFreeArrayValuePool(const WaitFreeArrayValuePool&);
...
@@ -179,18 +150,6 @@ class WaitFreeArrayValuePool {
 );
 /**
- * Due to concurrency effects, a pool might provide fewer elements than it
- * manages. However, usually one wants to guarantee a minimal capacity. The
- * count of elements that must be given to the pool in order to guarantee
- * \c capacity elements is computed using this function.
- *
- * \return count of indices the pool has to be initialized with
- */
-static size_t GetMinimumElementCountForGuaranteedCapacity(
-  size_t capacity
-  /**< [IN] count of indices that shall be guaranteed */
-);
-/**
  * Destructs the pool.
  *
  * \notthreadsafe
...
@@ -216,7 +175,7 @@ class WaitFreeArrayValuePool {
  * Returns an element to the pool.
  *
  * \note The element must have been allocated with Allocate().
  *
  * \waitfree
  *
  * \see CPP_CONCEPTS_VALUE_POOL
...
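A round trip matching the valid-expressions table above — a sketch assuming the array pool's public interface, where -1 serves as the bottom element and therefore must not occur in the input range:

#include <embb/containers/wait_free_array_value_pool.h>

void ArrayPoolSketch() {
  int values[3] = {7, 8, 9};
  embb::containers::WaitFreeArrayValuePool<int, -1>
    pool(values, values + 3);
  int c;
  int e = pool.Allocate(c);  // e: index (or -1 if empty), c: element value
  if (e != -1) {
    pool.Free(c, e);         // exactly once per successful Allocate
  }
}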
containers_cpp/test/hazard_pointer_test.cc
View file @
579bdb09
...
@@ -31,71 +31,24 @@
...
@@ -31,71 +31,24 @@
namespace
embb
{
namespace
embb
{
namespace
containers
{
namespace
containers
{
namespace
test
{
namespace
test
{
IntObjectTestPool
::
IntObjectTestPool
(
unsigned
int
pool_size
)
:
poolSize
(
pool_size
)
{
simplePoolObjects
=
static_cast
<
int
*>
(
embb
::
base
::
Allocation
::
Allocate
(
sizeof
(
int
)
*
pool_size
));
simplePool
=
static_cast
<
embb
::
base
::
Atomic
<
int
>*>
(
embb
::
base
::
Allocation
::
Allocate
(
sizeof
(
embb
::
base
::
Atomic
<
int
>
)
*
pool_size
));
for
(
unsigned
int
i
=
0
;
i
!=
pool_size
;
++
i
)
{
// in-place new for each array cell
new
(
&
simplePool
[
i
])
embb
::
base
::
Atomic
<
int
>
;
}
for
(
unsigned
int
i
=
0
;
i
!=
pool_size
;
++
i
)
{
simplePool
[
i
]
=
FREE_MARKER
;
simplePoolObjects
[
i
]
=
0
;
}
}
IntObjectTestPool
::~
IntObjectTestPool
()
{
embb
::
base
::
Allocation
::
Free
(
simplePoolObjects
);
for
(
unsigned
int
i
=
0
;
i
!=
poolSize
;
++
i
)
{
// in-place new for each array cell
simplePool
[
i
].
~
Atomic
();
}
embb
::
base
::
Allocation
::
Free
(
simplePool
);
}
int
*
IntObjectTestPool
::
Allocate
()
{
for
(
unsigned
int
i
=
0
;
i
!=
poolSize
;
++
i
)
{
int
expected
=
FREE_MARKER
;
if
(
simplePool
[
i
].
CompareAndSwap
(
expected
,
ALLOCATED_MARKER
))
{
return
&
simplePoolObjects
[
i
];
}
}
return
0
;
}
void
IntObjectTestPool
::
Release
(
int
*
object_pointer
)
{
int
cell
=
object_pointer
-
simplePoolObjects
;
simplePool
[
cell
].
Store
(
FREE_MARKER
);
}
HazardPointerTest
::
HazardPointerTest
()
:
HazardPointerTest
::
HazardPointerTest
()
:
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(push)
#pragma warning(push)
#pragma warning(disable:4355)
#pragma warning(disable:4355)
#endif
#endif
delete_pointer_callback_
(
*
this
,
&
HazardPointerTest
::
DeletePointerCallback
),
delete_pointer_callback
(
*
this
,
&
HazardPointerTest
::
DeletePointerCallback
),
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#pragma warning(pop)
#endif
#endif
object_pool
_
(
NULL
),
object_pool
(
NULL
),
stack
_
(
NULL
),
stack
(
NULL
),
h
azard_pointer_
(
NULL
),
h
p
(
NULL
),
n_threads_
(
static_cast
<
int
>
n_threads
(
static_cast
<
int
>
(
partest
::
TestSuite
::
GetDefaultNumThreads
()))
{
(
partest
::
TestSuite
::
GetDefaultNumThreads
()))
{
-  n_elements_per_thread_ = 100;
-  n_elements_ = n_threads_ * n_elements_per_thread_;
+  n_elements_per_thread = 100;
+  n_elements = n_threads * n_elements_per_thread;
   embb::base::Function<void, embb::base::Atomic<int>*>
-    deletePointerCallback(
+    delete_pointer_callback(
       *this,
       &HazardPointerTest::DeletePointerCallback);
...
@@ -106,52 +59,45 @@ delete_pointer_callback_(*this, &HazardPointerTest::DeletePointerCallback),
 // placed, the pointer is not allowed to be deleted until the second thread
 // removes this guard.
 CreateUnit("HazardPointerTestThatGuardWorks").
-  Pre(&HazardPointerTest::HazardPointerTest1Pre, this).
+  Pre(&HazardPointerTest::HazardPointerTest1_Pre, this).
   Add(
-    &HazardPointerTest::HazardPointerTest1ThreadMethod,
-    this, static_cast<size_t>(n_threads_)).
-  Post(&HazardPointerTest::HazardPointerTest1Post, this);
+    &HazardPointerTest::HazardPointerTest1_ThreadMethod,
+    this, static_cast<size_t>(n_threads)).
+  Post(&HazardPointerTest::HazardPointerTest1_Post, this);
 }
-void HazardPointerTest::HazardPointerTest1Pre() {
+void HazardPointerTest::HazardPointerTest1_Pre() {
   embb_internal_thread_index_reset();
-  object_pool_ = embb::base::Allocation::
-    New<embb::containers::ObjectPool< embb::base::Atomic<int> > >
-    (static_cast<size_t>(n_elements_));
-  stack_ = embb::base::Allocation::
-    New<embb::containers::LockFreeStack< embb::base::Atomic<int>* > >
-    (static_cast<size_t>(n_elements_));
-  hazard_pointer_ = embb::base::Allocation::
-    New<embb::containers::internal::HazardPointer< embb::base::Atomic<int>* > >
-    (delete_pointer_callback_,
-    static_cast<embb::base::Atomic<int>*>(NULL),
-    1);
+  object_pool = new embb::containers::ObjectPool< embb::base::Atomic<int> >
+    (static_cast<size_t>(n_elements));
+  stack = new embb::containers::LockFreeStack< embb::base::Atomic<int>* >
+    (static_cast<size_t>(n_elements));
+  hp = new embb::containers::internal::HazardPointer
+    <embb::base::Atomic<int>*>(delete_pointer_callback, NULL, 1);
 }
-void HazardPointerTest::HazardPointerTest1Post() {
-  embb::base::Allocation::Delete(hazard_pointer_);
-  embb::base::Allocation::Delete(object_pool_);
-  embb::base::Allocation::Delete(stack_);
+void HazardPointerTest::HazardPointerTest1_Post() {
+  delete object_pool;
+  delete stack;
+  delete hp;
 }
-void HazardPointerTest::HazardPointerTest1ThreadMethod() {
+void HazardPointerTest::HazardPointerTest1_ThreadMethod() {
   unsigned int thread_index;
   embb_internal_thread_index(&thread_index);
 
-  for (int i = 0; i != n_elements_per_thread_; ++i) {
-    embb::base::Atomic<int>* allocated_object = object_pool_->Allocate(0);
-    hazard_pointer_->Guard(0, allocated_object);
-    bool success = stack_->TryPush(allocated_object);
+  for (int i = 0; i != n_elements_per_thread; ++i) {
+    embb::base::Atomic<int>* allocated_object = object_pool->Allocate(0);
+    hp->GuardPointer(0, allocated_object);
+    bool success = stack->TryPush(allocated_object);
     PT_ASSERT(success == true);
-    embb::base::Atomic<int>* allocated_object_from_different_thread(0);
+    embb::base::Atomic<int>* allocated_object_from_different_thread;
     int diff_count = 0;
...
@@ -159,366 +105,51 @@ void HazardPointerTest::HazardPointerTest1ThreadMethod() {
     bool success_pop;
     while (
-      (success_pop = stack_->TryPop(allocated_object_from_different_thread))
+      (success_pop = stack->TryPop(allocated_object_from_different_thread))
       == true
       && allocated_object_from_different_thread == allocated_object
       ) {
-      // try to make it probable to get an element from a different thread
-      // however, can be the same. Try 10000 times to get a different element.
+      //try to make it probable to get an element from a different thread
+      //however, can be the same. Try 10000 times to get a different element.
       if (diff_count++ > 10000) {
         same = true;
         break;
       }
-      bool success = stack_->TryPush(allocated_object_from_different_thread);
+      bool success = stack->TryPush(allocated_object_from_different_thread);
       PT_ASSERT(success == true);
     }
     PT_ASSERT(success_pop == true);
 
     allocated_object->Store(1);
-    hazard_pointer_->EnqueueForDeletion(allocated_object);
+    hp->EnqueuePointerForDeletion(allocated_object);
 
     if (!same) {
-      hazard_pointer_->Guard(0, allocated_object_from_different_thread);
+      hp->GuardPointer(0, allocated_object_from_different_thread);
 
       // if this holds, we were successful in guarding... otherwise we
       // were too late, because the pointer has already been added
       // to the retired list.
       if (*allocated_object_from_different_thread == 0) {
         // the pointer must not be deleted here!
-        vector_mutex_.Lock();
+        vector_mutex.Lock();
         for (std::vector< embb::base::Atomic<int>* >::iterator
-          it = deleted_vector_.begin();
-          it != deleted_vector_.end();
+          it = deleted_vector.begin();
+          it != deleted_vector.end();
           ++it) {
           PT_ASSERT(*it != allocated_object_from_different_thread);
         }
-        vector_mutex_.Unlock();
+        vector_mutex.Unlock();
       }
 
-      hazard_pointer_->Guard(0, NULL);
+      hp->GuardPointer(0, NULL);
     }
   }
 }
 void HazardPointerTest::DeletePointerCallback
   (embb::base::Atomic<int>* to_delete) {
-  vector_mutex_.Lock();
-  deleted_vector_.push_back(to_delete);
-  vector_mutex_.Unlock();
+  vector_mutex.Lock();
+  deleted_vector.push_back(to_delete);
+  vector_mutex.Unlock();
 }
-void HazardPointerTest2::DeletePointerCallback(int* to_delete) {
-  test_pool_->Release(to_delete);
-}
-
-bool HazardPointerTest2::SetRelativeGuards() {
-  unsigned int thread_index;
-  embb_internal_thread_index(&thread_index);
-
-  unsigned int my_begin = guards_per_phread_count_ * thread_index;
-  int guard_number = 0;
-  unsigned int alreadyGuarded = 0;
-
-  for (unsigned int i = my_begin;
-    i != my_begin + guards_per_phread_count_; ++i) {
-    if (shared_guarded_[i] != 0) {
-      alreadyGuarded++;
-      guard_number++;
-      continue;
-    }
-
-    int* to_guard = shared_allocated_[i];
-    if (to_guard) {
-      hazard_pointer_->Guard(guard_number, to_guard);
-
-      // changed in the meantime?
-      if (to_guard == shared_allocated_[i].Load()) {
-        // guard was successful. Communicate to other threads.
-        shared_guarded_[i] = to_guard;
-      } else {
-        // reset the guard, couldn't guard...
-        hazard_pointer_->RemoveGuard(guard_number);
-      }
-    }
-    guard_number++;
-  }
-
-  return (alreadyGuarded == guards_per_phread_count_);
-}
-void HazardPointerTest2::HazardPointerTest2Master() {
-  // while the hazard pointer guard array is not full
-  int** allocatedLocal = static_cast<int**>(
-    embb::base::Allocation::Allocate(
-      sizeof(int*) * guaranteed_capacity_pool_));
-
-  bool full = false;
-  while (!full) {
-    full = true;
-    for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
-      if (shared_guarded_[i] == 0) {
-        full = false;
-        break;
-      }
-    }
-
-    // not all guards set
-    for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
-      allocatedLocal[i] = test_pool_->Allocate();
-      shared_allocated_[i].Store(allocatedLocal[i]);
-    }
-
-    // set my hazards. We do not have to check, this must be successful
-    // here.
-    SetRelativeGuards();
-
-    // free
-    for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
-      shared_allocated_[i].Store(0);
-      hazard_pointer_->EnqueueForDeletion(allocatedLocal[i]);
-    }
-  }
-
-  embb::base::Allocation::Free(allocatedLocal);
-}
-void HazardPointerTest2::HazardPointerTest2Slave() {
-  unsigned int thread_index;
-  embb_internal_thread_index(&thread_index);
-
-  while (!SetRelativeGuards()) {}
-}
-void HazardPointerTest2::HazardPointerTest2Pre() {
-  embb_internal_thread_index_reset();
-  current_master_ = 0;
-  sync1_ = 0;
-  sync2_ = 0;
-
-  // first the test pool has to be created
-  test_pool_ = embb::base::Allocation::New<IntObjectTestPool>
-    (pool_size_using_hazard_pointer_);
-
-  // after the pool has been created, we create the hp class
-  hazard_pointer_ = embb::base::Allocation::
-    New<embb::containers::internal::HazardPointer<int*> >
-    (delete_pointer_callback_, static_cast<int*>(NULL),
-    static_cast<int>(guards_per_phread_count_), n_threads);
-
-  shared_guarded_ = static_cast<embb::base::Atomic<int*>*>(
-    embb::base::Allocation::Allocate(
-      sizeof(embb::base::Atomic<int*>) * guaranteed_capacity_pool_));
-
-  for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
-    // in-place new for each array cell
-    new (&shared_guarded_[i]) embb::base::Atomic<int*>;
-  }
-
-  shared_allocated_ = static_cast<embb::base::Atomic<int*>*>(
-    embb::base::Allocation::Allocate(
-      sizeof(embb::base::Atomic<int*>) * guaranteed_capacity_pool_));
-
-  for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
-    // in-place new for each array cell
-    new (&shared_allocated_[i]) embb::base::Atomic<int*>;
-  }
-
-  for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
-    shared_guarded_[i] = 0;
-    shared_allocated_[i] = 0;
-  }
-}
-void HazardPointerTest2::HazardPointerTest2Post() {
-  for (unsigned int i = 0; i != static_cast<unsigned int>(n_threads); ++i) {
-    for (unsigned int i2 = 0;
-      i2 != static_cast<unsigned int>(n_threads) * guards_per_phread_count_;
-      ++i2) {
-      if (hazard_pointer_->thread_local_retired_lists_
-        [i2 + i * n_threads * guards_per_phread_count_] == NULL) {
-        // all retired lists must be completely filled
-        PT_ASSERT(false);
-      }
-    }
-  }
-
-  unsigned int checks = 0;
-  for (unsigned int i = 0; i != static_cast<unsigned int>(n_threads); ++i) {
-    for (unsigned int i2 = 0;
-      i2 != static_cast<unsigned int>(n_threads) * guards_per_phread_count_;
-      ++i2) {
-      for (unsigned int j = 0;
-        j != static_cast<unsigned int>(n_threads); ++j) {
-        for (unsigned int j2 = 0;
-          j2 != static_cast<unsigned int>(n_threads) *
-          guards_per_phread_count_; ++j2) {
-          if (i2 == j2 && i == j)
-            continue;
-
-          // all retired elements have to be disjoint
-          PT_ASSERT(
-            hazard_pointer_->thread_local_retired_lists_
-            [i2 + i * n_threads * guards_per_phread_count_] !=
-            hazard_pointer_->thread_local_retired_lists_
-            [j2 + j * n_threads * guards_per_phread_count_]);
-
-          checks++;
-        }
-      }
-    }
-  }
-
-  // sanity check on the count of expected comparisons.
-  PT_ASSERT(
-    checks ==
-    n_threads * n_threads * guards_per_phread_count_ *
-    (n_threads * n_threads * guards_per_phread_count_ - 1));
-
-  std::vector<int*> additionallyAllocated;
-
-  // we should be able to still allocate the guaranteed capacity of
-  // elements from the pool.
-  for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
-    int* allocated = test_pool_->Allocate();
-
-    // allocated is not allowed to be zero
-    PT_ASSERT(allocated != NULL);
-
-    // push to vector, to check if elements are disjoint and to release
-    // them afterwards.
-    additionallyAllocated.push_back(allocated);
-  }
-
-  // the pool should now be empty
-  PT_ASSERT(test_pool_->Allocate() == NULL);
-
-  // release allocated elements...
-  for (unsigned int i = 0; i != additionallyAllocated.size(); ++i) {
-    test_pool_->Release(additionallyAllocated[i]);
-  }
-
-  // the additionallyAllocated elements shall be disjoint
-  for (unsigned int i = 0; i != additionallyAllocated.size(); ++i) {
-    for (unsigned int i2 = 0; i2 != additionallyAllocated.size(); ++i2) {
-      if (i == i2)
-        continue;
-      PT_ASSERT(additionallyAllocated[i] != additionallyAllocated[i2]);
-    }
-  }
-
-  // no allocated element should be in any retired list...
-  for (unsigned int a = 0; a != additionallyAllocated.size(); ++a) {
-    for (unsigned int i = 0; i != static_cast<unsigned int>(n_threads); ++i) {
-      for (unsigned int i2 = 0;
-        i2 != static_cast<unsigned int>(n_threads) *
-        guards_per_phread_count_; ++i2) {
-        PT_ASSERT(
-          hazard_pointer_->thread_local_retired_lists_
-          [i2 + i * n_threads * guards_per_phread_count_] !=
-          additionallyAllocated[a]);
-      }
-    }
-  }
-
-  for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
-    // in-place destruction for each array cell
-    shared_guarded_[i].~Atomic();
-  }
-  embb::base::Allocation::Free(shared_guarded_);
-
-  for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
-    // in-place destruction for each array cell
-    shared_allocated_[i].~Atomic();
-  }
-  embb::base::Allocation::Free(shared_allocated_);
-
-  embb::base::Allocation::Delete(hazard_pointer_);
-
-  // after deleting the hazard pointer object, all retired pointers have
-  // to be returned to the pool!
-  std::vector<int*> elementsInPool;
-
-  int* nextElement;
-  while ((nextElement = test_pool_->Allocate()) != NULL) {
-    for (unsigned int i = 0; i != elementsInPool.size(); ++i) {
-      // all elements need to be disjoint
-      PT_ASSERT(elementsInPool[i] != nextElement);
-    }
-    elementsInPool.push_back(nextElement);
-  }
-
-  // all elements should have been returned by the hp object, so we should
-  // be able to acquire all elements.
-  PT_ASSERT(elementsInPool.size() == pool_size_using_hazard_pointer_);
-
-  embb::base::Allocation::Delete(test_pool_);
-}
-void HazardPointerTest2::HazardPointerTest2ThreadMethod() {
-  for (;;) {
-    unsigned int thread_index;
-    embb_internal_thread_index(&thread_index);
-
-    if (thread_index == current_master_) {
-      HazardPointerTest2Master();
-    } else {
-      HazardPointerTest2Slave();
-    }
-
-    sync1_.FetchAndAdd(1);
-
-    // wait until the cleanup thread signals that it is finished
-    while (sync1_ != 0) {
-      int expected = n_threads;
-      int desired = FINISH_MARKER;
-      // select the thread responsible for cleanup
-      if (sync1_.CompareAndSwap(expected, desired)) {
-        // wipe arrays!
-        for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
-          shared_guarded_[i] = 0;
-          shared_allocated_[i] = 0;
-        }
-        // increase master
-        current_master_.FetchAndAdd(1);
-        sync2_ = 0;
-        sync1_.Store(0);
-      }
-    }
-
-    // wait for all threads to reach this position
-    sync2_.FetchAndAdd(1);
-    while (sync2_ != static_cast<unsigned int>(n_threads)) {}
-
-    // if each thread was master once, terminate.
-    if (current_master_ == static_cast<unsigned int>(n_threads)) {
-      return;
-    }
-  }
-}
-HazardPointerTest2::HazardPointerTest2() :
-  n_threads(static_cast<int>(partest::TestSuite::GetDefaultNumThreads())),
-#ifdef EMBB_PLATFORM_COMPILER_MSVC
-#pragma warning(push)
-#pragma warning(disable:4355)
-#endif
-  delete_pointer_callback_(*this, &HazardPointerTest2::DeletePointerCallback)
-#ifdef EMBB_PLATFORM_COMPILER_MSVC
-#pragma warning(pop)
-#endif
-{
-  guards_per_phread_count_ = 5;
-  guaranteed_capacity_pool_ = guards_per_phread_count_ * n_threads;
-  pool_size_using_hazard_pointer_ = guaranteed_capacity_pool_ +
-    guards_per_phread_count_ * n_threads * n_threads;
-
-  embb::base::Thread::GetThreadsMaxCount();
-
-  CreateUnit("HazardPointerTestSimulateMemoryWorstCase").
-    Pre(&HazardPointerTest2::HazardPointerTest2Pre, this).
-    Add(&HazardPointerTest2::HazardPointerTest2ThreadMethod,
-      this, static_cast<size_t>(n_threads)).
-    Post(&HazardPointerTest2::HazardPointerTest2Post, this);
-}
 }  // namespace test
 }  // namespace containers
 }  // namespace embb
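The tests above pin down the hazard-pointer contract: a pointer handed to EnqueueForDeletion (EnqueuePointerForDeletion in the shorter variant) may only reach the delete callback once no thread guards it anymore. Condensed to its essence, the protocol exercised here reads as follows (a sketch assembled from the calls shown above, not a runnable test):

    // Reader: publish a guard before dereferencing, drop it afterwards.
    hp->GuardPointer(0, object);   // guard slot 0 now protects `object`
    int value = object->Load();    // safe: reclamation is deferred
    hp->GuardPointer(0, NULL);     // release the guard

    // Owner: never delete directly, always retire.
    hp->EnqueuePointerForDeletion(object);  // callback fires once unguarded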
containers_cpp/test/hazard_pointer_test.h
@@ -36,112 +36,32 @@
 namespace embb {
 namespace containers {
 namespace test {
-/**
- * @brief a very simple wait-free object pool implementation to have tests
- * being independent of the EMBB object pool implementation.
- */
-class IntObjectTestPool {
- private:
-  int* simplePoolObjects;
-  embb::base::Atomic<int>* simplePool;
-
- public:
-  static const int ALLOCATED_MARKER = 1;
-  static const int FREE_MARKER = 0;
-
-  unsigned int poolSize;
-
-  explicit IntObjectTestPool(unsigned int pool_size);
-
-  ~IntObjectTestPool();
-
-  /**
-   * Allocate object from the pool
-   *
-   * @return the allocated object
-   */
-  int* Allocate();
-
-  /**
-   * Return an element to the pool
-   *
-   * @param object_pointer the object to be freed
-   */
-  void Release(int* object_pointer);
-};
-
 class HazardPointerTest : public partest::TestCase {
+ private:
+  embb::base::Function<void, embb::base::Atomic<int>*>
+    delete_pointer_callback;
+
+  //used to allocate random stuff, we will just use the pointers, not the
+  //contents
+  embb::containers::ObjectPool< embb::base::Atomic<int> >* object_pool;
+
+  //used to move pointer between threads
+  embb::containers::LockFreeStack< embb::base::Atomic<int>* >* stack;
+
+  embb::base::Mutex vector_mutex;
+  embb::containers::internal::HazardPointer<embb::base::Atomic<int>*>* hp;
+  std::vector< embb::base::Atomic<int>* > deleted_vector;
+
+  int n_threads;
+  int n_elements_per_thread;
+  int n_elements;
+
  public:
   /**
    * Adds test methods.
    */
   HazardPointerTest();
 
-  void HazardPointerTest1Pre();
-  void HazardPointerTest1Post();
-  void HazardPointerTest1ThreadMethod();
+  void HazardPointerTest1_Pre();
+  void HazardPointerTest1_Post();
+  void HazardPointerTest1_ThreadMethod();
   void DeletePointerCallback(embb::base::Atomic<int>* to_delete);
-
- private:
-  embb::base::Function<void, embb::base::Atomic<int>*>
-    delete_pointer_callback_;
-
-  //used to allocate random stuff, we will just use the pointers, not the
-  //contents
-  embb::containers::ObjectPool< embb::base::Atomic<int> >* object_pool_;
-
-  //used to move pointer between threads
-  embb::containers::LockFreeStack< embb::base::Atomic<int>* >* stack_;
-
-  embb::base::Mutex vector_mutex_;
-  embb::containers::internal::HazardPointer<embb::base::Atomic<int>*>*
-    hazard_pointer_;
-  std::vector< embb::base::Atomic<int>* > deleted_vector_;
-
-  int n_threads_;
-  int n_elements_per_thread_;
-  int n_elements_;
 };
-
-class HazardPointerTest2 : public partest::TestCase {
- public:
-  void DeletePointerCallback(int* to_delete);
-  bool SetRelativeGuards();
-  void HazardPointerTest2Master();
-  void HazardPointerTest2Slave();
-
-  void HazardPointerTest2Pre();
-  void HazardPointerTest2Post();
-  void HazardPointerTest2ThreadMethod();
-
-  HazardPointerTest2();
-
- private:
-  // number of threads participating in this test
-  int n_threads;
-
-  embb::base::Function<void, int*> delete_pointer_callback_;
-
-  // the thread id of the master
-  embb::base::Atomic<unsigned int> current_master_;
-
-  // variables to synchronize the threads. At each point in time there is
-  // one master; the master changes each round until each thread was master
-  // once.
-  embb::base::Atomic<int> sync1_;
-  embb::base::Atomic<unsigned int> sync2_;
-
-  unsigned int guards_per_phread_count_;
-  unsigned int guaranteed_capacity_pool_;
-  unsigned int pool_size_using_hazard_pointer_;
-
-  // The threads write here if they guarded an object successfully. Used to
-  // determine when all allocated objects were guarded successfully.
-  embb::base::Atomic<int*>* shared_guarded_;
-
-  // This array is used by the master to communicate and share what it has
-  // allocated with the slaves.
-  embb::base::Atomic<int*>* shared_allocated_;
-
-  // Reference to the object pool
-  IntObjectTestPool* test_pool_;
-
-  embb::containers::internal::HazardPointer<int*>* hazard_pointer_;
-
-  static const int FINISH_MARKER = -1;
-};
 
 }  // namespace test
 }  // namespace containers
...
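Both test classes follow the partest pattern used throughout this commit: the constructor registers a unit, and the framework invokes Pre once, the thread method on every test thread, and Post once. A skeletal sketch of that pattern (hypothetical class, for illustration only):

    class MyTest : public partest::TestCase {
     public:
      MyTest() {
        CreateUnit("MyTestUnit").
          Pre(&MyTest::Setup, this).            // runs once, before threads
          Add(&MyTest::ThreadMethod, this,
            static_cast<size_t>(n_threads_)).   // runs on each test thread
          Post(&MyTest::Teardown, this);        // runs once, after threads
      }
      void Setup();
      void ThreadMethod();
      void Teardown();
     private:
      int n_threads_;
    };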
containers_cpp/test/main.cc
@@ -55,7 +55,6 @@ using embb::containers::test::HazardPointerTest;
 using embb::containers::test::QueueTest;
 using embb::containers::test::StackTest;
 using embb::containers::test::ObjectPoolTest;
-using embb::containers::test::HazardPointerTest2;
 
 PT_MAIN("Data Structures C++") {
   unsigned int max_threads = static_cast<unsigned int>(
...
@@ -65,7 +64,6 @@ PT_MAIN("Data Structures C++") {
   PT_RUN(PoolTest< WaitFreeArrayValuePool<int COMMA -1> >);
   PT_RUN(PoolTest< LockFreeTreeValuePool<int COMMA -1> >);
   PT_RUN(HazardPointerTest);
-  PT_RUN(HazardPointerTest2);
   PT_RUN(QueueTest< WaitFreeSPSCQueue< ::std::pair<size_t COMMA int> > >);
   PT_RUN(QueueTest< LockFreeMPMCQueue< ::std::pair<size_t COMMA int> >
     COMMA true COMMA true >);
...
dataflow_cpp/test/dataflow_cpp_test_simple.cc
@@ -39,7 +39,7 @@
 #define NUM_SLICES 8
 #define TEST_COUNT 12
 
-typedef embb::dataflow::Network<NUM_SLICES> MyNetwork;
+typedef embb::dataflow::Network<8> MyNetwork;
 typedef MyNetwork::ConstantSource<int> MyConstantSource;
 typedef MyNetwork::Source<int> MySource;
 typedef MyNetwork::SerialProcess<MyNetwork::Inputs<int>::Type,
...
@@ -156,7 +156,9 @@ void SimpleTest::TestBasic() {
     core_set,
     1024,           // max tasks (default: 1024)
     128,            // max groups (default: 128)
-    num_cores,      // max queues (default: 16)
+    // Currently needs to be initialized
+    // with (max_queues + 1), see defect embb449
+    num_cores + 1,  // max queues (default: 16)
     1024,           // queue capacity (default: 1024)
     4);             // num priorities (default: 4)
...
mtapi_c/src/embb_mtapi_id_pool_t.c
@@ -71,7 +71,7 @@ mtapi_uint_t embb_mtapi_id_pool_allocate(embb_mtapi_id_pool_t * that) {
   /* acquire position to fetch id from */
   mtapi_uint_t id_position = that->get_id_position;
   that->get_id_position++;
-  if (that->capacity < that->get_id_position) {
+  if (that->capacity <= that->get_id_position) {
     that->get_id_position = 0;
   }
...
@@ -97,7 +97,7 @@ void embb_mtapi_id_pool_deallocate(
   /* acquire position to put id to */
   mtapi_uint_t id_position = that->put_id_position;
   that->put_id_position++;
-  if (that->capacity < that->put_id_position) {
+  if (that->capacity <= that->put_id_position) {
     that->put_id_position = 0;
   }
...
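Both hunks apply the same off-by-one fix: the positions index a ring, so the wrap has to happen as soon as the incremented position reaches the capacity, not one step later. Isolated for illustration (assuming 0-based positions in the range 0..capacity-1, as in the functions above):

    /* With "<", position == capacity slips through once before wrapping
     * and indexes one slot past the valid range; "<=" wraps exactly at
     * the end of 0..capacity-1. */
    unsigned int next_position(unsigned int position, unsigned int capacity) {
      position++;
      if (capacity <= position) {
        position = 0;
      }
      return position;
    }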
mtapi_c/test/embb_mtapi_test_id_pool.cc
deleted 100644 → 0
/*
* Copyright (c) 2014-2015, Siemens AG. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <embb_mtapi_test_id_pool.h>
#include <vector>
IdPoolTest::IdPoolTest() {
  CreateUnit("mtapi id pool test single threaded").
    Add(&IdPoolTest::TestBasic, this, 1, 1000).
    Pre(&IdPoolTest::TestBasicPre, this).
    Post(&IdPoolTest::TestBasicPost, this);

  CreateUnit("mtapi id pool test concurrent").
    Add(&IdPoolTest::TestParallel, this, concurrent_accessors_id_pool_2, 20).
    Post(&IdPoolTest::TestParallelPost, this).
    Pre(&IdPoolTest::TestParallelPre, this);
}

void IdPoolTest::TestParallel() {
  // allocate ID_ELEMENTS_PER_ACCESSOR elements. Each test thread is
  // guaranteed to be able to allocate this amount of elements.
  TestAllocateDeallocateNElementsFromPool(id_pool_parallel,
    id_elements_per_accessor);
}

void IdPoolTest::TestParallelPre() {
  // create second id pool with CONCURRENT_ACCESSORS_ID_POOL_2 *
  // ID_ELEMENTS_PER_ACCESSOR elements
  embb_mtapi_id_pool_initialize(&id_pool_parallel,
    concurrent_accessors_id_pool_2 * id_elements_per_accessor);
}

void IdPoolTest::TestParallelPost() {
  // after the parallel tests, try to again allocate and deallocate all
  // elements sequentially.
  TestAllocateDeallocateNElementsFromPool(id_pool_parallel,
    concurrent_accessors_id_pool_2 * id_elements_per_accessor, true);

  // finalize pool
  embb_mtapi_id_pool_finalize(&id_pool_parallel);
}

void IdPoolTest::TestBasic() {
  TestAllocateDeallocateNElementsFromPool(id_pool, id_pool_size_1, true);
}

void IdPoolTest::TestBasicPre() {
  // create id pool with ID_POOL_SIZE_1 elements
  embb_mtapi_id_pool_initialize(&id_pool, id_pool_size_1);
}

void IdPoolTest::TestBasicPost() {
  // finalize pool
  embb_mtapi_id_pool_finalize(&id_pool);
}

void IdPoolTest::TestAllocateDeallocateNElementsFromPool(
  embb_mtapi_id_pool_t &pool,
  int count_elements,
  bool empty_check) {
  std::vector<unsigned int> allocated;

  for (int i = 0; i != count_elements; ++i) {
    allocated.push_back(embb_mtapi_id_pool_allocate(&pool));
  }

  // the allocated elements should be disjoint, and never the invalid element
  for (unsigned int x = 0; x != allocated.size(); ++x) {
    PT_ASSERT(allocated[x] != EMBB_MTAPI_IDPOOL_INVALID_ID);

    for (unsigned int y = 0; y != allocated.size(); ++y) {
      if (x == y) {
        continue;
      }
      PT_ASSERT(allocated[x] != allocated[y]);
    }
  }

  // now the id pool should be empty... try ten times to get an id;
  // we should always get the invalid element
  if (empty_check) {
    for (int i = 0; i != 10; ++i) {
      PT_ASSERT_EQ(embb_mtapi_id_pool_allocate(&pool),
        static_cast<unsigned int>(EMBB_MTAPI_IDPOOL_INVALID_ID));
    }
  }

  // now return the allocated elements in a shuffled manner.
  ::std::random_shuffle(allocated.begin(), allocated.end());
  for (int i = 0; i != count_elements; ++i) {
    embb_mtapi_id_pool_deallocate(&pool,
      allocated[static_cast<unsigned int>(i)]);
  }
}
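For reference, the C API exercised by this removed test boils down to a short lifecycle; a minimal sketch using only the calls that appear above:

    embb_mtapi_id_pool_t pool;
    embb_mtapi_id_pool_initialize(&pool, 100);     /* pool of 100 ids */

    mtapi_uint_t id = embb_mtapi_id_pool_allocate(&pool);
    if (id != EMBB_MTAPI_IDPOOL_INVALID_ID) {      /* pool not empty */
      /* ... use the id ... */
      embb_mtapi_id_pool_deallocate(&pool, id);    /* return it */
    }

    embb_mtapi_id_pool_finalize(&pool);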
mtapi_c/test/embb_mtapi_test_id_pool.h
deleted 100644 → 0
/*
* Copyright (c) 2014-2015, Siemens AG. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MTAPI_C_TEST_EMBB_MTAPI_TEST_ID_POOL_H_
#define MTAPI_C_TEST_EMBB_MTAPI_TEST_ID_POOL_H_
#include <partest/partest.h>
#include <embb_mtapi_id_pool_t.h>
// for shuffling a vector
#include <algorithm>
class IdPoolTest : public partest::TestCase {
 public:
  embb_mtapi_id_pool_t id_pool;
  embb_mtapi_id_pool_t id_pool_parallel;

  IdPoolTest();

 private:
  static const unsigned int id_pool_size_1 = 100;
  static const unsigned int concurrent_accessors_id_pool_2 = 10;
  static const unsigned int id_elements_per_accessor = 10;

  /**
   * We create a pool of size number_accessors * elements_per_accessor, so
   * at any time we can guarantee that each thread is able to allocate
   * elements_per_accessor elements.
   * We create number_accessors threads, where each thread iteratively
   * allocates and frees elements_per_accessor elements, which in each case
   * has to be successful. Additionally, the sanity checks from the basic
   * tests are repeated. The TestParallelPost function also repeats all
   * sequential tests.
   */
  void TestParallel();
  void TestParallelPre();
  void TestParallelPost();

  /**
   * Create a pool of size N. We repeatedly allocate and free N elements,
   * check that the pool always returns disjoint ids, and check that the
   * pool never returns the invalid element while it is not empty. Check
   * that the invalid element is returned if the pool is empty.
   */
  void TestBasic();
  void TestBasicPre();
  void TestBasicPost();

  static void TestAllocateDeallocateNElementsFromPool(
    embb_mtapi_id_pool_t &pool,
    int count_elements,
    bool empty_check = false);
};

#endif  // MTAPI_C_TEST_EMBB_MTAPI_TEST_ID_POOL_H_
mtapi_c/test/main.cc
@@ -37,9 +37,6 @@
 #include <embb_mtapi_test_group.h>
 #include <embb_mtapi_test_queue.h>
 #include <embb_mtapi_test_error.h>
-#include <embb_mtapi_test_id_pool.h>
-
-#include <embb/base/c/memory_allocation.h>
 
 PT_MAIN("MTAPI C") {
   embb_log_set_log_level(EMBB_LOG_LEVEL_NONE);
...
@@ -51,7 +48,4 @@ PT_MAIN("MTAPI C") {
   PT_RUN(InitFinalizeTest);
   PT_RUN(GroupTest);
   PT_RUN(QueueTest);
-  PT_RUN(IdPoolTest);
-
-  PT_EXPECT(embb_get_bytes_allocated() == 0);
 }
mtapi_cpp/CMakeLists.txt
@@ -5,10 +5,14 @@ file(GLOB_RECURSE EMBB_MTAPI_CPP_HEADERS "include/*.h")
 file(GLOB_RECURSE EMBB_MTAPI_CPP_TEST_SOURCES "test/*.cc" "test/*.h")
 
 if(USE_AUTOMATIC_INITIALIZATION STREQUAL ON)
+  message("-- Automatic initialization enabled (default)")
   set(MTAPI_CPP_AUTOMATIC_INITIALIZE 1)
 else()
   set(MTAPI_CPP_AUTOMATIC_INITIALIZE 0)
+  message("-- Automatic initialization disabled")
 endif()
+message(" (set with command line option -DUSE_AUTOMATIC_INITIALIZATION=ON/OFF)")
 
 # Execute the GroupSources macro
 include(${CMAKE_SOURCE_DIR}/CMakeCommon/GroupSourcesMSVC.cmake)
...
tasks_cpp/CMakeLists.txt
@@ -5,10 +5,13 @@ file(GLOB_RECURSE EMBB_TASKS_CPP_HEADERS "include/*.h")
 file(GLOB_RECURSE EMBB_TASKS_CPP_TEST_SOURCES "test/*.cc" "test/*.h")
 
 if(USE_AUTOMATIC_INITIALIZATION STREQUAL ON)
+  message("-- Automatic initialization enabled (default)")
   set(TASKS_CPP_AUTOMATIC_INITIALIZE 1)
 else()
   set(TASKS_CPP_AUTOMATIC_INITIALIZE 0)
+  message("-- Automatic initialization disabled")
 endif()
+message(" (set with command line option -DUSE_AUTOMATIC_INITIALIZATION=ON/OFF)")
 
 configure_file("include/embb/tasks/internal/cmake_config.h.in"
   "include/embb/tasks/internal/cmake_config.h")
...
tasks_cpp/test/tasks_cpp_test_task.cc
@@ -78,19 +78,13 @@ void TaskTest::TestBasic() {
   PT_EXPECT_EQ(policy.GetPriority(), 0u);
   policy.AddWorker(0u);
   PT_EXPECT_EQ(policy.GetAffinity(), 1u);
 
-  if (policy.GetCoreCount() > 1) {
-    policy.AddWorker(1u);
-    PT_EXPECT_EQ(policy.GetAffinity(), 3u);
-  }
+  policy.AddWorker(1u);
+  PT_EXPECT_EQ(policy.GetAffinity(), 3u);
 
   policy.RemoveWorker(0u);
+  PT_EXPECT_EQ(policy.GetAffinity(), 2u);
   PT_EXPECT_EQ(policy.IsSetWorker(0), false);
-  if (policy.GetCoreCount() > 1) {
-    PT_EXPECT_EQ(policy.GetAffinity(), 2u);
-    PT_EXPECT_EQ(policy.IsSetWorker(1), true);
-  }
+  PT_EXPECT_EQ(policy.IsSetWorker(1), true);
 
   std::string test;
 
   embb::tasks::Task task = node.Spawn(
     embb::base::Bind(
...