Skip to content
Toggle navigation
P
Projects
G
Groups
S
Snippets
Help
las3_pub
/
predictable_parallel_patterns
This project
Loading...
Sign in
Toggle navigation
Go to a project
Project
Repository
Issues
0
Merge Requests
0
Pipelines
Wiki
Members
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Commit
d16ad3eb
authored
Apr 18, 2019
by
FritzFlorian
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Add basic exponential backoff to our code.
parent
aad1db1f
Pipeline
#1158
failed with stages
in 4 minutes 8 seconds
Changes
10
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
Showing
10 changed files
with
98 additions
and
45 deletions
+98
-45
lib/pls/CMakeLists.txt
+1
-0
lib/pls/include/pls/internal/base/backoff.h
+50
-0
lib/pls/include/pls/internal/base/spin_lock.h
+1
-1
lib/pls/include/pls/internal/base/tas_spin_lock.h
+2
-3
lib/pls/include/pls/internal/base/ttas_spin_lock.h
+4
-3
lib/pls/src/internal/base/swmr_spin_lock.cpp
+5
-12
lib/pls/src/internal/base/tas_spin_lock.cpp
+11
-9
lib/pls/src/internal/base/ttas_spin_lock.cpp
+13
-8
lib/pls/src/internal/scheduling/abstract_task.cpp
+2
-2
lib/pls/src/internal/scheduling/fork_join_task.cpp
+9
-7
No files found.
lib/pls/CMakeLists.txt
View file @
d16ad3eb
...
@@ -15,6 +15,7 @@ add_library(pls STATIC
...
@@ -15,6 +15,7 @@ add_library(pls STATIC
include/pls/internal/base/system_details.h
include/pls/internal/base/system_details.h
include/pls/internal/base/error_handling.h
include/pls/internal/base/error_handling.h
include/pls/internal/base/alignment.h src/internal/base/alignment.cpp
include/pls/internal/base/alignment.h src/internal/base/alignment.cpp
include/pls/internal/base/backoff.h
include/pls/internal/data_structures/aligned_stack.h src/internal/data_structures/aligned_stack.cpp
include/pls/internal/data_structures/aligned_stack.h src/internal/data_structures/aligned_stack.cpp
include/pls/internal/data_structures/aligned_stack_impl.h
include/pls/internal/data_structures/aligned_stack_impl.h
...
...
lib/pls/include/pls/internal/base/backoff.h
0 → 100644
View file @
d16ad3eb
#ifndef PLS_BACKOFF_H_
#define PLS_BACKOFF_H_

#include "pls/internal/base/system_details.h"
#include "pls/internal/helpers/profiler.h"
#include "pls/internal/base/thread.h"

#include <algorithm> // std::min
#include <random>
#include <math.h>

namespace pls {
namespace internal {
namespace base {

/**
 * Randomized exponential backoff used by the spin locks.
 *
 * Each call to do_backoff() spins a random number of relax_cpu()
 * iterations below the current budget, then doubles the budget
 * (capped at MAX_SPIN_ITERS). The randomization de-synchronizes
 * competing threads; the cap bounds worst-case latency.
 */
class backoff {
  // Initial spin budget: 2 << 2 == 8 iterations.
  static constexpr unsigned long INITIAL_SPIN_ITERS = 2u << 2u;
  // Maximum spin budget: 2 << 6 == 128 iterations.
  static constexpr unsigned long MAX_SPIN_ITERS = 2u << 6u;

  unsigned long current_ = INITIAL_SPIN_ITERS;
  std::minstd_rand random_;

  // Busy-wait for the given number of relax_cpu() calls.
  // The volatile counter keeps the compiler from eliding the loop.
  static void spin(unsigned long iterations) {
    for (volatile unsigned long i = 0; i < iterations; i++)
      system_details::relax_cpu(); // Spin
  }

 public:
  backoff() : current_{INITIAL_SPIN_ITERS}, random_{std::random_device{}()} {}

  // One backoff step: spin a random amount below the current budget,
  // then grow the budget exponentially up to the cap.
  // (std::min requires <algorithm>, which this header now includes.)
  void do_backoff() {
    PROFILE_LOCK("Backoff")
    spin(random_() % std::min(current_, MAX_SPIN_ITERS));

    // Doubling is capped so the budget never exceeds MAX_SPIN_ITERS
    // (and cannot overflow).
    current_ = std::min(current_ * 2, MAX_SPIN_ITERS);
  }

  // Reset the budget, e.g. after the lock was successfully acquired.
  void reset() {
    current_ = INITIAL_SPIN_ITERS;
  }
};

}
}
}

#endif //PLS_BACKOFF_H_
lib/pls/include/pls/internal/base/spin_lock.h
View file @
d16ad3eb
...
@@ -10,7 +10,7 @@ namespace internal {
...
@@ -10,7 +10,7 @@ namespace internal {
namespace
base
{
namespace
base
{
// Default Spin-Lock implementation for this project.
// Default Spin-Lock implementation for this project.
using
spin_lock
=
tas_spin_lock
;
using spin_lock = ttas_spin_lock;
}
}
}
}
...
...
lib/pls/include/pls/internal/base/tas_spin_lock.h
View file @
d16ad3eb
...
@@ -21,11 +21,10 @@ namespace base {
...
@@ -21,11 +21,10 @@ namespace base {
*/
*/
class
tas_spin_lock
{
class
tas_spin_lock
{
std
::
atomic_flag
flag_
;
std
::
atomic_flag
flag_
;
unsigned
int
yield_at_tries_
;
public
:
public
:
tas_spin_lock
()
:
flag_
{
ATOMIC_FLAG_INIT
}
,
yield_at_tries_
{
1024
}
{};
tas_spin_lock
()
:
flag_
{
ATOMIC_FLAG_INIT
}
{};
tas_spin_lock
(
const
tas_spin_lock
&
other
)
:
flag_
{
ATOMIC_FLAG_INIT
},
yield_at_tries_
{
other
.
yield_at_tries_
}
{}
tas_spin_lock
(
const
tas_spin_lock
&
/*other*/
)
:
flag_
{
ATOMIC_FLAG_INIT
}
{}
void
lock
();
void
lock
();
bool
try_lock
(
unsigned
int
num_tries
=
1
);
bool
try_lock
(
unsigned
int
num_tries
=
1
);
...
...
lib/pls/include/pls/internal/base/ttas_spin_lock.h
View file @
d16ad3eb
...
@@ -6,6 +6,7 @@
...
@@ -6,6 +6,7 @@
#include <iostream>
#include <iostream>
#include "pls/internal/base/thread.h"
#include "pls/internal/base/thread.h"
#include "pls/internal/base/backoff.h"
namespace
pls
{
namespace
pls
{
namespace
internal
{
namespace
internal
{
...
@@ -18,11 +19,11 @@ namespace base {
...
@@ -18,11 +19,11 @@ namespace base {
*/
*/
class
ttas_spin_lock
{
class
ttas_spin_lock
{
std
::
atomic
<
int
>
flag_
;
std
::
atomic
<
int
>
flag_
;
const unsigned int yield_at_tries_;
backoff backoff_;
public
:
public
:
ttas_spin_lock
()
:
flag_
{
0
},
yield_at_tries_
{
1024
}
{};
ttas_spin_lock
()
:
flag_
{
0
},
backoff_
{
}
{};
ttas_spin_lock
(
const
ttas_spin_lock
&
other
)
:
flag_
{
0
},
yield_at_tries_
{
other
.
yield_at_tries_
}
{}
ttas_spin_lock
(
const
ttas_spin_lock
&
/*other*/
)
:
flag_
{
0
},
backoff_
{
}
{}
void
lock
();
void
lock
();
bool
try_lock
(
unsigned
int
num_tries
=
1
);
bool
try_lock
(
unsigned
int
num_tries
=
1
);
...
...
lib/pls/src/internal/base/swmr_spin_lock.cpp
View file @
d16ad3eb
...
@@ -7,14 +7,14 @@ namespace base {
...
@@ -7,14 +7,14 @@ namespace base {
bool
swmr_spin_lock
::
reader_try_lock
()
{
bool
swmr_spin_lock
::
reader_try_lock
()
{
PROFILE_LOCK
(
"Try Acquire Read Lock"
)
PROFILE_LOCK
(
"Try Acquire Read Lock"
)
if
(
write_request_
.
load
(
std
::
memory_order_
relaxed
)
==
1
)
{
if
(
write_request_
.
load
(
std
::
memory_order_
acquire
)
==
1
)
{
return
false
;
return
false
;
}
}
// We think we can enter the region
// We think we can enter the region
readers_
.
fetch_add
(
1
,
std
::
memory_order_acquire
);
readers_
.
fetch_add
(
1
,
std
::
memory_order_acquire
);
if
(
write_request_
.
load
()
==
1
)
{
if
(
write_request_
.
load
(
std
::
memory_order_acquire
)
==
1
)
{
// Whoops, the writer acquires the lock, so we back off again
// Whoops, the writer acquires the lock, so we back off again
readers_
--
;
readers_
.
fetch_add
(
-
1
,
std
::
memory_order_release
)
;
return
false
;
return
false
;
}
}
...
@@ -29,17 +29,10 @@ void swmr_spin_lock::reader_unlock() {
...
@@ -29,17 +29,10 @@ void swmr_spin_lock::reader_unlock() {
void
swmr_spin_lock
::
writer_lock
()
{
void
swmr_spin_lock
::
writer_lock
()
{
PROFILE_LOCK
(
"Acquire Write Lock"
)
PROFILE_LOCK
(
"Acquire Write Lock"
)
// Tell the readers that we would like to write
// Tell the readers that we would like to write
int
expected
;
write_request_
.
store
(
1
,
std
::
memory_order_acquire
);
while
(
true
)
{
expected
=
0
;
if
(
write_request_
.
compare_exchange_weak
(
expected
,
1
,
std
::
memory_order_acquire
))
{
break
;
}
system_details
::
relax_cpu
();
// Spin until WE set the write lock flag
}
// Wait for all of them to exit the critical section
// Wait for all of them to exit the critical section
while
(
readers_
.
load
()
>
0
)
while
(
readers_
.
load
(
std
::
memory_order_acquire
)
>
0
)
system_details
::
relax_cpu
();
// Spin, not expensive as relaxed load
system_details
::
relax_cpu
();
// Spin, not expensive as relaxed load
}
}
...
...
lib/pls/src/internal/base/tas_spin_lock.cpp
View file @
d16ad3eb
#include "pls/internal/helpers/profiler.h"
#include "pls/internal/helpers/profiler.h"
#include "pls/internal/base/tas_spin_lock.h"
#include "pls/internal/base/tas_spin_lock.h"
#include "pls/internal/base/backoff.h"
namespace
pls
{
namespace
pls
{
namespace
internal
{
namespace
internal
{
...
@@ -7,30 +8,31 @@ namespace base {
...
@@ -7,30 +8,31 @@ namespace base {
void
tas_spin_lock
::
lock
()
{
void
tas_spin_lock
::
lock
()
{
PROFILE_LOCK
(
"Acquire Lock"
)
PROFILE_LOCK
(
"Acquire Lock"
)
int
tries
=
0
;
backoff
backoff_strategy
;
while
(
true
)
{
tries
++
;
if
(
tries
%
yield_at_tries_
==
0
)
{
this_thread
::
yield
();
}
while
(
true
)
{
if
(
flag_
.
test_and_set
(
std
::
memory_order_acquire
)
==
0
)
{
if
(
flag_
.
test_and_set
(
std
::
memory_order_acquire
)
==
0
)
{
return
;
return
;
}
}
backoff_strategy
.
do_backoff
();
}
}
}
}
bool
tas_spin_lock
::
try_lock
(
unsigned
int
num_tries
)
{
bool
tas_spin_lock
::
try_lock
(
unsigned
int
num_tries
)
{
PROFILE_LOCK
(
"Try Acquire Lock"
)
PROFILE_LOCK
(
"Try Acquire Lock"
)
backoff
backoff_strategy
;
while
(
true
)
{
while
(
true
)
{
if
(
flag_
.
test_and_set
(
std
::
memory_order_acquire
)
==
0
)
{
return
true
;
}
num_tries
--
;
num_tries
--
;
if
(
num_tries
<=
0
)
{
if
(
num_tries
<=
0
)
{
return
false
;
return
false
;
}
}
if
(
flag_
.
test_and_set
(
std
::
memory_order_acquire
)
==
0
)
{
backoff_strategy
.
do_backoff
();
return
true
;
}
}
}
}
}
...
...
lib/pls/src/internal/base/ttas_spin_lock.cpp
View file @
d16ad3eb
#include "pls/internal/helpers/profiler.h"
#include "pls/internal/helpers/profiler.h"
#include "pls/internal/base/ttas_spin_lock.h"
#include "pls/internal/base/ttas_spin_lock.h"
#include "pls/internal/base/backoff.h"
namespace
pls
{
namespace
pls
{
namespace
internal
{
namespace
internal
{
...
@@ -7,40 +8,44 @@ namespace base {
...
@@ -7,40 +8,44 @@ namespace base {
void
ttas_spin_lock
::
lock
()
{
void
ttas_spin_lock
::
lock
()
{
PROFILE_LOCK
(
"Acquire Lock"
)
PROFILE_LOCK
(
"Acquire Lock"
)
int
tries
=
0
;
int
expected
=
0
;
int
expected
=
0
;
backoff_
.
reset
();
while
(
true
)
{
while
(
true
)
{
while
(
flag_
.
load
(
std
::
memory_order_relaxed
)
==
1
)
{
while
(
flag_
.
load
(
std
::
memory_order_relaxed
)
==
1
)
tries
++
;
system_details
::
relax_cpu
();
// Spin
if
(
tries
%
yield_at_tries_
==
0
)
{
this_thread
::
yield
();
}
}
expected
=
0
;
expected
=
0
;
if
(
flag_
.
compare_exchange_weak
(
expected
,
1
,
std
::
memory_order_acquire
))
{
if
(
flag_
.
compare_exchange_weak
(
expected
,
1
,
std
::
memory_order_acquire
))
{
return
;
return
;
}
}
backoff_
.
do_backoff
();
}
}
}
}
bool
ttas_spin_lock
::
try_lock
(
unsigned
int
num_tries
)
{
bool
ttas_spin_lock
::
try_lock
(
unsigned
int
num_tries
)
{
PROFILE_LOCK
(
"Try Acquire Lock"
)
PROFILE_LOCK
(
"Try Acquire Lock"
)
int
expected
=
0
;
int
expected
=
0
;
backoff_
.
reset
();
while
(
true
)
{
while
(
true
)
{
while
(
flag_
.
load
(
std
::
memory_order_relaxed
)
==
1
)
{
while
(
flag_
.
load
()
==
1
)
{
num_tries
--
;
num_tries
--
;
if
(
num_tries
<=
0
)
{
if
(
num_tries
<=
0
)
{
return
false
;
return
false
;
}
}
system_details
::
relax_cpu
();
}
}
expected
=
0
;
expected
=
0
;
if
(
flag_
.
compare_exchange_weak
(
expected
,
1
,
std
::
memory_order_acquire
))
{
if
(
flag_
.
compare_exchange_weak
(
expected
,
1
,
std
::
memory_order_acquire
))
{
return
true
;
return
true
;
}
}
num_tries
--
;
if
(
num_tries
<=
0
)
{
return
false
;
}
backoff_
.
do_backoff
();
}
}
}
}
...
...
lib/pls/src/internal/scheduling/abstract_task.cpp
View file @
d16ad3eb
...
@@ -15,11 +15,11 @@ bool abstract_task::steal_work() {
...
@@ -15,11 +15,11 @@ bool abstract_task::steal_work() {
const
size_t
my_id
=
my_state
->
id_
;
const
size_t
my_id
=
my_state
->
id_
;
const
size_t
offset
=
my_state
->
random_
()
%
my_scheduler
->
num_threads
();
const
size_t
offset
=
my_state
->
random_
()
%
my_scheduler
->
num_threads
();
const
size_t
max_tries
=
my_scheduler
->
num_threads
();
// TODO: Tune this value
const
size_t
max_tries
=
my_scheduler
->
num_threads
()
-
1
;
// TODO: Tune this value
for
(
size_t
i
=
0
;
i
<
max_tries
;
i
++
)
{
for
(
size_t
i
=
0
;
i
<
max_tries
;
i
++
)
{
size_t
target
=
(
offset
+
i
)
%
my_scheduler
->
num_threads
();
size_t
target
=
(
offset
+
i
)
%
my_scheduler
->
num_threads
();
if
(
target
==
my_id
)
{
if
(
target
==
my_id
)
{
continue
;
target
=
(
target
+
1
)
%
my_scheduler
->
num_threads
()
;
}
}
auto
target_state
=
my_scheduler
->
thread_state_for
(
target
);
auto
target_state
=
my_scheduler
->
thread_state_for
(
target
);
...
...
lib/pls/src/internal/scheduling/fork_join_task.cpp
View file @
d16ad3eb
...
@@ -54,13 +54,15 @@ void fork_join_sub_task::wait_for_all() {
...
@@ -54,13 +54,15 @@ void fork_join_sub_task::wait_for_all() {
if
(
local_task
!=
nullptr
)
{
if
(
local_task
!=
nullptr
)
{
local_task
->
execute
();
local_task
->
execute
();
}
else
{
}
else
{
// Try to steal work.
while
(
ref_count_
>
0
)
{
// External steal will be executed implicitly if success
// Try to steal work.
PROFILE_STEALING
(
"steal work"
)
// External steal will be executed implicitly if success
bool
internal_steal_success
=
tbb_task_
->
steal_work
();
PROFILE_STEALING
(
"steal work"
)
PROFILE_END_BLOCK
bool
internal_steal_success
=
tbb_task_
->
steal_work
();
if
(
internal_steal_success
)
{
PROFILE_END_BLOCK
tbb_task_
->
last_stolen_
->
execute
();
if
(
internal_steal_success
)
{
tbb_task_
->
last_stolen_
->
execute
();
}
}
}
}
}
}
}
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment