Skip to content
Toggle navigation
P
Projects
G
Groups
S
Snippets
Help
las3_pub
/
predictable_parallel_patterns
This project
Loading...
Sign in
Toggle navigation
Go to a project
Project
Repository
Issues
0
Merge Requests
0
Pipelines
Wiki
Members
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Commit
d16ad3eb
authored
6 years ago
by
FritzFlorian
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Add basic exponential backoff to our code.
parent
aad1db1f
Pipeline
#1158
failed with stages
in 4 minutes 8 seconds
Changes
10
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
Showing
10 changed files
with
98 additions
and
45 deletions
+98
-45
lib/pls/CMakeLists.txt
+1
-0
lib/pls/include/pls/internal/base/backoff.h
+50
-0
lib/pls/include/pls/internal/base/spin_lock.h
+1
-1
lib/pls/include/pls/internal/base/tas_spin_lock.h
+2
-3
lib/pls/include/pls/internal/base/ttas_spin_lock.h
+4
-3
lib/pls/src/internal/base/swmr_spin_lock.cpp
+5
-12
lib/pls/src/internal/base/tas_spin_lock.cpp
+11
-9
lib/pls/src/internal/base/ttas_spin_lock.cpp
+13
-8
lib/pls/src/internal/scheduling/abstract_task.cpp
+2
-2
lib/pls/src/internal/scheduling/fork_join_task.cpp
+9
-7
No files found.
lib/pls/CMakeLists.txt
View file @
d16ad3eb
...
...
@@ -15,6 +15,7 @@ add_library(pls STATIC
include/pls/internal/base/system_details.h
include/pls/internal/base/error_handling.h
include/pls/internal/base/alignment.h src/internal/base/alignment.cpp
include/pls/internal/base/backoff.h
include/pls/internal/data_structures/aligned_stack.h src/internal/data_structures/aligned_stack.cpp
include/pls/internal/data_structures/aligned_stack_impl.h
...
...
This diff is collapsed.
Click to expand it.
lib/pls/include/pls/internal/base/backoff.h
0 → 100644
View file @
d16ad3eb
#ifndef PLS_BACKOFF_H_
#define PLS_BACKOFF_H_

#include "pls/internal/base/system_details.h"
#include "pls/internal/helpers/profiler.h"
#include "pls/internal/base/thread.h"

#include <random>
#include <algorithm>  // std::min — was previously pulled in only transitively
#include <math.h>     // NOTE(review): appears unused — consider removing

namespace pls {
namespace internal {
namespace base {

/**
 * Randomized exponential backoff helper for the spin lock implementations.
 *
 * Each call to do_backoff() busy-waits for a random number of
 * relax_cpu() iterations bounded by the current backoff window, then
 * doubles the window (saturating at MAX_SPIN_ITERS). reset() shrinks
 * the window back to its initial size, e.g. after the contended
 * resource was successfully acquired.
 *
 * NOTE(review): not synchronized — each thread should use its own
 * instance (the existing lock code follows this pattern).
 */
class backoff {
  // Initial window: 2u << 2u == 8 iterations.
  static constexpr unsigned long INITIAL_SPIN_ITERS = 2u << 2u;
  // Upper bound on the window: 2u << 6u == 128 iterations.
  static constexpr unsigned long MAX_SPIN_ITERS = 2u << 6u;

  unsigned long current_ = INITIAL_SPIN_ITERS;
  // Cheap LCG is fine here; we only need jitter, not statistical quality.
  std::minstd_rand random_;

  // Busy-wait for `iterations` rounds. The volatile loop counter keeps
  // the compiler from collapsing the otherwise side-effect-free loop.
  static void spin(unsigned long iterations) {
    for (volatile unsigned long i = 0; i < iterations; i++)
      system_details::relax_cpu(); // Spin
  }

 public:
  backoff() : current_{INITIAL_SPIN_ITERS},
              random_{std::random_device{}()} {}

  // Spin for a random duration in [0, min(current_, MAX_SPIN_ITERS)),
  // then double the backoff window, saturating at MAX_SPIN_ITERS.
  void do_backoff() {
    PROFILE_LOCK("Backoff")

    spin(random_() % std::min(current_, MAX_SPIN_ITERS));
    current_ = std::min(current_ * 2, MAX_SPIN_ITERS);
  }

  // Shrink the backoff window back to its initial size.
  void reset() {
    current_ = INITIAL_SPIN_ITERS;
  }
};

}
}
}

#endif //PLS_BACKOFF_H_
This diff is collapsed.
Click to expand it.
lib/pls/include/pls/internal/base/spin_lock.h
View file @
d16ad3eb
...
...
@@ -10,7 +10,7 @@ namespace internal {
namespace
base
{
// Default Spin-Lock implementation for this project.
using
spin_lock
=
tas_spin_lock
;
using
spin_lock
=
t
t
as_spin_lock
;
}
}
...
...
This diff is collapsed.
Click to expand it.
lib/pls/include/pls/internal/base/tas_spin_lock.h
View file @
d16ad3eb
...
...
@@ -21,11 +21,10 @@ namespace base {
*/
class
tas_spin_lock
{
std
::
atomic_flag
flag_
;
unsigned
int
yield_at_tries_
;
public
:
tas_spin_lock
()
:
flag_
{
ATOMIC_FLAG_INIT
}
,
yield_at_tries_
{
1024
}
{};
tas_spin_lock
(
const
tas_spin_lock
&
other
)
:
flag_
{
ATOMIC_FLAG_INIT
},
yield_at_tries_
{
other
.
yield_at_tries_
}
{}
tas_spin_lock
()
:
flag_
{
ATOMIC_FLAG_INIT
}
{};
tas_spin_lock
(
const
tas_spin_lock
&
/*other*/
)
:
flag_
{
ATOMIC_FLAG_INIT
}
{}
void
lock
();
bool
try_lock
(
unsigned
int
num_tries
=
1
);
...
...
This diff is collapsed.
Click to expand it.
lib/pls/include/pls/internal/base/ttas_spin_lock.h
View file @
d16ad3eb
...
...
@@ -6,6 +6,7 @@
#include <iostream>
#include "pls/internal/base/thread.h"
#include "pls/internal/base/backoff.h"
namespace
pls
{
namespace
internal
{
...
...
@@ -18,11 +19,11 @@ namespace base {
*/
class
ttas_spin_lock
{
std
::
atomic
<
int
>
flag_
;
const
unsigned
int
yield_at_tries
_
;
backoff
backoff
_
;
public
:
ttas_spin_lock
()
:
flag_
{
0
},
yield_at_tries_
{
1024
}
{};
ttas_spin_lock
(
const
ttas_spin_lock
&
other
)
:
flag_
{
0
},
yield_at_tries_
{
other
.
yield_at_tries_
}
{}
ttas_spin_lock
()
:
flag_
{
0
},
backoff_
{
}
{};
ttas_spin_lock
(
const
ttas_spin_lock
&
/*other*/
)
:
flag_
{
0
},
backoff_
{
}
{}
void
lock
();
bool
try_lock
(
unsigned
int
num_tries
=
1
);
...
...
This diff is collapsed.
Click to expand it.
lib/pls/src/internal/base/swmr_spin_lock.cpp
View file @
d16ad3eb
...
...
@@ -7,14 +7,14 @@ namespace base {
bool
swmr_spin_lock
::
reader_try_lock
()
{
PROFILE_LOCK
(
"Try Acquire Read Lock"
)
if
(
write_request_
.
load
(
std
::
memory_order_
relaxed
)
==
1
)
{
if
(
write_request_
.
load
(
std
::
memory_order_
acquire
)
==
1
)
{
return
false
;
}
// We think we can enter the region
readers_
.
fetch_add
(
1
,
std
::
memory_order_acquire
);
if
(
write_request_
.
load
()
==
1
)
{
if
(
write_request_
.
load
(
std
::
memory_order_acquire
)
==
1
)
{
// Whoops, the writer acquires the lock, so we back off again
readers_
--
;
readers_
.
fetch_add
(
-
1
,
std
::
memory_order_release
)
;
return
false
;
}
...
...
@@ -29,17 +29,10 @@ void swmr_spin_lock::reader_unlock() {
void
swmr_spin_lock
::
writer_lock
()
{
PROFILE_LOCK
(
"Acquire Write Lock"
)
// Tell the readers that we would like to write
int
expected
;
while
(
true
)
{
expected
=
0
;
if
(
write_request_
.
compare_exchange_weak
(
expected
,
1
,
std
::
memory_order_acquire
))
{
break
;
}
system_details
::
relax_cpu
();
// Spin until WE set the write lock flag
}
write_request_
.
store
(
1
,
std
::
memory_order_acquire
);
// Wait for all of them to exit the critical section
while
(
readers_
.
load
()
>
0
)
while
(
readers_
.
load
(
std
::
memory_order_acquire
)
>
0
)
system_details
::
relax_cpu
();
// Spin, not expensive as relaxed load
}
...
...
This diff is collapsed.
Click to expand it.
lib/pls/src/internal/base/tas_spin_lock.cpp
View file @
d16ad3eb
#include "pls/internal/helpers/profiler.h"
#include "pls/internal/base/tas_spin_lock.h"
#include "pls/internal/base/backoff.h"
namespace
pls
{
namespace
internal
{
...
...
@@ -7,30 +8,31 @@ namespace base {
void
tas_spin_lock
::
lock
()
{
PROFILE_LOCK
(
"Acquire Lock"
)
int
tries
=
0
;
while
(
true
)
{
tries
++
;
if
(
tries
%
yield_at_tries_
==
0
)
{
this_thread
::
yield
();
}
backoff
backoff_strategy
;
while
(
true
)
{
if
(
flag_
.
test_and_set
(
std
::
memory_order_acquire
)
==
0
)
{
return
;
}
backoff_strategy
.
do_backoff
();
}
}
bool
tas_spin_lock
::
try_lock
(
unsigned
int
num_tries
)
{
PROFILE_LOCK
(
"Try Acquire Lock"
)
backoff
backoff_strategy
;
while
(
true
)
{
if
(
flag_
.
test_and_set
(
std
::
memory_order_acquire
)
==
0
)
{
return
true
;
}
num_tries
--
;
if
(
num_tries
<=
0
)
{
return
false
;
}
if
(
flag_
.
test_and_set
(
std
::
memory_order_acquire
)
==
0
)
{
return
true
;
}
backoff_strategy
.
do_backoff
();
}
}
...
...
This diff is collapsed.
Click to expand it.
lib/pls/src/internal/base/ttas_spin_lock.cpp
View file @
d16ad3eb
#include "pls/internal/helpers/profiler.h"
#include "pls/internal/base/ttas_spin_lock.h"
#include "pls/internal/base/backoff.h"
namespace
pls
{
namespace
internal
{
...
...
@@ -7,40 +8,44 @@ namespace base {
void
ttas_spin_lock
::
lock
()
{
PROFILE_LOCK
(
"Acquire Lock"
)
int
tries
=
0
;
int
expected
=
0
;
backoff_
.
reset
();
while
(
true
)
{
while
(
flag_
.
load
(
std
::
memory_order_relaxed
)
==
1
)
{
tries
++
;
if
(
tries
%
yield_at_tries_
==
0
)
{
this_thread
::
yield
();
}
}
while
(
flag_
.
load
(
std
::
memory_order_relaxed
)
==
1
)
system_details
::
relax_cpu
();
// Spin
expected
=
0
;
if
(
flag_
.
compare_exchange_weak
(
expected
,
1
,
std
::
memory_order_acquire
))
{
return
;
}
backoff_
.
do_backoff
();
}
}
bool
ttas_spin_lock
::
try_lock
(
unsigned
int
num_tries
)
{
PROFILE_LOCK
(
"Try Acquire Lock"
)
int
expected
=
0
;
backoff_
.
reset
();
while
(
true
)
{
while
(
flag_
.
load
(
std
::
memory_order_relaxed
)
==
1
)
{
while
(
flag_
.
load
()
==
1
)
{
num_tries
--
;
if
(
num_tries
<=
0
)
{
return
false
;
}
system_details
::
relax_cpu
();
}
expected
=
0
;
if
(
flag_
.
compare_exchange_weak
(
expected
,
1
,
std
::
memory_order_acquire
))
{
return
true
;
}
num_tries
--
;
if
(
num_tries
<=
0
)
{
return
false
;
}
backoff_
.
do_backoff
();
}
}
...
...
This diff is collapsed.
Click to expand it.
lib/pls/src/internal/scheduling/abstract_task.cpp
View file @
d16ad3eb
...
...
@@ -15,11 +15,11 @@ bool abstract_task::steal_work() {
const
size_t
my_id
=
my_state
->
id_
;
const
size_t
offset
=
my_state
->
random_
()
%
my_scheduler
->
num_threads
();
const
size_t
max_tries
=
my_scheduler
->
num_threads
();
// TODO: Tune this value
const
size_t
max_tries
=
my_scheduler
->
num_threads
()
-
1
;
// TODO: Tune this value
for
(
size_t
i
=
0
;
i
<
max_tries
;
i
++
)
{
size_t
target
=
(
offset
+
i
)
%
my_scheduler
->
num_threads
();
if
(
target
==
my_id
)
{
continue
;
target
=
(
target
+
1
)
%
my_scheduler
->
num_threads
()
;
}
auto
target_state
=
my_scheduler
->
thread_state_for
(
target
);
...
...
This diff is collapsed.
Click to expand it.
lib/pls/src/internal/scheduling/fork_join_task.cpp
View file @
d16ad3eb
...
...
@@ -54,13 +54,15 @@ void fork_join_sub_task::wait_for_all() {
if
(
local_task
!=
nullptr
)
{
local_task
->
execute
();
}
else
{
// Try to steal work.
// External steal will be executed implicitly if success
PROFILE_STEALING
(
"steal work"
)
bool
internal_steal_success
=
tbb_task_
->
steal_work
();
PROFILE_END_BLOCK
if
(
internal_steal_success
)
{
tbb_task_
->
last_stolen_
->
execute
();
while
(
ref_count_
>
0
)
{
// Try to steal work.
// External steal will be executed implicitly if success
PROFILE_STEALING
(
"steal work"
)
bool
internal_steal_success
=
tbb_task_
->
steal_work
();
PROFILE_END_BLOCK
if
(
internal_steal_success
)
{
tbb_task_
->
last_stolen_
->
execute
();
}
}
}
}
...
...
This diff is collapsed.
Click to expand it.
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment