las3_pub / predictable_parallel_patterns
Commit 92da43ff, authored Apr 02, 2019 by FritzFlorian
Rename tbb_task to fork_join_task.
parent f3fe5ac3
Showing 8 changed files with 65 additions and 47 deletions (+65 -47):

app/playground/main.cpp                                     +2  -2
lib/pls/CMakeLists.txt                                      +3  -2
lib/pls/include/pls/algorithms/invoke_parallel.h            +16 -0
lib/pls/include/pls/internal/scheduling/fork_join_task.h    +18 -18
lib/pls/include/pls/pls.h                                   +3  -3
lib/pls/src/algorithms/invoke_parallel.cpp                  +1  -0
lib/pls/src/internal/scheduling/fork_join_task.cpp          +15 -15
test/scheduling_tests.cpp                                   +7  -7
app/playground/main.cpp
@@ -12,7 +12,7 @@ using namespace pls;
 // Example for static memory allocation (no malloc or free required)
 static static_scheduler_memory<8, 2 << 12> my_scheduler_memory;

-class fib: public tbb_sub_task {
+class fib: public fork_join_sub_task {
     static constexpr int CUTOFF = 20;

     int num_;
...
@@ -64,7 +64,7 @@ int main() {
     int result;
     fib fib_sub_task{45, &result};
-    tbb_task tbb_task{&fib_sub_task, task_id{1}};
+    fork_join_task tbb_task{&fib_sub_task, task_id{1}};
     scheduler::execute_task(tbb_task);

     std::cout << "Result: " << result << std::endl;
...
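The hunks above show only fib's declaration and its call site; the recursive body lies outside the diff context. For orientation, here is a hedged sketch of the typical shape of such a sub-task after the rename — the result_ member, the fib_serial helper, and the body of execute_internal are assumptions for illustration, not part of this commit:

    // Hypothetical sketch, not taken from this commit.
    class fib: public pls::fork_join_sub_task {
        static constexpr int CUTOFF = 20;

        int num_;
        int* result_;  // assumed: fib is constructed as fib{45, &result}

    protected:
        void execute_internal() override {
            if (num_ <= CUTOFF) {
                *result_ = fib_serial(num_);  // assumed serial fallback
                return;
            }
            int left, right;
            spawn_child(fib{num_ - 1, &left});   // copied onto the task's aligned_stack
            spawn_child(fib{num_ - 2, &right});
            wait_for_all();                      // helps execute local/stolen children
            *result_ = left + right;
        }

    public:
        fib(int num, int* result): num_{num}, result_{result} {}

    private:
        static int fib_serial(int n) {
            return n <= 1 ? n : fib_serial(n - 1) + fib_serial(n - 2);
        }
    };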
lib/pls/CMakeLists.txt
@@ -12,8 +12,9 @@ add_library(pls STATIC
         src/internal/base/aligned_stack.cpp include/pls/internal/base/aligned_stack.h
         include/pls/internal/base/system_details.h
         src/internal/scheduling/run_on_n_threads_task.cpp include/pls/internal/scheduling/run_on_n_threads_task.h
-        src/internal/scheduling/tbb_task.cpp include/pls/internal/scheduling/tbb_task.h
+        src/internal/scheduling/fork_join_task.cpp include/pls/internal/scheduling/fork_join_task.h
         src/internal/base/deque.cpp include/pls/internal/base/deque.h
+        src/algorithms/invoke_parallel.cpp include/pls/algorithms/invoke_parallel.h
 )

 # Add everything in `./include` to be in the include path of this project
 target_include_directories(pls
...
lib/pls/include/pls/algorithms/invoke_parallel.h (new file, 100644)
+#ifndef PLS_PARALLEL_INVOKE_H
+#define PLS_PARALLEL_INVOKE_H
+
+namespace pls {
+    namespace algorithm {
+//        template<typename Function1, typename Function2>
+//        void invoke_parallel(Function1 function1, Function2 function2) {
+//            if (already_this_invoke_parallel_instance) {
+//
+//            }
+//        }
+    }
+}
+
+#endif //PLS_PARALLEL_INVOKE_H
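The new header carries only a commented-out stub, so the invoke_parallel design is still open at this commit. One hedged guess at how it could be built on the fork/join primitives introduced here — lambda_sub_task and current_sub_task() are invented names for illustration, not existing API:

    // Hypothetical sketch, not part of this commit.
    template<typename Function>
    class lambda_sub_task: public pls::fork_join_sub_task {
        Function function_;

    protected:
        void execute_internal() override {
            function_();  // simply run the wrapped callable
        }

    public:
        explicit lambda_sub_task(Function function): function_{function} {}
    };

    template<typename Function1, typename Function2>
    void invoke_parallel(Function1 function1, Function2 function2) {
        // Assumes some way to reach the currently executing sub-task,
        // e.g. a (hypothetical) current_sub_task() accessor.
        pls::fork_join_sub_task* self = current_sub_task();
        self->spawn_child(lambda_sub_task<Function2>{function2});  // fork one branch
        function1();                                               // run the other inline
        self->wait_for_all();                                      // join
    }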
lib/pls/include/pls/internal/scheduling/tbb_task.h → lib/pls/include/pls/internal/scheduling/fork_join_task.h
@@ -11,22 +11,22 @@
 namespace pls {
     namespace internal {
         namespace scheduling {
-            class tbb_task;
+            class fork_join_task;

-            class tbb_sub_task: public base::deque_item {
-                friend class tbb_task;
+            class fork_join_sub_task: public base::deque_item {
+                friend class fork_join_task;

                 // Coordinate finishing of sub_tasks
                 std::atomic_uint32_t ref_count_;
-                tbb_sub_task* parent_;
+                fork_join_sub_task* parent_;

                 // Access to TBB scheduling environment
-                tbb_task* tbb_task_;
+                fork_join_task* tbb_task_;

                 // Stack Management (reset stack pointer after wait_for_all() calls)
                 base::aligned_stack::state stack_state_;
             protected:
-                explicit tbb_sub_task();
-                tbb_sub_task(const tbb_sub_task& other);
+                explicit fork_join_sub_task();
+                fork_join_sub_task(const fork_join_sub_task& other);

                 virtual void execute_internal() = 0;
                 // SubClass Implementations:
...
@@ -42,7 +42,7 @@ namespace pls {
                 void spawn_child(const T& sub_task);
                 void wait_for_all();
             private:
-                void spawn_child_internal(tbb_sub_task* sub_task);
+                void spawn_child_internal(fork_join_sub_task* sub_task);
                 void execute();
             public:
...
@@ -51,26 +51,26 @@ namespace pls {
                 }
             };

-            class tbb_task: public abstract_task {
-                friend class tbb_sub_task;
+            class fork_join_task: public abstract_task {
+                friend class fork_join_sub_task;

-                tbb_sub_task* root_task_;
+                fork_join_sub_task* root_task_;
                 base::aligned_stack* my_stack_;

                 // Double-Ended Queue management
-                base::deque<tbb_sub_task> deque_;
+                base::deque<fork_join_sub_task> deque_;

                 // Steal Management
-                tbb_sub_task* last_stolen_;
+                fork_join_sub_task* last_stolen_;

-                tbb_sub_task* get_local_sub_task();
-                tbb_sub_task* get_stolen_sub_task();
+                fork_join_sub_task* get_local_sub_task();
+                fork_join_sub_task* get_stolen_sub_task();
                 bool internal_stealing(abstract_task* other_task) override;
                 bool split_task(base::spin_lock* /*lock*/) override;
             public:
-                explicit tbb_task(tbb_sub_task* root_task, const abstract_task::id& id):
+                explicit fork_join_task(fork_join_sub_task* root_task, const abstract_task::id& id):
                         abstract_task{0, id},
                         root_task_{root_task},
                         my_stack_{nullptr},
...
@@ -89,8 +89,8 @@ namespace pls {
             };

             template<typename T>
-            void tbb_sub_task::spawn_child(const T& task) {
-                static_assert(std::is_base_of<tbb_sub_task, T>::value, "Only pass tbb_sub_task subclasses!");
+            void fork_join_sub_task::spawn_child(const T& task) {
+                static_assert(std::is_base_of<fork_join_sub_task, T>::value, "Only pass fork_join_sub_task subclasses!");

                 T* new_task = tbb_task_->my_stack_->push(task);
                 spawn_child_internal(new_task);
...
lib/pls/include/pls/pls.h
@@ -2,7 +2,7 @@
 #define PLS_LIBRARY_H

 #include "pls/internal/scheduling/scheduler.h"
-#include "pls/internal/scheduling/tbb_task.h"
+#include "pls/internal/scheduling/fork_join_task.h"
 #include "pls/internal/scheduling/abstract_task.h"

 namespace pls {
...
@@ -10,8 +10,8 @@ namespace pls {
     using internal::scheduling::static_scheduler_memory;

     using task_id = internal::scheduling::abstract_task::id;
-    using internal::scheduling::tbb_sub_task;
-    using internal::scheduling::tbb_task;
+    using internal::scheduling::fork_join_sub_task;
+    using internal::scheduling::fork_join_task;
 }

 #endif
lib/pls/src/algorithms/invoke_parallel.cpp (new file, 100644)
+#include "pls/algorithms/invoke_parallel.h"
lib/pls/src/internal/scheduling/tbb_task.cpp → lib/pls/src/internal/scheduling/fork_join_task.cpp
 #include "pls/internal/scheduling/scheduler.h"
-#include "pls/internal/scheduling/tbb_task.h"
+#include "pls/internal/scheduling/fork_join_task.h"

 namespace pls {
     namespace internal {
         namespace scheduling {
-            tbb_sub_task::tbb_sub_task():
+            fork_join_sub_task::fork_join_sub_task():
                     base::deque_item{},
                     ref_count_{0},
                     parent_{nullptr},
                     tbb_task_{nullptr},
                     stack_state_{nullptr} {}

-            tbb_sub_task::tbb_sub_task(const tbb_sub_task& other): base::deque_item(other) {
+            fork_join_sub_task::fork_join_sub_task(const fork_join_sub_task& other): base::deque_item(other) {
                 // Do Nothing, will be inited after this anyways
             }

-            void tbb_sub_task::execute() {
+            void fork_join_sub_task::execute() {
                 execute_internal();
                 wait_for_all();
...
@@ -24,7 +24,7 @@ namespace pls {
                 }
             }

-            void tbb_sub_task::spawn_child_internal(tbb_sub_task* sub_task) {
+            void fork_join_sub_task::spawn_child_internal(fork_join_sub_task* sub_task) {
                 // Keep our refcount up to date
                 ref_count_++;
...
@@ -36,9 +36,9 @@ namespace pls {
                 tbb_task_->deque_.push_tail(sub_task);
             }

-            void tbb_sub_task::wait_for_all() {
+            void fork_join_sub_task::wait_for_all() {
                 while (ref_count_ > 0) {
-                    tbb_sub_task* local_task = tbb_task_->get_local_sub_task();
+                    fork_join_sub_task* local_task = tbb_task_->get_local_sub_task();
                     if (local_task != nullptr) {
                         local_task->execute();
                     } else {
...
@@ -52,22 +52,22 @@ namespace pls {
                 tbb_task_->my_stack_->reset_state(stack_state_);
             }

-            tbb_sub_task* tbb_task::get_local_sub_task() {
+            fork_join_sub_task* fork_join_task::get_local_sub_task() {
                 return deque_.pop_tail();
             }

-            tbb_sub_task* tbb_task::get_stolen_sub_task() {
+            fork_join_sub_task* fork_join_task::get_stolen_sub_task() {
                 return deque_.pop_head();
             }

-            bool tbb_task::internal_stealing(abstract_task* other_task) {
-                auto cast_other_task = reinterpret_cast<tbb_task*>(other_task);
+            bool fork_join_task::internal_stealing(abstract_task* other_task) {
+                auto cast_other_task = reinterpret_cast<fork_join_task*>(other_task);

                 auto stolen_sub_task = cast_other_task->get_stolen_sub_task();
                 if (stolen_sub_task == nullptr) {
                     return false;
                 } else {
-                    // Make sub-task belong to our tbb_task instance
+                    // Make sub-task belong to our fork_join_task instance
                     stolen_sub_task->tbb_task_ = this;
                     stolen_sub_task->stack_state_ = my_stack_->save_state();
                     // We will execute this next without explicitly moving it onto our stack storage
...
@@ -77,12 +77,12 @@ namespace pls {
                 }
             }

-            bool tbb_task::split_task(base::spin_lock* lock) {
-                tbb_sub_task* stolen_sub_task = get_stolen_sub_task();
+            bool fork_join_task::split_task(base::spin_lock* lock) {
+                fork_join_sub_task* stolen_sub_task = get_stolen_sub_task();
                 if (stolen_sub_task == nullptr) {
                     return false;
                 }
-                tbb_task task{stolen_sub_task, this->unique_id()};
+                fork_join_task task{stolen_sub_task, this->unique_id()};

                 // In success case, unlock.
                 // TODO: this locking is complicated and error prone.
...
test/scheduling_tests.cpp
@@ -4,7 +4,7 @@
 using namespace pls;

-class once_sub_task: public tbb_sub_task {
+class once_sub_task: public fork_join_sub_task {
     std::atomic<int>* counter_;
     int children_;
...
@@ -18,12 +18,12 @@ protected:
 public:
     explicit once_sub_task(std::atomic<int>* counter, int children):
-            tbb_sub_task(),
+            fork_join_sub_task(),
             counter_{counter},
             children_{children} {}
 };

-class force_steal_sub_task: public tbb_sub_task {
+class force_steal_sub_task: public fork_join_sub_task {
     std::atomic<int>* parent_counter_;
     std::atomic<int>* overall_counter_;
...
@@ -42,12 +42,12 @@ protected:
 public:
     explicit force_steal_sub_task(std::atomic<int>* parent_counter, std::atomic<int>* overall_counter):
-            tbb_sub_task(),
+            fork_join_sub_task(),
             parent_counter_{parent_counter},
             overall_counter_{overall_counter} {}
 };

-TEST_CASE( "tbb task are scheduled correctly", "[internal/scheduling/tbb_task.h]") {
+TEST_CASE( "tbb task are scheduled correctly", "[internal/scheduling/fork_join_task.h]") {
     static static_scheduler_memory<8, 2 << 12> my_scheduler_memory;

     SECTION("tasks are executed exactly once") {
...
@@ -58,7 +58,7 @@ TEST_CASE( "tbb task are scheduled correctly", ...
         my_scheduler.perform_work([&] (){
             once_sub_task sub_task{&counter, start_counter};
-            tbb_task task{&sub_task};
+            fork_join_task task{&sub_task};
             scheduler::execute_task(task);
         });
...
@@ -71,7 +71,7 @@ TEST_CASE( "tbb task are scheduled correctly", ...
         my_scheduler.perform_work([&] (){
             std::atomic<int> dummy_parent{1}, overall_counter{8};
             force_steal_sub_task sub_task{&dummy_parent, &overall_counter};
-            tbb_task task{&sub_task};
+            fork_join_task task{&sub_task};
             scheduler::execute_task(task);
         });

     my_scheduler.terminate(true);
...
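Both test hunks cut off at the protected section, so the execute_internal bodies are not visible in this diff. A hedged reading of what once_sub_task presumably does, inferred from its members and the "tasks are executed exactly once" section name — the body below is an assumption, not part of this commit:

    // Hypothetical sketch, not taken from this commit: each task bumps the
    // shared counter once and spawns `children_` children, so the test can
    // assert that the final counter matches the total number of tasks spawned.
    void once_sub_task::execute_internal() {
        (*counter_)++;
        for (int i = 0; i < children_; i++) {
            spawn_child(once_sub_task{counter_, children_ - 1});
        }
    }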