las3_pub / jester
Commit f0a948aa authored May 26, 2020 by Michael Schmid
DagUtils now in separate class and small changes
parent a52ab93d
Showing 18 changed files with 688 additions and 727 deletions
src/main/java/mvd/jester/info/SchedulingInfo.java (+72 / -55)
src/main/java/mvd/jester/info/TerminationInfo.java (+1 / -23)
src/main/java/mvd/jester/model/DagTask.java (+5 / -387)
src/main/java/mvd/jester/model/SortedTaskSet.java (+7 / -6)
src/main/java/mvd/jester/model/SynchronousTask.java (+9 / -8)
src/main/java/mvd/jester/model/SystemSetup.java (+2 / -2)
src/main/java/mvd/jester/model/TreeJob.java (+2 / -2)
src/main/java/mvd/jester/tests/ChwaLee.java (+26 / -28)
src/main/java/mvd/jester/tests/FonsecaNelis.java (+80 / -74)
src/main/java/mvd/jester/tests/MaiaBertogna.java (+25 / -27)
src/main/java/mvd/jester/tests/MelaniButtazzo.java (+23 / -25)
src/main/java/mvd/jester/tests/SchmidMottok.java (+25 / -26)
src/main/java/mvd/jester/utils/BinaryDecompositionTree.java (+7 / -7)
src/main/java/mvd/jester/utils/DagUtils.java (+391 / -0)
src/main/java/mvd/jester/utils/Logger.java (+5 / -5)
src/test/java/mvd/jester/info/TestSchedulingInfo.java (+5 / -45)
src/test/java/mvd/jester/info/TestTerminationInfo.java (+2 / -6)
src/test/java/mvd/jester/model/TestDagUtils.java (+1 / -1)
src/main/java/mvd/jester/info/SchedulingInfo.java  (view file @ f0a948aa)

package mvd.jester.info;

import java.util.HashSet;
import java.util.Collection;
import java.util.Optional;
import java.util.Set;
import mvd.jester.info.TerminationInfo.Level;

/**
 * SchedulingInfo
 */
public class SchedulingInfo {

    private final Feasiblity feasiblity;
    private final double parallelTaskRatio;
    private final double utilization;
    private final Set<TerminationInfo> terminationInfos;
    private Optional<TerminationInfo> failedTerminationInfo;

    public SchedulingInfo(double parallelTaskRatio, double utilization) {
        this.parallelTaskRatio = parallelTaskRatio;
        this.utilization = utilization;
        this.terminationInfos = new HashSet<>();
        this.failedTerminationInfo = Optional.empty();

    public SchedulingInfo(Collection<TerminationInfo> terminationInfos) {
        Optional<TerminationInfo> failedTerminationInfo =
                terminationInfos.stream().filter(t -> t.getLateness() > 0).findFirst();
        feasiblity = failedTerminationInfo.isPresent() ? Feasiblity.FAILED : Feasiblity.SUCCEEDED;
    }

    /**
     * @return the utilization
     * @return the feasiblity
     */
    public double getUtilization() {
        return utilization;

    public Feasiblity getFeasibility() {
        return feasiblity;
    }

    /**
     * @return the parallelTaskRatio
     */
    public double getParallelTaskRatio() {
        return parallelTaskRatio;

    public enum Feasiblity {
        FAILED, SUCCEEDED,
    }

    public SchedulingInfo(Set<TerminationInfo> terminationInfos, double parallelTaskRatio,
            double utilization) {
        this.terminationInfos = terminationInfos;
        this.parallelTaskRatio = parallelTaskRatio;
        this.utilization = utilization;
        failedTerminationInfo =
                terminationInfos.stream().filter(t -> t.getLateness() > 0).findFirst();
    }

    public boolean checkLevelFail(Level level) {
        return terminationInfos.stream()
                .anyMatch(t -> t.getLateness() > 0 && t.getTaskLevel() == level);
    }

    // private final double parallelTaskRatio;
    // private final double utilization;
    // private final Set<TerminationInfo> terminationInfos;
    // private Optional<TerminationInfo> failedTerminationInfo;

    public boolean checkTasksetFeasible() {
        // return terminationInfos.isEmpty();
        return !terminationInfos.stream().anyMatch(t -> t.getLateness() > 0);
    }

    // public SchedulingInfo(double parallelTaskRatio, double utilization) {
    //     this.parallelTaskRatio = parallelTaskRatio;
    //     this.utilization = utilization;
    //     this.terminationInfos = new HashSet<>();
    //     this.failedTerminationInfo = Optional.empty();
    // }

    public boolean addTerminationInfo(TerminationInfo terminationInfo) {
        return terminationInfos.add(terminationInfo);
    }

    // /**
    //  * @return the utilization
    //  */
    // public double getUtilization() {
    //     return utilization;
    // }

    /**
     * @return the terminationInfos
     */
    public Set<TerminationInfo> getTerminationInfos() {
        return terminationInfos;
    }

    // /**
    //  * @return the parallelTaskRatio
    //  */
    // public double getParallelTaskRatio() {
    //     return parallelTaskRatio;
    // }

    /**
     * @return the failedTerminationInfo
     */
    public Optional<TerminationInfo> getFailedTerminationInfo() {
        return failedTerminationInfo;
    }

    // public SchedulingInfo(Set<TerminationInfo> terminationInfos, double parallelTaskRatio,
    //         double utilization) {
    //     this.terminationInfos = terminationInfos;
    //     this.parallelTaskRatio = parallelTaskRatio;
    //     this.utilization = utilization;
    //     failedTerminationInfo =
    //             terminationInfos.stream().filter(t -> t.getLateness() > 0).findFirst();
    // }

    public void setFailedTerminationInfo(TerminationInfo failedTerminationInfo) {
        this.failedTerminationInfo = Optional.of(failedTerminationInfo);
    }

    // public boolean checkLevelFail(Level level) {
    //     return terminationInfos.stream()
    //             .anyMatch(t -> t.getLateness() > 0 && t.getTaskLevel() == level);
    // }

    // public boolean checkTasksetFeasible() {
    //     // return terminationInfos.isEmpty();
    //     return !terminationInfos.stream().anyMatch(t -> t.getLateness() > 0);
    // }

    // public boolean addTerminationInfo(TerminationInfo terminationInfo) {
    //     return terminationInfos.add(terminationInfo);
    // }

    // /**
    //  * @return the terminationInfos
    //  */
    // public Set<TerminationInfo> getTerminationInfos() {
    //     return terminationInfos;
    // }

    // /**
    //  * @return the failedTerminationInfo
    //  */
    // public Optional<TerminationInfo> getFailedTerminationInfo() {
    //     return failedTerminationInfo;
    // }

    // public void setFailedTerminationInfo(TerminationInfo failedTerminationInfo) {
    //     this.failedTerminationInfo = Optional.of(failedTerminationInfo);
    // }
}
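The reworked SchedulingInfo derives overall feasibility from the lateness of the supplied TerminationInfos instead of carrying utilization and parallel-task ratio through the tests. A minimal usage sketch, assuming the Collection-based SchedulingInfo constructor and the two-argument TerminationInfo constructor introduced in this commit; the wrapper class name is hypothetical:

import java.util.Arrays;
import mvd.jester.info.SchedulingInfo;
import mvd.jester.info.TerminationInfo;

public class SchedulingInfoExample {
    public static void main(String[] args) {
        // lateness = responseTime - deadline, so the second entry misses its deadline
        TerminationInfo onTime = new TerminationInfo(100, 80);
        TerminationInfo late = new TerminationInfo(100, 120);

        SchedulingInfo info = new SchedulingInfo(Arrays.asList(onTime, late));
        // FAILED, because at least one TerminationInfo has positive lateness
        System.out.println(info.getFeasibility());
    }
}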
src/main/java/mvd/jester/info/TerminationInfo.java  (view file @ f0a948aa)

@@ -9,30 +9,19 @@ public class TerminationInfo {
    private final long deadline;
    private final long responseTime;
    private final long lateness;
    private final Level taskLevel;

    public TerminationInfo(long releaseTime, long deadline, long responseTime) {
        this.releaseTime = releaseTime;
        this.deadline = deadline;
        this.responseTime = responseTime;
        this.lateness = responseTime - deadline;
        this.taskLevel = Level.LOW;
    }

    public TerminationInfo(long releaseTime, long deadline, long responseTime, Level taskLevel) {
        this.releaseTime = releaseTime;
        this.deadline = deadline;
        this.responseTime = responseTime;
        this.lateness = responseTime - deadline;
        this.taskLevel = taskLevel;
    }

    public TerminationInfo(long deadline, long responseTime, Level taskLevel) {
    public TerminationInfo(long deadline, long responseTime) {
        this.releaseTime = 0;
        this.deadline = deadline;
        this.responseTime = responseTime;
        this.lateness = responseTime - deadline;
        this.taskLevel = taskLevel;
    }

    /**
@@ -62,15 +51,4 @@ public class TerminationInfo {
    public long getResponseTime() {
        return responseTime;
    }

    /**
     * @return the taskLevel
     */
    public Level getTaskLevel() {
        return taskLevel;
    }

    public enum Level {
        HIGH, LOW
    }
}
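TerminationInfo computes lateness as responseTime minus deadline in every constructor, and the new two-argument constructor drops the explicit task level. A small sketch of that relationship, assuming getLateness() as used by SchedulingInfo in this commit; the wrapper class name is hypothetical:

import mvd.jester.info.TerminationInfo;

public class TerminationInfoExample {
    public static void main(String[] args) {
        TerminationInfo missed = new TerminationInfo(100, 120); // lateness = 20, deadline miss
        TerminationInfo met = new TerminationInfo(100, 80);     // lateness = -20, deadline met
        System.out.println(missed.getLateness() > 0);           // true
        System.out.println(met.getLateness() > 0);              // false
    }
}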
src/main/java/mvd/jester/model/DagTask.java  (view file @ f0a948aa)

package mvd.jester.model;

import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import org.jgrapht.Graphs;
import org.jgrapht.experimental.dag.DirectedAcyclicGraph;
import org.jgrapht.graph.DefaultEdge;
import mvd.jester.utils.BinaryDecompositionTree;
import mvd.jester.utils.BinaryDecompositionTree.Node;
import mvd.jester.utils.BinaryDecompositionTree.NodeType;
import mvd.jester.utils.DagUtils;

public class DagTask implements Task {

@@ -26,8 +15,8 @@ public class DagTask implements Task {
    private final long deadline;
    private final long numberOfThreads;

    public DagTask(DirectedAcyclicGraph<Job, DefaultEdge> jobDag, long period, long numberOfThreads) {
    public DagTask(final DirectedAcyclicGraph<Job, DefaultEdge> jobDag, final long period,
            final long numberOfThreads) {
        this.jobDag = jobDag;
        this.period = period;
        this.deadline = period;

@@ -59,7 +48,7 @@ public class DagTask implements Task {
    /**
     * @param jobDag the jobDag to set
     */
    public void setJobDag(DirectedAcyclicGraph<Job, DefaultEdge> jobDag) {
    public void setJobDag(final DirectedAcyclicGraph<Job, DefaultEdge> jobDag) {
        this.jobDag = jobDag;
    }

@@ -95,7 +84,7 @@ public class DagTask implements Task {
    @Override
    public long getMaximumParallelism() {
        long max = 0;
        for (Segment s : workloadDistribution) {
        for (final Segment s : workloadDistribution) {
            if (max < s.getNumberOfJobs()) {
                max = s.getNumberOfJobs();
            }

@@ -109,375 +98,4 @@ public class DagTask implements Task {
        return numberOfThreads;
    }

    public static class DagUtils {
        public static long calculateWorkload(DirectedAcyclicGraph<Job, DefaultEdge> jobDag) {
            long workload = 0;
            for (Job job : jobDag) {
                workload += job.getWcet();
            }
            return workload;
        }

        public static long calculateCriticalPath(DirectedAcyclicGraph<Job, DefaultEdge> jobDag) {
            long criticalPath = 0;
            // BreadthFirstIterator<Job, DefaultEdge> breadthFirstIterator =
            //         new BreadthFirstIterator<>(jobDag);
            // while (breadthFirstIterator.hasNext()) {
            //     Job job = breadthFirstIterator.next();
            for (Job job : jobDag) {
                Set<DefaultEdge> edges = jobDag.incomingEdgesOf(job);
                long longestRelativeCompletionTime = 0;
                for (DefaultEdge e : edges) {
                    Job source = jobDag.getEdgeSource(e);
                    longestRelativeCompletionTime =
                            longestRelativeCompletionTime >= source.getRelativeCompletionTime()
                                    ? longestRelativeCompletionTime
                                    : source.getRelativeCompletionTime();
                }
                job.setRelativeCompletionTime(longestRelativeCompletionTime + job.getWcet());
                criticalPath = job.getRelativeCompletionTime();
            }
            return criticalPath;
        }

        public static LinkedHashSet<Segment> calculateWorkloadDistribution(
                DirectedAcyclicGraph<Job, DefaultEdge> jobDag, long criticalPath) {
            LinkedHashSet<Segment> segments = new LinkedHashSet<>();
            long segmentDuration = 0;
            long segmentHeight = 1;
            for (long t = 0; t < criticalPath; ++t) {
                long currentHeight = 0;
                for (Job j : jobDag) {
                    if (t >= j.getRelativeCompletionTime() - j.getWcet()
                            && t < j.getRelativeCompletionTime()) {
                        currentHeight++;
                    }
                }
                if (currentHeight == segmentHeight) {
                    segmentDuration++;
                } else {
                    segments.add(new Segment(segmentDuration, segmentHeight));
                    segmentDuration = 1;
                    segmentHeight = currentHeight;
                }
            }
            segments.add(new Segment(segmentDuration, segmentHeight));
            return segments;
        }

        public static DirectedAcyclicGraph<Job, DefaultEdge> createNFJGraph(
                DirectedAcyclicGraph<Job, DefaultEdge> jobDag) {
            LinkedList<Job> joinNodes = new LinkedList<>();
            LinkedList<Job> forkNodes = new LinkedList<>();
            collectForksAndJoins(jobDag, forkNodes, joinNodes);
            return createNFJGraph(jobDag, forkNodes, joinNodes);
        }

        public static void collectForksAndJoins(DirectedAcyclicGraph<Job, DefaultEdge> jobDag,
                LinkedList<Job> forkNodes, LinkedList<Job> joinNodes) {
            for (Job j : jobDag) {
                if (jobDag.inDegreeOf(j) > 1) {
                    joinNodes.add(j);
                }
                if (jobDag.outDegreeOf(j) > 1) {
                    forkNodes.add(j);
                }
            }
        }

        public static DirectedAcyclicGraph<Job, DefaultEdge> createNFJGraph(
                DirectedAcyclicGraph<Job, DefaultEdge> jobDag, LinkedList<Job> forkNodes,
                LinkedList<Job> joinNodes) {
            DirectedAcyclicGraph<Job, DefaultEdge> modifiedJobDag =
                    new DirectedAcyclicGraph<>(DefaultEdge.class);
            Graphs.addGraph(modifiedJobDag, jobDag);
            if (!(joinNodes.size() > 1)) {
                return modifiedJobDag;
            }
            final Job sink = joinNodes.getLast();
            for (Job j : joinNodes) {
                Set<DefaultEdge> edgeSet = new HashSet<>(modifiedJobDag.incomingEdgesOf(j));
                for (DefaultEdge e : edgeSet) {
                    Job predecessor = modifiedJobDag.getEdgeSource(e);
                    boolean satisfiesProposition =
                            DagUtils.checkForFork(modifiedJobDag, j, forkNodes, predecessor);
                    if (!satisfiesProposition) {
                        modifiedJobDag.removeEdge(e);
                        if (modifiedJobDag.outgoingEdgesOf(predecessor).isEmpty()) {
                            try {
                                modifiedJobDag.addDagEdge(predecessor, sink);
                            } catch (Exception ex) {
                            }
                        }
                    }
                    if (modifiedJobDag.inDegreeOf(j) == 1) {
                        break;
                    }
                }
            }
            return modifiedJobDag;
        }

        public static BinaryDecompositionTree<Job> createDecompositionTree(
                DirectedAcyclicGraph<Job, DefaultEdge> jobDag) {
            LinkedList<Job> joinNodes = new LinkedList<>();
            LinkedList<Job> forkNodes = new LinkedList<>();
            collectForksAndJoins(jobDag, forkNodes, joinNodes);
            return createDecompositionTree(jobDag, forkNodes, joinNodes);
        }

        public static BinaryDecompositionTree<Job> createDecompositionTree(
                DirectedAcyclicGraph<Job, DefaultEdge> jobDag, LinkedList<Job> forkNodes,
                LinkedList<Job> joinNodes) {
            BinaryDecompositionTree<Job> tree = new BinaryDecompositionTree<>();
            Optional<Job> source = Optional.empty();
            Optional<Job> sink = Optional.empty();
            if (forkNodes.size() > 0) {
                source = Optional.of(forkNodes.getFirst());
                sink = Optional.of(joinNodes.getLast());
            } else {
                boolean firstRun = true;
                for (Job j : jobDag) {
                    if (firstRun) {
                        firstRun = false;
                        source = Optional.of(j);
                    }
                    sink = Optional.of(j);
                }
            }
            if (source.isPresent() && sink.isPresent()) {
                Job current = source.get();
                DagUtils.constructTree(jobDag, null, current, tree, forkNodes, joinNodes,
                        sink.get());
            }
            return tree;
        }

        private static Node<Job> constructTree(DirectedAcyclicGraph<Job, DefaultEdge> jobDag,
                Node<Job> parentNode, Job currentJob, BinaryDecompositionTree<Job> tree,
                LinkedList<Job> forkVertices, LinkedList<Job> joinVertices, Job sinkJob) {
            if (forkVertices.contains(currentJob)) {
                Job forkJob = currentJob;
                Job joinJob = findJoin(jobDag, forkJob, sinkJob, joinVertices);
                Node<Job> seqForkNode = new Node<Job>(parentNode, NodeType.SEQ);
                seqForkNode.setLeftNode(new Node<Job>(seqForkNode, forkJob));
                if (tree.isEmpty()) {
                    tree.setRoot(seqForkNode);
                }
                final Node<Job> parContinuationNode;
                if (!tree.contains(joinJob)) {
                    Node<Job> seqJoinNode =
                            seqForkNode.setRightNode(new Node<Job>(seqForkNode, NodeType.SEQ));
                    Node<Job> subTreeOfJoin = constructTree(jobDag, seqJoinNode, joinJob, tree,
                            forkVertices, joinVertices, sinkJob);
                    seqJoinNode.setRightNode(subTreeOfJoin);
                    parContinuationNode =
                            seqJoinNode.setLeftNode(new Node<Job>(seqJoinNode, NodeType.PAR));
                } else {
                    parContinuationNode =
                            seqForkNode.setRightNode(new Node<Job>(seqForkNode, NodeType.PAR));
                }

                Node<Job> successorParent = parContinuationNode;
                List<Job> successors = Graphs.successorListOf(jobDag, currentJob);
                // create leftSide of joinNode
                for (int i = 0; i < successors.size(); ++i) {
                    Job succ = successors.get(i);
                    Node<Job> thisNode = constructTree(jobDag, successorParent, succ, tree,
                            forkVertices, joinVertices, sinkJob);
                    if (i == successors.size() - 1) {
                        successorParent.setRightNode(thisNode);
                    } else if (i == successors.size() - 2) {
                        successorParent.setLeftNode(thisNode);
                    } else {
                        successorParent.setLeftNode(thisNode);
                        successorParent.setRightNode(new Node<>(successorParent, NodeType.PAR));
                        successorParent = successorParent.getRightNode();
                    }
                }
                return seqForkNode;
            } else {
                List<Job> successorList = Graphs.successorListOf(jobDag, currentJob);
                if (successorList.size() > 0) {
                    Job successor = successorList.get(0);
                    if (!tree.contains(successor)) {
                        Node<Job> seqJobNode = new Node<Job>(parentNode, NodeType.SEQ);
                        if (tree.isEmpty()) {
                            tree.setRoot(seqJobNode);
                        }
                        seqJobNode.setLeftNode(new Node<Job>(seqJobNode, currentJob));
                        Node<Job> contNode = constructTree(jobDag, seqJobNode, successor, tree,
                                forkVertices, joinVertices, sinkJob);
                        seqJobNode.setRightNode(contNode);
                        return seqJobNode;
                    }
                }
                Node<Job> jobNode = new Node<Job>(parentNode, currentJob);
                if (tree.isEmpty()) {
                    tree.setRoot(jobNode);
                }
                return jobNode;
            }
        }

        public static boolean checkNFJProperty(DirectedAcyclicGraph<Job, DefaultEdge> jobDag) {
            LinkedList<Job> joinNodes = new LinkedList<>();
            LinkedList<Job> forkNodes = new LinkedList<>();
            collectForksAndJoins(jobDag, forkNodes, joinNodes);
            if (!(joinNodes.size() > 1)) {
                return true;
            }
            nextJoin: for (Job j : joinNodes) {
                for (Job f : forkNodes) {
                    Set<Job> ancestorsOfJ = jobDag.getAncestors(jobDag, j);
                    Set<Job> ancesotorsOfF = jobDag.getAncestors(jobDag, f);
                    ancesotorsOfF.add(f);
                    Set<Job> inbetweenJobs = new HashSet<>(ancestorsOfJ);
                    inbetweenJobs.removeAll(ancesotorsOfF);
                    if (inbetweenJobs.isEmpty()) {
                        continue;
                    }
                    for (Job a : inbetweenJobs) {
                        List<Job> neighboursOfA = Graphs.neighborListOf(jobDag, a);
                        for (Job b : neighboursOfA) {
                            if ((jobDag.getAncestors(jobDag, j).contains(b) || b == j)
                                    && (jobDag.getDescendants(jobDag, f).contains(b) || b == f)) {
                                continue nextJoin;
                            }
                        }
                    }
                }
                return false;
            }
            return true;
        }

        private static Job findJoin(DirectedAcyclicGraph<Job, DefaultEdge> jobDag, Job forkNode,
                Job sink, List<Job> joinNodes) {
            for (Job j : joinNodes) {
                Set<Job> ancestorsOfJ = jobDag.getAncestors(jobDag, j);
                Set<Job> ancesotorsOfFork = jobDag.getAncestors(jobDag, forkNode);
                ancesotorsOfFork.add(forkNode);
                Set<Job> inbetweenJobs = new HashSet<>(ancestorsOfJ);
                inbetweenJobs.removeAll(ancesotorsOfFork);
                if (inbetweenJobs.isEmpty()) {
                    continue;
                }
                List<Job> successorOfFork = Graphs.successorListOf(jobDag, forkNode);
                if (inbetweenJobs.containsAll(successorOfFork)) {
                    return j;
                }
            }
            return sink;
        }

        private static boolean checkForFork(DirectedAcyclicGraph<Job, DefaultEdge> jobDag,
                Job joinNode, List<Job> forkNodes, Job job) {
            List<Job> pred = Graphs.predecessorListOf(jobDag, job);
            for (Job p : pred) {
                if (forkNodes.contains(p)) {
                    for (Job successor : Graphs.successorListOf(jobDag, p)) {
                        if (jobDag.getAncestors(jobDag, joinNode).contains(successor)) {
                            return false;
                        }
                    }
                } else {
                    return checkForFork(jobDag, joinNode, forkNodes, p);
                }
            }
            return true;
        }

        public static DirectedAcyclicGraph<Job, DefaultEdge> createNFJfromDecompositionTree(
                BinaryDecompositionTree<Job> tree) {
            DirectedAcyclicGraph<Job, DefaultEdge> jobDag =
                    new DirectedAcyclicGraph<>(DefaultEdge.class);
            if (tree.getRootNode() != null) {
                traverseNodes(jobDag, tree.getRootNode());
            }
            return jobDag;
        }

        private static GraphEndPoints traverseNodes(DirectedAcyclicGraph<Job, DefaultEdge> jobDag,
                Node<Job> node) {
            switch (node.getNodeType()) {
                case LEAF: {
                    Job j = node.getObject();
                    jobDag.addVertex(j);
                    Set<Job> endPoints = new HashSet<Job>(Arrays.asList(j));
                    return new GraphEndPoints(endPoints, endPoints);
                }
                case SEQ: {
                    GraphEndPoints leftEndPoints = traverseNodes(jobDag, node.getLeftNode());
                    GraphEndPoints rightEndPoints = traverseNodes(jobDag, node.getRightNode());
                    for (Job l : leftEndPoints.right) {
                        for (Job r : rightEndPoints.left) {
                            if (r == l) {
                                continue;
                            }
                            try {
                                jobDag.addDagEdge(l, r);
                            } catch (Exception e) {
                            }
                        }
                    }
                    return new GraphEndPoints(leftEndPoints.left, rightEndPoints.right);
                }
                case PAR: {
                    GraphEndPoints leftEndPoints = traverseNodes(jobDag, node.getLeftNode());
                    GraphEndPoints rightEndPoints = traverseNodes(jobDag, node.getRightNode());
                    Set<Job> leftEndPointJobs = Sets.newHashSet(
                            Iterables.concat(leftEndPoints.left, rightEndPoints.left));
                    Set<Job> rightEndPointJobs = Sets.newHashSet(
                            Iterables.concat(leftEndPoints.right, rightEndPoints.right));
                    return new GraphEndPoints(leftEndPointJobs, rightEndPointJobs);
                }
                default:
                    break;
            }
            return new GraphEndPoints(new HashSet<>(), new HashSet<>());
        }

        public static class GraphEndPoints {
            public Set<Job> left;
            public Set<Job> right;

            public GraphEndPoints(Set<Job> left, Set<Job> right) {
                this.left = left;
                this.right = right;
            }
        }
    }
}
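The DagUtils helpers removed here now live in mvd.jester.utils.DagUtils (added in this commit with +391 lines): the workload of a DAG task is the sum of all job WCETs, and the critical path is the largest relative completion time along any path. A minimal sketch of calling the moved helpers on a small fork-join DAG; the Job(wcet) constructor used below is an assumption for illustration and is not shown in this diff:

import org.jgrapht.experimental.dag.DirectedAcyclicGraph;
import org.jgrapht.graph.DefaultEdge;
import mvd.jester.model.Job;
import mvd.jester.utils.DagUtils;

public class DagUtilsExample {
    public static void main(String[] args) throws Exception {
        DirectedAcyclicGraph<Job, DefaultEdge> dag =
                new DirectedAcyclicGraph<>(DefaultEdge.class);

        // Hypothetical Job(wcet) constructor, assumed for this sketch.
        Job fork = new Job(2);
        Job left = new Job(4);
        Job right = new Job(3);
        Job join = new Job(1);
        dag.addVertex(fork);
        dag.addVertex(left);
        dag.addVertex(right);
        dag.addVertex(join);
        dag.addDagEdge(fork, left);
        dag.addDagEdge(fork, right);
        dag.addDagEdge(left, join);
        dag.addDagEdge(right, join);

        System.out.println(DagUtils.calculateWorkload(dag));     // 2 + 4 + 3 + 1 = 10
        System.out.println(DagUtils.calculateCriticalPath(dag)); // 2 + 4 + 1 = 7
    }
}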
src/main/java/mvd/jester/model/SortedTaskSet.java  (view file @ f0a948aa)

@@ -17,7 +17,7 @@ public class SortedTaskSet<T extends Task> extends TreeSet<T> {
    private static final long serialVersionUID = 4808544133562675597L;

    public SortedTaskSet(PriorityManager priorityMananger) {
    public SortedTaskSet(final PriorityManager priorityMananger) {
        super((t1, t2) -> priorityMananger.compare(t1, t2));
    }

@@ -28,7 +28,8 @@ public class SortedTaskSet<T extends Task> extends TreeSet<T> {
    public double getParallelTaskRatio() {
        long parallelTasks = super.stream().filter(t -> t.getMaximumParallelism() > 1).count();
        final long parallelTasks =
                super.stream().filter(t -> t.getMaximumParallelism() > 1).count();
        return (double) parallelTasks / super.size();
    }

@@ -36,11 +37,11 @@ public class SortedTaskSet<T extends Task> extends TreeSet<T> {
    public static class Deserializer<T extends Task> implements JsonDeserializer<SortedTaskSet<T>> {

        @Override
        public SortedTaskSet<T> deserialize(JsonElement json, Type typeOfT,
                JsonDeserializationContext context) throws JsonParseException {
            SortedTaskSet<T> taskSet = new SortedTaskSet<>(new RateMonotonic());
        public SortedTaskSet<T> deserialize(final JsonElement json, final Type typeOfT,
                final JsonDeserializationContext context) throws JsonParseException {
            final SortedTaskSet<T> taskSet = new SortedTaskSet<>(new RateMonotonic());
            if (json.isJsonArray()) {
                JsonArray array = json.getAsJsonArray();
                final JsonArray array = json.getAsJsonArray();
                array.forEach(e -> {
                    taskSet.add(context.deserialize(e, SynchronousTask.class));
                });
src/main/java/mvd/jester/model/SynchronousTask.java  (view file @ f0a948aa)

@@ -14,8 +14,8 @@ public class SynchronousTask implements Task {
    private final long criticalPath;
    private final long numberOfThreads;

    public SynchronousTask(long period, long deadline, long numberOfThreads, Set<Segment> segments) {
    public SynchronousTask(final long period, final long deadline, final long numberOfThreads,
            final Set<Segment> segments) {
        this.deadline = deadline;
        this.period = period;
        this.numberOfThreads = numberOfThreads;

@@ -24,7 +24,8 @@ public class SynchronousTask implements Task {
        this.criticalPath = SynchronousUtils.calculateCriticalPath(segments);
    }

    public SynchronousTask(Set<Segment> segments, long period, long numberOfThreads) {
    public SynchronousTask(final Set<Segment> segments, final long period,
            final long numberOfThreads) {
        this(period, period, numberOfThreads, segments);
    }

@@ -70,7 +71,7 @@ public class SynchronousTask implements Task {
    @Override
    public long getMaximumParallelism() {
        long max = 0;
        for (Segment s : segments) {
        for (final Segment s : segments) {
            if (max < s.getNumberOfJobs()) {
                max = s.getNumberOfJobs();
            }

@@ -84,18 +85,18 @@ public class SynchronousTask implements Task {
    }

    public static class SynchronousUtils {
        public static long calculateWorkload(Set<Segment> segments) {
        public static long calculateWorkload(final Set<Segment> segments) {
            long workload = 0;
            for (Segment s : segments) {
            for (final Segment s : segments) {
                workload += s.getJobWcet() * s.getNumberOfJobs();
            }
            return workload;
        }

        public static long calculateCriticalPath(Set<Segment> segments) {
        public static long calculateCriticalPath(final Set<Segment> segments) {
            long criticalPath = 0;
            for (Segment s : segments) {
            for (final Segment s : segments) {
                criticalPath += s.getJobWcet();
            }
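SynchronousUtils keeps the same two aggregate measures for segment-based tasks: the workload sums jobWcet times numberOfJobs over all segments, and the critical path sums one jobWcet per segment. A short sketch, assuming a Segment(duration, numberOfJobs) constructor matching how segments are built elsewhere in this commit (e.g. new Segment(width, parallelJobs.size())); the wrapper class name is hypothetical:

import java.util.LinkedHashSet;
import java.util.Set;
import mvd.jester.model.Segment;
import mvd.jester.model.SynchronousTask.SynchronousUtils;

public class SynchronousUtilsExample {
    public static void main(String[] args) {
        Set<Segment> segments = new LinkedHashSet<>();
        segments.add(new Segment(3, 1)); // serial segment, WCET 3
        segments.add(new Segment(2, 4)); // 4 parallel jobs, WCET 2 each
        segments.add(new Segment(1, 1)); // serial segment, WCET 1

        System.out.println(SynchronousUtils.calculateWorkload(segments));     // 3*1 + 2*4 + 1*1 = 12
        System.out.println(SynchronousUtils.calculateCriticalPath(segments)); // 3 + 2 + 1 = 6
    }
}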
src/main/java/mvd/jester/model/SystemSetup.java  (view file @ f0a948aa)

@@ -13,7 +13,7 @@ import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import org.jgrapht.experimental.dag.DirectedAcyclicGraph;
import org.jgrapht.graph.DefaultEdge;
import mvd.jester.model.DagTask.DagUtils;
import mvd.jester.utils.DagUtils;

/**
 * TaskSet

@@ -219,7 +219,7 @@ public class SystemSetup<T extends Task> {
        return ThreadLocalRandom.current().nextLong(1, 100);
    }

    public Set<DagTask> generateTaskSet(double totalUtilization) {
    public Set<DagTask> generateTaskSet(final double totalUtilization) {
        final LinkedHashSet<DagTask> taskSet = new LinkedHashSet<>();
        double currentUtilization = 0;
        while (currentUtilization <= totalUtilization) {
src/main/java/mvd/jester/model/TreeJob.java  (view file @ f0a948aa)

@@ -4,7 +4,7 @@ public class TreeJob {
    private long wcet;
    private final Job job;

    public TreeJob(Job job) {
    public TreeJob(final Job job) {
        this.wcet = job.getWcet();
        this.job = job;
    }

@@ -26,7 +26,7 @@ public class TreeJob {
    /**
     * @param wcet the wcet to set
     */
    public void setWcet(long wcet) {
    public void setWcet(final long wcet) {
        this.wcet = wcet;
    }
}
src/main/java/mvd/jester/tests/ChwaLee.java  (view file @ f0a948aa)

@@ -4,14 +4,12 @@ import java.math.RoundingMode;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.google.common.math.LongMath;
import mvd.jester.info.SchedulingInfo;
import mvd.jester.info.TerminationInfo;
import mvd.jester.info.TerminationInfo.Level;
import mvd.jester.model.Segment;
import mvd.jester.model.SortedTaskSet;
import mvd.jester.model.SynchronousTask;

@@ -26,7 +24,7 @@ public class ChwaLee extends AbstractTest<SynchronousTask> {
    private final Map<SynchronousTask, TerminationInfo> responseTimes;
    private final PriorityManager priorityManager;

    public ChwaLee(long numberOfProcessors) {
    public ChwaLee(final long numberOfProcessors) {
        super(numberOfProcessors);
        this.responseTimes = new HashMap<>();
        this.priorityManager = new EarliestDeadlineFirst();

@@ -38,27 +36,26 @@ public class ChwaLee extends AbstractTest<SynchronousTask> {
    }

    @Override
    public SchedulingInfo runSchedulabilityCheck(SortedTaskSet<SynchronousTask> tasks) {
    public SchedulingInfo runSchedulabilityCheck(final SortedTaskSet<SynchronousTask> tasks) {
        responseTimes.clear();
        for (SynchronousTask t : tasks) {
            Level taskLevel = tasks.headSet(t).size() <= tasks.size() / 2 ? Level.HIGH : Level.LOW;
            long responseTime = calculateResponseTime(tasks, t);
            responseTimes.put(t, new TerminationInfo(t.getDeadline(), responseTime, taskLevel));
        for (final SynchronousTask t : tasks) {
            final long responseTime = calculateResponseTime(tasks, t);
            responseTimes.put(t, new TerminationInfo(t.getDeadline(), responseTime));
        }

        return new SchedulingInfo(new HashSet<>(responseTimes.values()),
                tasks.getParallelTaskRatio(), tasks.getUtilization());
        return new SchedulingInfo(responseTimes.values());
    }

    private long calculateResponseTime(Set<SynchronousTask> tasks, SynchronousTask task) {
        long minimumWcet = getMinimumWcet(task);
        long deadline = task.getDeadline();
    private long calculateResponseTime(final Set<SynchronousTask> tasks,
            final SynchronousTask task) {
        final long minimumWcet = getMinimumWcet(task);
        final long deadline = task.getDeadline();

        long taskInterference = 0;
        for (SynchronousTask t : tasks) {
        for (final SynchronousTask t : tasks) {
            if (!t.equals(task)) {
                long maxNumberOfJobs = t.getMaximumParallelism();
                final long maxNumberOfJobs = t.getMaximumParallelism();
                for (long p = 0; p < maxNumberOfJobs; ++p) {
                    taskInterference += Math.min(getTaskInterference(t, deadline, p + 1),
                            deadline - minimumWcet);

@@ -68,23 +65,24 @@ public class ChwaLee extends AbstractTest<SynchronousTask> {
        long selfInterference = 0;
        long maxNumberOfJobs = task.getMaximumParallelism();
        final long maxNumberOfJobs = task.getMaximumParallelism();
        for (long p = 0; p < maxNumberOfJobs; ++p) {
            selfInterference += Math.min(getSelfInterference(task, deadline, p + 1),
                    deadline - minimumWcet);
        }

        boolean feasible = taskInterference + selfInterference <= numberOfProcessors
        final boolean feasible = taskInterference + selfInterference <= numberOfProcessors
                * (deadline - minimumWcet);

        return feasible ? deadline - 1 : deadline + 1;
    }

    private long getSelfInterference(SynchronousTask task, long deadline, long p) {
    private long getSelfInterference(final SynchronousTask task, final long deadline,
            final long p) {
        long selfInterference = 0;
        for (Segment s : task.getWorkloadDistribution()) {
        for (final Segment s : task.getWorkloadDistribution()) {
            if (s.getNumberOfJobs() >= p + 1) {
                selfInterference += s.getJobWcet();
            }

@@ -92,24 +90,24 @@ public class ChwaLee extends AbstractTest<SynchronousTask> {
        return selfInterference;
    }

    private long getTaskInterference(SynchronousTask t, long deadline, long p) {
        long numberOfBodyJobs = LongMath.divide(deadline, t.getPeriod(), RoundingMode.FLOOR);
    private long getTaskInterference(final SynchronousTask t, final long deadline, final long p) {
        final long numberOfBodyJobs =
                LongMath.divide(deadline, t.getPeriod(), RoundingMode.FLOOR);

        long workloadOfBodyJobs = 0;
        for (Segment s : t.getWorkloadDistribution()) {
        for (final Segment s : t.getWorkloadDistribution()) {
            if (s.getNumberOfJobs() >= p) {
                workloadOfBodyJobs += s.getJobWcet();
            }
        }

        long boundedBodyWorkload = numberOfBodyJobs * workloadOfBodyJobs;
        final long boundedBodyWorkload = numberOfBodyJobs * workloadOfBodyJobs;

        long boundedCarryInWorkload = 0;
        long remainingLength = deadline % t.getPeriod();
        final long remainingLength = deadline % t.getPeriod();
        long carryInLength = 0;

        List<Segment> segmentList = new ArrayList<>(t.getWorkloadDistribution());
        final List<Segment> segmentList = new ArrayList<>(t.getWorkloadDistribution());
        Collections.reverse(segmentList);
        for (Segment s : segmentList) {
        for (final Segment s : segmentList) {
            carryInLength += s.getJobWcet();
            if (s.getNumberOfJobs() >= p && remainingLength > carryInLength) {
                boundedCarryInWorkload += s.getJobWcet();

@@ -124,9 +122,9 @@ public class ChwaLee extends AbstractTest<SynchronousTask> {
        return boundedBodyWorkload + boundedCarryInWorkload;
    }

    private long getMinimumWcet(SynchronousTask task) {
    private long getMinimumWcet(final SynchronousTask task) {
        long minWcet = 0;
        for (Segment s : task.getWorkloadDistribution()) {
        for (final Segment s : task.getWorkloadDistribution()) {
            minWcet += s.getJobWcet();
        }
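ChwaLee is a deadline-based test: instead of iterating a response time it checks whether the bounded interference fits into m * (deadline - minimumWcet) and then reports deadline - 1 (schedulable) or deadline + 1 (not schedulable). A small arithmetic sketch of that decision with made-up numbers; only the final condition mirrors the code above:

public class ChwaLeeDecisionSketch {
    public static void main(String[] args) {
        long numberOfProcessors = 4;
        long deadline = 100;
        long minimumWcet = 30;       // sum of the task's per-segment job WCETs
        long taskInterference = 180; // bounded interference of the other tasks (made up)
        long selfInterference = 60;  // bounded self-interference (made up)

        boolean feasible = taskInterference + selfInterference
                <= numberOfProcessors * (deadline - minimumWcet);
        long reportedResponseTime = feasible ? deadline - 1 : deadline + 1;
        System.out.println(feasible + " -> " + reportedResponseTime); // true -> 99
    }
}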
src/main/java/mvd/jester/tests/FonsecaNelis.java  (view file @ f0a948aa)

@@ -17,14 +17,13 @@ import org.jgrapht.experimental.dag.DirectedAcyclicGraph;
import org.jgrapht.graph.DefaultEdge;
import mvd.jester.info.SchedulingInfo;
import mvd.jester.info.TerminationInfo;
import mvd.jester.info.TerminationInfo.Level;
import mvd.jester.model.DagTask;
import mvd.jester.model.Job;
import mvd.jester.model.Segment;
import mvd.jester.model.SortedTaskSet;
import mvd.jester.model.Task;
import mvd.jester.model.TreeJob;
import mvd.jester.model.DagTask.DagUtils;
import mvd.jester.utils.DagUtils;
import mvd.jester.priority.PriorityManager;
import mvd.jester.priority.RateMonotonic;
import mvd.jester.utils.BinaryDecompositionTree;

@@ -37,7 +36,7 @@ public class FonsecaNelis extends AbstractTest<DagTask> {
    private final PriorityManager priorityManager;
    private final Map<Task, Set<Segment>> sortedSegments;

    public FonsecaNelis(long numberOfProcessors) {
    public FonsecaNelis(final long numberOfProcessors) {
        super(numberOfProcessors);
        this.responseTimes = new HashMap<>();
        this.priorityManager = new RateMonotonic();

@@ -50,44 +49,45 @@ public class FonsecaNelis extends AbstractTest<DagTask> {
    }

    @Override
    public SchedulingInfo runSchedulabilityCheck(SortedTaskSet<DagTask> tasks) {
    public SchedulingInfo runSchedulabilityCheck(final SortedTaskSet<DagTask> tasks) {
        sortedSegments.clear();
        responseTimes.clear();
        createNFJandDecompositionTree(tasks);
        for (DagTask t : tasks) {
            long responseTime = calculateResponseTime(tasks, t);
            responseTimes.put(t, new TerminationInfo(t.getDeadline(), responseTime, Level.HIGH));
        for (final DagTask t : tasks) {
            final long responseTime = calculateResponseTime(tasks, t);
            responseTimes.put(t, new TerminationInfo(t.getDeadline(), responseTime));
        }

        return new SchedulingInfo(new HashSet<>(responseTimes.values()),
                tasks.getParallelTaskRatio(), tasks.getUtilization());
        return new SchedulingInfo(responseTimes.values());
    }

    private void createNFJandDecompositionTree(SortedTaskSet<DagTask> tasks) {
        for (DagTask t : tasks) {
            DirectedAcyclicGraph<Job, DefaultEdge> jobDag = t.getJobDag();
    private void createNFJandDecompositionTree(final SortedTaskSet<DagTask> tasks) {
        for (final DagTask t : tasks) {
            final DirectedAcyclicGraph<Job, DefaultEdge> jobDag = t.getJobDag();

            DirectedAcyclicGraph<Job, DefaultEdge> nfjJobDag = DagUtils.createNFJGraph(jobDag);
            BinaryDecompositionTree<Job> tree = DagUtils.createDecompositionTree(nfjJobDag);
            final DirectedAcyclicGraph<Job, DefaultEdge> nfjJobDag =
                    DagUtils.createNFJGraph(jobDag);
            final BinaryDecompositionTree<Job> tree = DagUtils.createDecompositionTree(nfjJobDag);
            sortedSegments.put(t, constructCarryOutDistribution(nfjJobDag, tree));
        }
    }

    private Set<Segment> constructCarryOutDistribution(
            DirectedAcyclicGraph<Job, DefaultEdge> nfjDag, BinaryDecompositionTree<Job> tree) {
        Set<Segment> carryOutWorkload = new LinkedHashSet<>();
        BinaryDecompositionTree<TreeJob> modifiedTree = transformTree(tree);
            final DirectedAcyclicGraph<Job, DefaultEdge> nfjDag,
            final BinaryDecompositionTree<Job> tree) {
        final Set<Segment> carryOutWorkload = new LinkedHashSet<>();
        final BinaryDecompositionTree<TreeJob> modifiedTree = transformTree(tree);
        boolean isEmpty = false;
        do {
            Set<TreeJob> parallelJobs = getMaximumParallelism(modifiedTree.getRootNode());
            Optional<TreeJob> min =
            final Set<TreeJob> parallelJobs = getMaximumParallelism(modifiedTree.getRootNode());
            final Optional<TreeJob> min =
                    parallelJobs.stream().min((p1, p2) -> Long.compare(p1.getWcet(), p2.getWcet()));
            if (min.isPresent()) {
                long width = min.get().getWcet();
                final long width = min.get().getWcet();
                carryOutWorkload.add(new Segment(width, parallelJobs.size()));
                for (TreeJob p : parallelJobs) {
                for (final TreeJob p : parallelJobs) {
                    p.setWcet(p.getWcet() - width);
                }
            } else {

@@ -100,33 +100,34 @@ public class FonsecaNelis extends AbstractTest<DagTask> {
    }

    private BinaryDecompositionTree<TreeJob> transformTree(BinaryDecompositionTree<Job> tree) {
        BinaryDecompositionTree<TreeJob> modifiedTree = new BinaryDecompositionTree<>();
        Node<TreeJob> root = transformNode(null, tree.getRootNode());
    private BinaryDecompositionTree<TreeJob> transformTree(
            final BinaryDecompositionTree<Job> tree) {
        final BinaryDecompositionTree<TreeJob> modifiedTree = new BinaryDecompositionTree<>();
        final Node<TreeJob> root = transformNode(null, tree.getRootNode());
        modifiedTree.setRoot(root);
        return modifiedTree;
    }

    private Node<TreeJob> transformNode(Node<TreeJob> parent, Node<Job> node) {
    private Node<TreeJob> transformNode(final Node<TreeJob> parent, final Node<Job> node) {
        if (node.getNodeType().equals(NodeType.LEAF)) {
            return new Node<TreeJob>(parent, new TreeJob(node.getObject()));
        } else {
            Node<TreeJob> modifiedNode = new Node<TreeJob>(null, node.getNodeType());
            final Node<TreeJob> modifiedNode = new Node<TreeJob>(null, node.getNodeType());
            modifiedNode.setLeftNode(transformNode(modifiedNode, node.getLeftNode()));
            modifiedNode.setRightNode(transformNode(modifiedNode, node.getRightNode()));
            return modifiedNode;
        }
    }

    private Set<TreeJob> getMaximumParallelism(Node<TreeJob> node) {
        NodeType nodeType = node.getNodeType();
    private Set<TreeJob> getMaximumParallelism(final Node<TreeJob> node) {
        final NodeType nodeType = node.getNodeType();
        if (nodeType.equals(NodeType.PAR)) {
            Set<TreeJob> left = getMaximumParallelism(node.getLeftNode());
            Set<TreeJob> right = getMaximumParallelism(node.getRightNode());
            final Set<TreeJob> left = getMaximumParallelism(node.getLeftNode());
            final Set<TreeJob> right = getMaximumParallelism(node.getRightNode());
            return Sets.union(left, right);
        } else if (nodeType.equals(NodeType.SEQ)) {
            Set<TreeJob> left = getMaximumParallelism(node.getLeftNode());
            Set<TreeJob> right = getMaximumParallelism(node.getRightNode());
            final Set<TreeJob> left = getMaximumParallelism(node.getLeftNode());
            final Set<TreeJob> right = getMaximumParallelism(node.getRightNode());
            if (left.size() >= right.size()) {
                return left;
            } else {

@@ -141,15 +142,15 @@ public class FonsecaNelis extends AbstractTest<DagTask> {
        }
    }

    private long calculateResponseTime(SortedTaskSet<DagTask> tasks, DagTask task) {
        long criticalPath = task.getCriticalPath();
    private long calculateResponseTime(final SortedTaskSet<DagTask> tasks, final DagTask task) {
        final long criticalPath = task.getCriticalPath();
        long responseTime = criticalPath;
        long previousResponseTime = 0;

        do {
            previousResponseTime = responseTime;
            double taskInterference = 0;
            for (DagTask t : tasks) {
            for (final DagTask t : tasks) {
                if (t.getPeriod() < task.getPeriod()) {
                    taskInterference += getTaskInterference(t, responseTime);
                }

@@ -157,9 +158,9 @@ public class FonsecaNelis extends AbstractTest<DagTask> {
            taskInterference /= numberOfProcessors;
            double selfInterference = getSelfInterference(task);
            final double selfInterference = getSelfInterference(task);

            long totalInterference = (long) Math.floor(taskInterference + selfInterference);
            final long totalInterference = (long) Math.floor(taskInterference + selfInterference);

            responseTime = criticalPath + totalInterference;
        } while (responseTime != previousResponseTime);

@@ -167,37 +168,40 @@ public class FonsecaNelis extends AbstractTest<DagTask> {
        return responseTime;
    }

    private double getTaskInterference(DagTask task, long interval) {
        long period = task.getPeriod();
        long criticalPath = task.getCriticalPath();
        long numberOfIntervals =
    private double getTaskInterference(final DagTask task, final long interval) {
        final long period = task.getPeriod();
        final long criticalPath = task.getCriticalPath();
        final long numberOfIntervals =
                LongMath.divide(interval - criticalPath, period, RoundingMode.FLOOR);
        long carryInAndOutInterval = interval - Math.max(0, numberOfIntervals) * period;
        long numberOfBodyJobs =
        final long carryInAndOutInterval = interval - Math.max(0, numberOfIntervals) * period;
        final long numberOfBodyJobs =
                LongMath.divide(interval - carryInAndOutInterval, period, RoundingMode.FLOOR);
        long bodyWorkload = Math.max(0, numberOfBodyJobs) * task.getWorkload();
        final long bodyWorkload = Math.max(0, numberOfBodyJobs) * task.getWorkload();
        long carryInAndOutWorkload = getCarryInAndOutWorkload(task, task.getWorkloadDistribution(),
                sortedSegments.get(task), carryInAndOutInterval);
        final long carryInAndOutWorkload = getCarryInAndOutWorkload(task,
                task.getWorkloadDistribution(), sortedSegments.get(task), carryInAndOutInterval);

        return carryInAndOutWorkload + bodyWorkload;
    }

    private long getCarryInAndOutWorkload(DagTask task, Set<Segment> carryInDistribution,
            Set<Segment> carryOutDistribution, long carryInAndOutPeriod) {
    private long getCarryInAndOutWorkload(final DagTask task,
            final Set<Segment> carryInDistribution, final Set<Segment> carryOutDistribution,
            final long carryInAndOutPeriod) {
        long workload = getCarryOutWorkload(task, carryOutDistribution, carryInAndOutPeriod);
        long carryInPeriod = task.getPeriod() - responseTimes.get(task).getResponseTime();
        long carryOutPeriod = 0;
        List<Segment> carryInList = Lists.newArrayList(carryInDistribution);
        final List<Segment> carryInList = Lists.newArrayList(carryInDistribution);
        Collections.reverse(carryInList);
        for (Segment s : carryInList) {
        for (final Segment s : carryInList) {
            carryInPeriod += s.getJobWcet();
            carryOutPeriod = carryInAndOutPeriod - carryInPeriod;
            long carryInWorkload = getCarryInWorkload(task, carryInDistribution, carryInPeriod);
            long carryOutWorkload = getCarryOutWorkload(task, carryOutDistribution, carryOutPeriod);
            final long carryInWorkload =
                    getCarryInWorkload(task, carryInDistribution, carryInPeriod);
            final long carryOutWorkload =
                    getCarryOutWorkload(task, carryOutDistribution, carryOutPeriod);
            workload = Math.max(workload, carryInWorkload + carryOutWorkload);
        }

@@ -205,68 +209,70 @@ public class FonsecaNelis extends AbstractTest<DagTask> {
                getCarryInWorkload(task, carryInDistribution, carryInAndOutPeriod));

        carryOutPeriod = 0;
        for (Segment s : carryOutDistribution) {
        for (final Segment s : carryOutDistribution) {
            carryOutPeriod += s.getJobWcet();
            carryInPeriod = carryInAndOutPeriod - carryOutPeriod;
            long carryInWorkload = getCarryInWorkload(task, carryInDistribution, carryInPeriod);
            long carryOutWorkload = getCarryOutWorkload(task, carryOutDistribution, carryOutPeriod);
            final long carryInWorkload =
                    getCarryInWorkload(task, carryInDistribution, carryInPeriod);
            final long carryOutWorkload =
                    getCarryOutWorkload(task, carryOutDistribution, carryOutPeriod);
            workload = Math.max(workload, carryInWorkload + carryOutWorkload);
        }

        return workload;
    }

    private long getCarryOutWorkload(DagTask task, Set<Segment> carryOutDistribution,
            long carryOutPeriod) {
    private long getCarryOutWorkload(final DagTask task, final Set<Segment> carryOutDistribution,
            final long carryOutPeriod) {
        long workload = 0;
        long period = task.getPeriod();
        long responseTime = responseTimes.get(task).getResponseTime();
        List<Segment> distributionList = Lists.newArrayList(carryOutDistribution);
        final long period = task.getPeriod();
        final long responseTime = responseTimes.get(task).getResponseTime();
        final List<Segment> distributionList = Lists.newArrayList(carryOutDistribution);

        for (int i = 0; i < distributionList.size(); ++i) {
            Segment s = distributionList.get(i);
            final Segment s = distributionList.get(i);
            long weightOfPreviousSegments = 0;
            for (int j = 0; j < i; ++j) {
                weightOfPreviousSegments += distributionList.get(j).getJobWcet();
            }
            long width = carryOutPeriod - weightOfPreviousSegments;
            final long width = carryOutPeriod - weightOfPreviousSegments;
            workload += Math.max(Math.min(width, s.getJobWcet()), 0) * s.getNumberOfJobs();
        }

        long improvedWorkload =
        final long improvedWorkload =
                Math.max(carryOutPeriod - (period - responseTime), 0) * numberOfProcessors;

        return Math.min(improvedWorkload, workload);
    }

    private long getCarryInWorkload(DagTask task, Set<Segment> carryInDistribution,
            long carryInPeriod) {
    private long getCarryInWorkload(final DagTask task, final Set<Segment> carryInDistribution,
            final long carryInPeriod) {
        long workload = 0;
        long period = task.getPeriod();
        long responseTime = responseTimes.get(task).getResponseTime();
        List<Segment> distributionList = Lists.newArrayList(carryInDistribution);
        final long period = task.getPeriod();
        final long responseTime = responseTimes.get(task).getResponseTime();
        final List<Segment> distributionList = Lists.newArrayList(carryInDistribution);

        for (int i = 0; i < carryInDistribution.size(); ++i) {
            Segment s = distributionList.get(i);
            final Segment s = distributionList.get(i);
            long weightOfRemainingSegments = 0;
            for (int j = i + 1; j < carryInDistribution.size(); ++j) {
                weightOfRemainingSegments += distributionList.get(j).getJobWcet();
            }
            long width = carryInPeriod - period + responseTime - weightOfRemainingSegments;
            final long width = carryInPeriod - period + responseTime - weightOfRemainingSegments;
            workload += Math.max(Math.min(width, s.getJobWcet()), 0) * s.getNumberOfJobs();
        }

        long improvedWorkload =
        final long improvedWorkload =
                Math.max(carryInPeriod - (period - responseTime), 0) * numberOfProcessors;

        return Math.min(improvedWorkload, workload);
    }

    private double getSelfInterference(DagTask task) {
        long criticalPath = task.getCriticalPath();
        long workload = task.getWorkload();
    private double getSelfInterference(final DagTask task) {
        final long criticalPath = task.getCriticalPath();
        final long workload = task.getWorkload();

        return (double) (workload - criticalPath) / numberOfProcessors;
    }
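FonsecaNelis iterates a fixed point of the form R = criticalPath + floor(taskInterference(R) / m + (workload - criticalPath) / m), where the task interference splits each window into body jobs plus a carry-in/carry-out part taken from the sorted segment distributions. The sketch below shows only the fixed-point skeleton with a deliberately simplified, made-up interference term (one interfering release per started period), not the carry-in/carry-out bound of the code above:

public class ResponseTimeIterationSketch {
    public static void main(String[] args) {
        long m = 4;
        long criticalPath = 50;
        long workload = 170;
        long period = 120;                       // period of the single interfering task (made up)
        long interferingWorkloadPerRelease = 60; // its workload per release (made up)

        long responseTime = criticalPath;
        long previous;
        do {
            previous = responseTime;
            double taskInterference =
                    ((previous / period) + 1) * interferingWorkloadPerRelease / (double) m;
            double selfInterference = (double) (workload - criticalPath) / m;
            responseTime = criticalPath + (long) Math.floor(taskInterference + selfInterference);
        } while (responseTime != previous);

        System.out.println(responseTime); // converges at 95 for these numbers
    }
}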
src/main/java/mvd/jester/tests/MaiaBertogna.java  (view file @ f0a948aa)

@@ -2,13 +2,11 @@ package mvd.jester.tests;
import java.math.RoundingMode;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import com.google.common.math.LongMath;
import mvd.jester.info.SchedulingInfo;
import mvd.jester.info.TerminationInfo;
import mvd.jester.info.TerminationInfo.Level;
import mvd.jester.model.Segment;
import mvd.jester.model.SortedTaskSet;
import mvd.jester.model.SynchronousTask;

@@ -23,7 +21,7 @@ public class MaiaBertogna extends AbstractTest<SynchronousTask> {
    private final Map<SynchronousTask, TerminationInfo> responseTimes;
    private final PriorityManager priorityManager;

    public MaiaBertogna(long numberOfProcessors) {
    public MaiaBertogna(final long numberOfProcessors) {
        super(numberOfProcessors);
        this.responseTimes = new HashMap<>();
        this.priorityManager = new RateMonotonic();

@@ -35,16 +33,14 @@ public class MaiaBertogna extends AbstractTest<SynchronousTask> {
    }

    @Override
    public SchedulingInfo runSchedulabilityCheck(SortedTaskSet<SynchronousTask> tasks) {
    public SchedulingInfo runSchedulabilityCheck(final SortedTaskSet<SynchronousTask> tasks) {
        responseTimes.clear();
        for (SynchronousTask t : tasks) {
            Level taskLevel = tasks.headSet(t).size() <= tasks.size() / 2 ? Level.HIGH : Level.LOW;
            long responseTime = calculateResponseTime(tasks, t);
            responseTimes.put(t, new TerminationInfo(t.getDeadline(), responseTime, taskLevel));
        for (final SynchronousTask t : tasks) {
            final long responseTime = calculateResponseTime(tasks, t);
            responseTimes.put(t, new TerminationInfo(t.getDeadline(), responseTime));
        }

        return new SchedulingInfo(new HashSet<>(responseTimes.values()),
                tasks.getParallelTaskRatio(), tasks.getUtilization());
        return new SchedulingInfo(responseTimes.values());
    }

    @Override
@@ -52,8 +48,9 @@ public class MaiaBertogna extends AbstractTest<SynchronousTask> {
        return "MaiaBertogna";
    }

    private long calculateResponseTime(Set<SynchronousTask> tasks, SynchronousTask task) {
        long minimumWcet = getMinimumWcet(task);
    private long calculateResponseTime(final Set<SynchronousTask> tasks,
            final SynchronousTask task) {
        final long minimumWcet = getMinimumWcet(task);
        long responseTime = minimumWcet;
        long previousResponseTime = 0;

@@ -61,9 +58,9 @@ public class MaiaBertogna extends AbstractTest<SynchronousTask> {
            previousResponseTime = responseTime;
            long taskInterference = 0;
            for (SynchronousTask t : tasks) {
            for (final SynchronousTask t : tasks) {
                if (t.getPeriod() < task.getPeriod()) {
                    long maxNumberOfJobsOfT = t.getMaximumParallelism();
                    final long maxNumberOfJobsOfT = t.getMaximumParallelism();
                    for (int p = 0; p < maxNumberOfJobsOfT; ++p) {
                        taskInterference += Math.min(getTaskInterference(t, responseTime, p + 1),
                                responseTime - minimumWcet + 1);

@@ -73,13 +70,13 @@ public class MaiaBertogna extends AbstractTest<SynchronousTask> {
            long selfInterference = 0;
            long maxNumberOfJobs = task.getMaximumParallelism();
            final long maxNumberOfJobs = task.getMaximumParallelism();
            for (int p = 0; p < maxNumberOfJobs; ++p) {
                selfInterference += Math.min(getSelfInterference(task, p + 1),
                        responseTime - minimumWcet + 1);
            }

            long totalInterference = LongMath.divide(taskInterference + selfInterference,
            final long totalInterference = LongMath.divide(taskInterference + selfInterference,
                    numberOfProcessors, RoundingMode.FLOOR);

            responseTime = minimumWcet + totalInterference;

@@ -89,10 +86,10 @@ public class MaiaBertogna extends AbstractTest<SynchronousTask> {
        return responseTime;
    }

    private long getSelfInterference(SynchronousTask task, long parallelism) {
    private long getSelfInterference(final SynchronousTask task, final long parallelism) {
        long interference = 0;
        for (Segment s : task.getWorkloadDistribution()) {
        for (final Segment s : task.getWorkloadDistribution()) {
            if (s.getNumberOfJobs() >= parallelism + 1) {
                interference += s.getJobWcet();
            }

@@ -101,24 +98,25 @@ public class MaiaBertogna extends AbstractTest<SynchronousTask> {
        return interference;
    }

    private long getTaskInterference(SynchronousTask task, long interval, long parallelism) {
    private long getTaskInterference(final SynchronousTask task, final long interval,
            final long parallelism) {
        if (responseTimes.containsKey(task)) {
            long responseTime = responseTimes.get(task).getResponseTime();
            long minWcet = getMinimumWcet(task);
            long period = task.getPeriod();
            long numberOfJobs =
            final long responseTime = responseTimes.get(task).getResponseTime();
            final long minWcet = getMinimumWcet(task);
            final long period = task.getPeriod();
            final long numberOfJobs =
                    (LongMath.divide(interval + responseTime - minWcet, period, RoundingMode.FLOOR)
                            + 1);

            long workload = 0;
            for (Segment s : task.getWorkloadDistribution()) {
            for (final Segment s : task.getWorkloadDistribution()) {
                if (s.getNumberOfJobs() >= parallelism) {
                    workload += s.getJobWcet();
                }
            }

            long interference = numberOfJobs * workload;
            final long interference = numberOfJobs * workload;
            return interference;
        } else {

@@ -126,9 +124,9 @@ public class MaiaBertogna extends AbstractTest<SynchronousTask> {
        }
    }

    private long getMinimumWcet(SynchronousTask task) {
    private long getMinimumWcet(final SynchronousTask task) {
        long minWcet = 0;
        for (Segment s : task.getWorkloadDistribution()) {
        for (final Segment s : task.getWorkloadDistribution()) {
            minWcet += s.getJobWcet();
        }
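In MaiaBertogna the interference of a higher-priority task at parallelism level p is the number of interfering jobs, floor((interval + R - minWcet) / period) + 1, multiplied by the workload of the segments that have at least p parallel jobs. A small sketch of that count with made-up numbers, using the same Guava LongMath.divide call as the code above; workloadAtLevelP stands in for the segment sum:

import java.math.RoundingMode;
import com.google.common.math.LongMath;

public class MaiaBertognaInterferenceSketch {
    public static void main(String[] args) {
        long interval = 80;      // current response-time estimate of the analysed task
        long responseTime = 40;  // response time already stored for the interfering task
        long minWcet = 25;       // sum of its segment job WCETs
        long period = 60;

        long numberOfJobs = LongMath.divide(interval + responseTime - minWcet, period,
                RoundingMode.FLOOR) + 1;                     // = 2 here
        long workloadAtLevelP = 12;                          // made-up segment sum for level p
        System.out.println(numberOfJobs * workloadAtLevelP); // interference contribution = 24
    }
}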
src/main/java/mvd/jester/tests/MelaniButtazzo.java  (view file @ f0a948aa)

@@ -2,13 +2,11 @@ package mvd.jester.tests;
import java.math.RoundingMode;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import com.google.common.math.DoubleMath;
import mvd.jester.info.SchedulingInfo;
import mvd.jester.info.TerminationInfo;
import mvd.jester.info.TerminationInfo.Level;
import mvd.jester.model.DagTask;
import mvd.jester.model.SortedTaskSet;
import mvd.jester.model.Task;

@@ -20,7 +18,7 @@ public class MelaniButtazzo extends AbstractTest<DagTask> {
    private final Map<Task, TerminationInfo> responseTimes;
    private final PriorityManager priorityManager;

    public MelaniButtazzo(long numberOfProcessors) {
    public MelaniButtazzo(final long numberOfProcessors) {
        super(numberOfProcessors);
        this.responseTimes = new HashMap<>();
        this.priorityManager = new RateMonotonic();

@@ -37,19 +35,18 @@ public class MelaniButtazzo extends AbstractTest<DagTask> {
    }

    @Override
    public SchedulingInfo runSchedulabilityCheck(SortedTaskSet<DagTask> tasks) {
    public SchedulingInfo runSchedulabilityCheck(final SortedTaskSet<DagTask> tasks) {
        responseTimes.clear();
        for (DagTask t : tasks) {
            long responseTime = calculateResponseTime(tasks, t);
            responseTimes.put(t, new TerminationInfo(t.getDeadline(), responseTime, Level.HIGH));
        for (final DagTask t : tasks) {
            final long responseTime = calculateResponseTime(tasks, t);
            responseTimes.put(t, new TerminationInfo(t.getDeadline(), responseTime));
        }

        return new SchedulingInfo(new HashSet<>(responseTimes.values()),
                tasks.getParallelTaskRatio(), tasks.getUtilization());
        return new SchedulingInfo(responseTimes.values());
    }

    private long calculateResponseTime(Set<DagTask> tasks, DagTask task) {
        long minimumWcet = task.getCriticalPath();
    private long calculateResponseTime(final Set<DagTask> tasks, final DagTask task) {
        final long minimumWcet = task.getCriticalPath();
        long responseTime = minimumWcet;
        long previousResponseTime = 0;

@@ -57,16 +54,16 @@ public class MelaniButtazzo extends AbstractTest<DagTask> {
            previousResponseTime = responseTime;
            double taskInterference = 0;
            for (DagTask t : tasks) {
            for (final DagTask t : tasks) {
                if (t.getPeriod() < task.getPeriod()) {
                    taskInterference += getTaskInterference(t, responseTime);
                }
            }

            taskInterference /= numberOfProcessors;
            double selfInterference = getSelfInterference(task);
            final double selfInterference = getSelfInterference(task);

            long totalInterference = (long) Math.floor(taskInterference + selfInterference);
            final long totalInterference = (long) Math.floor(taskInterference + selfInterference);

            responseTime = minimumWcet + totalInterference;
        } while (previousResponseTime != responseTime);

@@ -74,28 +71,29 @@ public class MelaniButtazzo extends AbstractTest<DagTask> {
        return responseTime;
    }

    private double getSelfInterference(DagTask task) {
        long criticalPath = task.getCriticalPath();
        long workload = task.getWorkload();
    private double getSelfInterference(final DagTask task) {
        final long criticalPath = task.getCriticalPath();
        final long workload = task.getWorkload();

        return (double) (workload - criticalPath) / numberOfProcessors;
    }

    private double getTaskInterference(DagTask task, long interval) {
    private double getTaskInterference(final DagTask task, final long interval) {
        if (responseTimes.containsKey(task)) {
            long responseTime = responseTimes.get(task).getResponseTime();
            long singleWorkload = task.getWorkload();
            long period = task.getPeriod();
            final long responseTime = responseTimes.get(task).getResponseTime();
            final long singleWorkload = task.getWorkload();
            final long period = task.getPeriod();

            double nominator =
            final double nominator =
                    (interval + responseTime - (double) singleWorkload / numberOfProcessors);
            long amountOfJobs = DoubleMath.roundToLong(nominator / period, RoundingMode.FLOOR);
            final long amountOfJobs =
                    DoubleMath.roundToLong(nominator / period, RoundingMode.FLOOR);
            double carryOutPortion =
            final double carryOutPortion =
                    Math.min(singleWorkload, numberOfProcessors * (nominator % period));
            double interference = amountOfJobs * singleWorkload + carryOutPortion;
            final double interference = amountOfJobs * singleWorkload + carryOutPortion;

            return interference;
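MelaniButtazzo bounds the interference of a higher-priority DAG task with whole releases plus a carry-out portion: nominator = interval + R - W/m, amountOfJobs = floor(nominator / period), carryOutPortion = min(W, m * (nominator mod period)). A sketch of that computation with made-up numbers, mirroring the DoubleMath call used above:

import java.math.RoundingMode;
import com.google.common.math.DoubleMath;

public class MelaniButtazzoInterferenceSketch {
    public static void main(String[] args) {
        long numberOfProcessors = 4;
        long interval = 100;      // response-time estimate of the analysed task
        long responseTime = 50;   // response time of the interfering task
        long singleWorkload = 80; // workload W of one job of the interfering task
        long period = 90;

        double nominator =
                interval + responseTime - (double) singleWorkload / numberOfProcessors;     // 130.0
        long amountOfJobs = DoubleMath.roundToLong(nominator / period, RoundingMode.FLOOR); // 1
        double carryOutPortion =
                Math.min(singleWorkload, numberOfProcessors * (nominator % period));        // 80.0
        System.out.println(amountOfJobs * singleWorkload + carryOutPortion);                // 160.0
    }
}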
src/main/java/mvd/jester/tests/SchmidMottok.java
View file @ f0a948aa
...
@@ -2,13 +2,11 @@ package mvd.jester.tests;
import java.math.RoundingMode;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import com.google.common.math.LongMath;
import mvd.jester.info.SchedulingInfo;
import mvd.jester.info.TerminationInfo;
-import mvd.jester.info.TerminationInfo.Level;
import mvd.jester.model.DagTask;
import mvd.jester.model.Segment;
import mvd.jester.model.SortedTaskSet;
...
@@ -24,7 +22,7 @@ public class SchmidMottok extends AbstractTest<DagTask> {
    private final Map<Task, TerminationInfo> responseTimes;
    private final PriorityManager priorityManager;

-    public SchmidMottok(long numberOfProcessors) {
+    public SchmidMottok(final long numberOfProcessors) {
        super(numberOfProcessors);
        this.responseTimes = new HashMap<>();
        this.priorityManager = new RateMonotonic();
...
@@ -36,15 +34,14 @@ public class SchmidMottok extends AbstractTest<DagTask> {
    }

    @Override
-    public SchedulingInfo runSchedulabilityCheck(SortedTaskSet<DagTask> tasks) {
+    public SchedulingInfo runSchedulabilityCheck(final SortedTaskSet<DagTask> tasks) {
        responseTimes.clear();
-        for (DagTask t : tasks) {
-            long responseTime = calculateResponseTime(tasks, t);
-            responseTimes.put(t, new TerminationInfo(t.getDeadline(), responseTime, Level.HIGH));
+        for (final DagTask t : tasks) {
+            final long responseTime = calculateResponseTime(tasks, t);
+            responseTimes.put(t, new TerminationInfo(t.getDeadline(), responseTime));
        }
-        return new SchedulingInfo(new HashSet<>(responseTimes.values()),
-                tasks.getParallelTaskRatio(), tasks.getUtilization());
+        return new SchedulingInfo(responseTimes.values());
    }

    @Override
...
@@ -52,8 +49,8 @@ public class SchmidMottok extends AbstractTest<DagTask> {
        return "SchmidMottok";
    }

-    private long calculateResponseTime(Set<DagTask> tasks, DagTask task) {
-        long minimumWcet = task.getCriticalPath();
+    private long calculateResponseTime(final Set<DagTask> tasks, final DagTask task) {
+        final long minimumWcet = task.getCriticalPath();
        long responseTime = minimumWcet;
        long previousResponseTime = 0;
...
@@ -61,9 +58,9 @@ public class SchmidMottok extends AbstractTest<DagTask> {
            previousResponseTime = responseTime;
            double taskInterference = 0;
-            for (DagTask t : tasks) {
+            for (final DagTask t : tasks) {
                if (t.getPeriod() < task.getPeriod()) {
-                    long numberOfThreads = t.getNumberOfThreads();
+                    final long numberOfThreads = t.getNumberOfThreads();
                    for (int p = 0; p < numberOfThreads; ++p) {
                        taskInterference += Math.min(getTaskInterference(t, responseTime, p + 1),
                                responseTime - minimumWcet + 1);
...
@@ -72,9 +69,9 @@ public class SchmidMottok extends AbstractTest<DagTask> {
            }
            taskInterference /= numberOfProcessors;
-            double selfInterference = getSelfInterference(task);
+            final double selfInterference = getSelfInterference(task);
-            long totalInterference = (long) Math.floor(taskInterference + selfInterference);
+            final long totalInterference = (long) Math.floor(taskInterference + selfInterference);
            responseTime = minimumWcet + totalInterference;
        } while (previousResponseTime != responseTime);
...
@@ -83,11 +80,11 @@ public class SchmidMottok extends AbstractTest<DagTask> {
    }

-    private double getSelfInterference(DagTask task) {
+    private double getSelfInterference(final DagTask task) {
        double interference = 0;
-        long numberOfThreads = task.getNumberOfThreads();
+        final long numberOfThreads = task.getNumberOfThreads();
-        for (Segment s : task.getWorkloadDistribution()) {
+        for (final Segment s : task.getWorkloadDistribution()) {
            interference += (double) (s.getNumberOfJobs() - 1) * s.getJobWcet();
        }
...
@@ -97,25 +94,27 @@ public class SchmidMottok extends AbstractTest<DagTask> {
    }

-    private double getTaskInterference(DagTask task, long interval, long parallelism) {
+    private double getTaskInterference(final DagTask task, final long interval,
+            final long parallelism) {
        if (responseTimes.containsKey(task)) {
-            long responseTime = responseTimes.get(task).getResponseTime();
-            long minWcet = task.getCriticalPath();
-            long period = task.getPeriod();
-            long amountOfJobs =
+            final long responseTime = responseTimes.get(task).getResponseTime();
+            final long minWcet = task.getCriticalPath();
+            final long period = task.getPeriod();
+            final long amountOfJobs =
                    (LongMath.divide(interval + responseTime - minWcet, period, RoundingMode.FLOOR)
                            + 1);
            double workload = 0;
-            for (Segment s : task.getWorkloadDistribution()) {
-                long numberOfThreads = s.getNumberOfJobs() > 1 ? task.getNumberOfThreads() : 1;
+            for (final Segment s : task.getWorkloadDistribution()) {
+                final long numberOfThreads =
+                        s.getNumberOfJobs() > 1 ? task.getNumberOfThreads() : 1;
                if (numberOfThreads >= parallelism) {
                    workload += s.getNumberOfJobs() * s.getJobWcet() / numberOfThreads;
                }
            }
-            double interference = amountOfJobs * workload;
+            final double interference = amountOfJobs * workload;
            return interference;
        } else {
...
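Aside, not part of the commit: SchmidMottok and MelaniButtazzo resolve the response time with the same fixed-point iteration, starting at the task's critical path and re-adding the interference bound until the estimate stops changing. A minimal sketch of that loop, with a hypothetical interferenceBound standing in for the per-task computation shown above:

final class ResponseTimeIterationSketch {
    // Placeholder for the interference bound; in the tests above it depends on the
    // current responseTime estimate and on the other tasks in the set.
    static double interferenceBound(final long responseTime) {
        return responseTime / 4.0;
    }

    static long responseTime(final long minimumWcet) {
        long responseTime = minimumWcet;
        long previousResponseTime;
        do {
            previousResponseTime = responseTime;
            responseTime = minimumWcet + (long) Math.floor(interferenceBound(responseTime));
        } while (previousResponseTime != responseTime);
        return responseTime;
    }
}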
src/main/java/mvd/jester/utils/BinaryDecompositionTree.java
View file @ f0a948aa
...
@@ -12,7 +12,7 @@ public class BinaryDecompositionTree<N> {
        return root;
    }

-    public boolean contains(N object) {
+    public boolean contains(final N object) {
        if (root == null) {
            return false;
        }
...
@@ -23,7 +23,7 @@ public class BinaryDecompositionTree<N> {
        return root == null;
    }

-    public void setRoot(Node<N> root) {
+    public void setRoot(final Node<N> root) {
        this.root = root;
    }
...
@@ -34,13 +34,13 @@ public class BinaryDecompositionTree<N> {
        private final NodeType nodeType;
        private final T object;

-        public Node(Node<T> parentNode, NodeType nodeType) {
+        public Node(final Node<T> parentNode, final NodeType nodeType) {
            this.parentNode = parentNode;
            this.nodeType = nodeType;
            this.object = null;
        }

-        public boolean contains(T object) {
+        public boolean contains(final T object) {
            if (nodeType.equals(NodeType.LEAF)) {
                return this.object == object;
            } else {
...
@@ -56,7 +56,7 @@ public class BinaryDecompositionTree<N> {
            }
        }

-        public Node(Node<T> parentNode, T object) {
+        public Node(final Node<T> parentNode, final T object) {
            this.parentNode = parentNode;
            this.nodeType = NodeType.LEAF;
            this.object = object;
...
@@ -80,7 +80,7 @@ public class BinaryDecompositionTree<N> {
        /**
         * @param leftNode the leftNode to set
         */
-        public Node<T> setLeftNode(Node<T> leftNode) {
+        public Node<T> setLeftNode(final Node<T> leftNode) {
            this.leftNode = leftNode;
            return this.leftNode;
        }
...
@@ -95,7 +95,7 @@ public class BinaryDecompositionTree<N> {
        /**
         * @param rightNode the rightNode to set
         */
-        public Node<T> setRightNode(Node<T> rightNode) {
+        public Node<T> setRightNode(final Node<T> rightNode) {
            this.rightNode = rightNode;
            return this.rightNode;
        }
...
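Aside, not part of the commit: DagUtils (next file) builds these trees by hanging SEQ/PAR inner nodes and leaf nodes off one another through setRoot, setLeftNode and setRightNode. A small sketch with String payloads, placed in the same package and assuming the nested Node class is usable there exactly as DagUtils uses it; the class name is made up for illustration.

package mvd.jester.utils;

import mvd.jester.utils.BinaryDecompositionTree.Node;
import mvd.jester.utils.BinaryDecompositionTree.NodeType;

final class DecompositionTreeSketch {
    static BinaryDecompositionTree<String> smallTree() {
        final BinaryDecompositionTree<String> tree = new BinaryDecompositionTree<>();
        // SEQ root: its left child is a leaf, its right child a PAR subtree with two leaves.
        final Node<String> seq = new Node<>(null, NodeType.SEQ);
        tree.setRoot(seq);
        seq.setLeftNode(new Node<>(seq, "fork"));
        final Node<String> par = seq.setRightNode(new Node<>(seq, NodeType.PAR));
        par.setLeftNode(new Node<>(par, "left branch"));
        par.setRightNode(new Node<>(par, "right branch"));
        return tree;
    }
}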
src/main/java/mvd/jester/utils/DagUtils.java
0 → 100644
View file @ f0a948aa
package mvd.jester.utils;

import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import org.jgrapht.Graphs;
import org.jgrapht.experimental.dag.DirectedAcyclicGraph;
import org.jgrapht.graph.DefaultEdge;
import mvd.jester.model.Job;
import mvd.jester.model.Segment;
import mvd.jester.utils.BinaryDecompositionTree.Node;
import mvd.jester.utils.BinaryDecompositionTree.NodeType;

public class DagUtils {
    public static long calculateWorkload(final DirectedAcyclicGraph<Job, DefaultEdge> jobDag) {
        long workload = 0;
        for (final Job job : jobDag) {
            workload += job.getWcet();
        }
        return workload;
    }

    public static long calculateCriticalPath(final DirectedAcyclicGraph<Job, DefaultEdge> jobDag) {
        long criticalPath = 0;
        // BreadthFirstIterator<Job, DefaultEdge> breadthFirstIterator =
        // new BreadthFirstIterator<>(jobDag);
        // while (breadthFirstIterator.hasNext()) {
        // Job job = breadthFirstIterator.next();
        for (final Job job : jobDag) {
            final Set<DefaultEdge> edges = jobDag.incomingEdgesOf(job);
            long longestRelativeCompletionTime = 0;
            for (final DefaultEdge e : edges) {
                final Job source = jobDag.getEdgeSource(e);
                longestRelativeCompletionTime =
                        longestRelativeCompletionTime >= source.getRelativeCompletionTime()
                                ? longestRelativeCompletionTime
                                : source.getRelativeCompletionTime();
            }
            job.setRelativeCompletionTime(longestRelativeCompletionTime + job.getWcet());
            criticalPath = job.getRelativeCompletionTime();
        }
        return criticalPath;
    }

    public static LinkedHashSet<Segment> calculateWorkloadDistribution(
            final DirectedAcyclicGraph<Job, DefaultEdge> jobDag, final long criticalPath) {
        final LinkedHashSet<Segment> segments = new LinkedHashSet<>();
        long segmentDuration = 0;
        long segmentHeight = 1;
        for (long t = 0; t < criticalPath; ++t) {
            long currentHeight = 0;
            for (final Job j : jobDag) {
                if (t >= j.getRelativeCompletionTime() - j.getWcet()
                        && t < j.getRelativeCompletionTime()) {
                    currentHeight++;
                }
            }
            if (currentHeight == segmentHeight) {
                segmentDuration++;
            } else {
                segments.add(new Segment(segmentDuration, segmentHeight));
                segmentDuration = 1;
                segmentHeight = currentHeight;
            }
        }
        segments.add(new Segment(segmentDuration, segmentHeight));
        return segments;
    }

    public static DirectedAcyclicGraph<Job, DefaultEdge> createNFJGraph(
            final DirectedAcyclicGraph<Job, DefaultEdge> jobDag) {
        final LinkedList<Job> joinNodes = new LinkedList<>();
        final LinkedList<Job> forkNodes = new LinkedList<>();
        collectForksAndJoins(jobDag, forkNodes, joinNodes);
        return createNFJGraph(jobDag, forkNodes, joinNodes);
    }

    public static void collectForksAndJoins(final DirectedAcyclicGraph<Job, DefaultEdge> jobDag,
            final LinkedList<Job> forkNodes, final LinkedList<Job> joinNodes) {
        for (final Job j : jobDag) {
            if (jobDag.inDegreeOf(j) > 1) {
                joinNodes.add(j);
            }
            if (jobDag.outDegreeOf(j) > 1) {
                forkNodes.add(j);
            }
        }
    }

    public static DirectedAcyclicGraph<Job, DefaultEdge> createNFJGraph(
            final DirectedAcyclicGraph<Job, DefaultEdge> jobDag, final LinkedList<Job> forkNodes,
            final LinkedList<Job> joinNodes) {
        final DirectedAcyclicGraph<Job, DefaultEdge> modifiedJobDag =
                new DirectedAcyclicGraph<>(DefaultEdge.class);
        Graphs.addGraph(modifiedJobDag, jobDag);
        if (!(joinNodes.size() > 1)) {
            return modifiedJobDag;
        }
        final Job sink = joinNodes.getLast();
        for (final Job j : joinNodes) {
            final Set<DefaultEdge> edgeSet = new HashSet<>(modifiedJobDag.incomingEdgesOf(j));
            for (final DefaultEdge e : edgeSet) {
                final Job predecessor = modifiedJobDag.getEdgeSource(e);
                final boolean satisfiesProposition =
                        DagUtils.checkForFork(modifiedJobDag, j, forkNodes, predecessor);
                if (!satisfiesProposition) {
                    modifiedJobDag.removeEdge(e);
                    if (modifiedJobDag.outgoingEdgesOf(predecessor).isEmpty()) {
                        try {
                            modifiedJobDag.addDagEdge(predecessor, sink);
                        } catch (final Exception ex) {
                        }
                    }
                }
                if (modifiedJobDag.inDegreeOf(j) == 1) {
                    break;
                }
            }
        }
        return modifiedJobDag;
    }

    public static BinaryDecompositionTree<Job> createDecompositionTree(
            final DirectedAcyclicGraph<Job, DefaultEdge> jobDag) {
        final LinkedList<Job> joinNodes = new LinkedList<>();
        final LinkedList<Job> forkNodes = new LinkedList<>();
        collectForksAndJoins(jobDag, forkNodes, joinNodes);
        return createDecompositionTree(jobDag, forkNodes, joinNodes);
    }

    public static BinaryDecompositionTree<Job> createDecompositionTree(
            final DirectedAcyclicGraph<Job, DefaultEdge> jobDag, final LinkedList<Job> forkNodes,
            final LinkedList<Job> joinNodes) {
        final BinaryDecompositionTree<Job> tree = new BinaryDecompositionTree<>();
        Optional<Job> source = Optional.empty();
        Optional<Job> sink = Optional.empty();
        if (forkNodes.size() > 0) {
            source = Optional.of(forkNodes.getFirst());
            sink = Optional.of(joinNodes.getLast());
        } else {
            boolean firstRun = true;
            for (final Job j : jobDag) {
                if (firstRun) {
                    firstRun = false;
                    source = Optional.of(j);
                }
                sink = Optional.of(j);
            }
        }
        if (source.isPresent() && sink.isPresent()) {
            final Job current = source.get();
            DagUtils.constructTree(jobDag, null, current, tree, forkNodes, joinNodes, sink.get());
        }
        return tree;
    }

    private static Node<Job> constructTree(final DirectedAcyclicGraph<Job, DefaultEdge> jobDag,
            final Node<Job> parentNode, final Job currentJob,
            final BinaryDecompositionTree<Job> tree, final LinkedList<Job> forkVertices,
            final LinkedList<Job> joinVertices, final Job sinkJob) {
        if (forkVertices.contains(currentJob)) {
            final Job forkJob = currentJob;
            final Job joinJob = findJoin(jobDag, forkJob, sinkJob, joinVertices);
            final Node<Job> seqForkNode = new Node<Job>(parentNode, NodeType.SEQ);
            seqForkNode.setLeftNode(new Node<Job>(seqForkNode, forkJob));
            if (tree.isEmpty()) {
                tree.setRoot(seqForkNode);
            }
            final Node<Job> parContinuationNode;
            if (!tree.contains(joinJob)) {
                final Node<Job> seqJoinNode =
                        seqForkNode.setRightNode(new Node<Job>(seqForkNode, NodeType.SEQ));
                final Node<Job> subTreeOfJoin = constructTree(jobDag, seqJoinNode, joinJob, tree,
                        forkVertices, joinVertices, sinkJob);
                seqJoinNode.setRightNode(subTreeOfJoin);
                parContinuationNode =
                        seqJoinNode.setLeftNode(new Node<Job>(seqJoinNode, NodeType.PAR));
            } else {
                parContinuationNode =
                        seqForkNode.setRightNode(new Node<Job>(seqForkNode, NodeType.PAR));
            }
            Node<Job> successorParent = parContinuationNode;
            final List<Job> successors = Graphs.successorListOf(jobDag, currentJob);
            // create leftSide of joinNode
            for (int i = 0; i < successors.size(); ++i) {
                final Job succ = successors.get(i);
                final Node<Job> thisNode = constructTree(jobDag, successorParent, succ, tree,
                        forkVertices, joinVertices, sinkJob);
                if (i == successors.size() - 1) {
                    successorParent.setRightNode(thisNode);
                } else if (i == successors.size() - 2) {
                    successorParent.setLeftNode(thisNode);
                } else {
                    successorParent.setLeftNode(thisNode);
                    successorParent.setRightNode(new Node<>(successorParent, NodeType.PAR));
                    successorParent = successorParent.getRightNode();
                }
            }
            return seqForkNode;
        } else {
            final List<Job> successorList = Graphs.successorListOf(jobDag, currentJob);
            if (successorList.size() > 0) {
                final Job successor = successorList.get(0);
                if (!tree.contains(successor)) {
                    final Node<Job> seqJobNode = new Node<Job>(parentNode, NodeType.SEQ);
                    if (tree.isEmpty()) {
                        tree.setRoot(seqJobNode);
                    }
                    seqJobNode.setLeftNode(new Node<Job>(seqJobNode, currentJob));
                    final Node<Job> contNode = constructTree(jobDag, seqJobNode, successor, tree,
                            forkVertices, joinVertices, sinkJob);
                    seqJobNode.setRightNode(contNode);
                    return seqJobNode;
                }
            }
            final Node<Job> jobNode = new Node<Job>(parentNode, currentJob);
            if (tree.isEmpty()) {
                tree.setRoot(jobNode);
            }
            return jobNode;
        }
    }

    public static boolean checkNFJProperty(final DirectedAcyclicGraph<Job, DefaultEdge> jobDag) {
        final LinkedList<Job> joinNodes = new LinkedList<>();
        final LinkedList<Job> forkNodes = new LinkedList<>();
        collectForksAndJoins(jobDag, forkNodes, joinNodes);
        if (!(joinNodes.size() > 1)) {
            return true;
        }
        nextJoin: for (final Job j : joinNodes) {
            for (final Job f : forkNodes) {
                final Set<Job> ancestorsOfJ = jobDag.getAncestors(jobDag, j);
                final Set<Job> ancesotorsOfF = jobDag.getAncestors(jobDag, f);
                ancesotorsOfF.add(f);
                final Set<Job> inbetweenJobs = new HashSet<>(ancestorsOfJ);
                inbetweenJobs.removeAll(ancesotorsOfF);
                if (inbetweenJobs.isEmpty()) {
                    continue;
                }
                for (final Job a : inbetweenJobs) {
                    final List<Job> neighboursOfA = Graphs.neighborListOf(jobDag, a);
                    for (final Job b : neighboursOfA) {
                        if ((jobDag.getAncestors(jobDag, j).contains(b) || b == j)
                                && (jobDag.getDescendants(jobDag, f).contains(b) || b == f)) {
                            continue nextJoin;
                        }
                    }
                }
            }
            return false;
        }
        return true;
    }

    private static Job findJoin(final DirectedAcyclicGraph<Job, DefaultEdge> jobDag,
            final Job forkNode, final Job sink, final List<Job> joinNodes) {
        for (final Job j : joinNodes) {
            final Set<Job> ancestorsOfJ = jobDag.getAncestors(jobDag, j);
            final Set<Job> ancesotorsOfFork = jobDag.getAncestors(jobDag, forkNode);
            ancesotorsOfFork.add(forkNode);
            final Set<Job> inbetweenJobs = new HashSet<>(ancestorsOfJ);
            inbetweenJobs.removeAll(ancesotorsOfFork);
            if (inbetweenJobs.isEmpty()) {
                continue;
            }
            final List<Job> successorOfFork = Graphs.successorListOf(jobDag, forkNode);
            if (inbetweenJobs.containsAll(successorOfFork)) {
                return j;
            }
        }
        return sink;
    }

    private static boolean checkForFork(final DirectedAcyclicGraph<Job, DefaultEdge> jobDag,
            final Job joinNode, final List<Job> forkNodes, final Job job) {
        final List<Job> pred = Graphs.predecessorListOf(jobDag, job);
        for (final Job p : pred) {
            if (forkNodes.contains(p)) {
                for (final Job successor : Graphs.successorListOf(jobDag, p)) {
                    if (jobDag.getAncestors(jobDag, joinNode).contains(successor)) {
                        return false;
                    }
                }
            } else {
                return checkForFork(jobDag, joinNode, forkNodes, p);
            }
        }
        return true;
    }

    public static DirectedAcyclicGraph<Job, DefaultEdge> createNFJfromDecompositionTree(
            final BinaryDecompositionTree<Job> tree) {
        final DirectedAcyclicGraph<Job, DefaultEdge> jobDag =
                new DirectedAcyclicGraph<>(DefaultEdge.class);
        if (tree.getRootNode() != null) {
            traverseNodes(jobDag, tree.getRootNode());
        }
        return jobDag;
    }

    private static GraphEndPoints traverseNodes(final DirectedAcyclicGraph<Job, DefaultEdge> jobDag,
            final Node<Job> node) {
        switch (node.getNodeType()) {
            case LEAF: {
                final Job j = node.getObject();
                jobDag.addVertex(j);
                final Set<Job> endPoints = new HashSet<Job>(Arrays.asList(j));
                return new GraphEndPoints(endPoints, endPoints);
            }
            case SEQ: {
                final GraphEndPoints leftEndPoints = traverseNodes(jobDag, node.getLeftNode());
                final GraphEndPoints rightEndPoints = traverseNodes(jobDag, node.getRightNode());
                for (final Job l : leftEndPoints.right) {
                    for (final Job r : rightEndPoints.left) {
                        if (r == l) {
                            continue;
                        }
                        try {
                            jobDag.addDagEdge(l, r);
                        } catch (final Exception e) {
                        }
                    }
                }
                return new GraphEndPoints(leftEndPoints.left, rightEndPoints.right);
            }
            case PAR: {
                final GraphEndPoints leftEndPoints = traverseNodes(jobDag, node.getLeftNode());
                final GraphEndPoints rightEndPoints = traverseNodes(jobDag, node.getRightNode());
                final Set<Job> leftEndPointJobs =
                        Sets.newHashSet(Iterables.concat(leftEndPoints.left, rightEndPoints.left));
                final Set<Job> rightEndPointJobs = Sets
                        .newHashSet(Iterables.concat(leftEndPoints.right, rightEndPoints.right));
                return new GraphEndPoints(leftEndPointJobs, rightEndPointJobs);
            }
            default:
                break;
        }
        return new GraphEndPoints(new HashSet<>(), new HashSet<>());
    }

    public static class GraphEndPoints {
        public Set<Job> left;
        public Set<Job> right;

        public GraphEndPoints(final Set<Job> left, final Set<Job> right) {
            this.left = left;
            this.right = right;
        }
    }
}
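Aside, not part of the commit: a hypothetical caller showing how the relocated utilities compose for a job DAG that has been built elsewhere (for example by DagTaskBuilder); the class name and output format are made up for illustration, and only methods declared in the file above are called.

package mvd.jester.utils;

import java.util.LinkedHashSet;
import org.jgrapht.experimental.dag.DirectedAcyclicGraph;
import org.jgrapht.graph.DefaultEdge;
import mvd.jester.model.Job;
import mvd.jester.model.Segment;

final class DagUtilsUsageSketch {
    static void summarize(final DirectedAcyclicGraph<Job, DefaultEdge> jobDag) {
        final long workload = DagUtils.calculateWorkload(jobDag);
        final long criticalPath = DagUtils.calculateCriticalPath(jobDag);
        final LinkedHashSet<Segment> distribution =
                DagUtils.calculateWorkloadDistribution(jobDag, criticalPath);
        // Nested-fork-join check plus the two derived representations used by the tests.
        final boolean isNfj = DagUtils.checkNFJProperty(jobDag);
        final BinaryDecompositionTree<Job> tree =
                DagUtils.createDecompositionTree(DagUtils.createNFJGraph(jobDag));
        System.out.println(workload + " / " + criticalPath + " / " + distribution.size()
                + " segments, NFJ property: " + isNfj + ", tree empty: " + tree.isEmpty());
    }
}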
src/main/java/mvd/jester/utils/Logger.java
View file @ f0a948aa
...
@@ -13,18 +13,18 @@ public class Logger {
    private BufferedWriter bufferedWriter;
    private PrintWriter printWriter;

-    public Logger(String pathToFile) {
+    public Logger(final String pathToFile) {
        try {
-            fileWriter = new FileWriter(pathToFile, true);
+            fileWriter = new FileWriter(pathToFile, false);
            bufferedWriter = new BufferedWriter(fileWriter);
            printWriter = new PrintWriter(bufferedWriter);
-        } catch (Exception e) {
+        } catch (final Exception e) {
            System.out.println("Could not create file!");
        }
    }

-    public void log(Appendable sb) {
+    public void log(final Appendable sb) {
        printWriter.write(sb.toString());
    }
...
@@ -39,7 +39,7 @@ public class Logger {
            if (fileWriter != null) {
                fileWriter.close();
            }
-        } catch (Exception e) {
+        } catch (final Exception e) {
        }
    }
...
src/test/java/mvd/jester/info/TestSchedulingInfo.java
View file @ f0a948aa
package mvd.jester.info;

-import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.Mockito.mock;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
-import mvd.jester.info.TerminationInfo.Level;
+import mvd.jester.info.SchedulingInfo.Feasiblity;

/**
 * TestTerminationInfo
...
@@ -22,58 +20,20 @@ public class TestSchedulingInfo {
            Set<TerminationInfo> terminationInfos = new HashSet<>();
            int numberOfTerminationInfos = ThreadLocalRandom.current().nextInt(5, 10);
            boolean feasibile = true;
-            boolean levelFailed = false;
            for (int i = 0; i < numberOfTerminationInfos; ++i) {
                long deadline = ThreadLocalRandom.current().nextLong(50, 100);
                long responseTime = ThreadLocalRandom.current().nextLong(50, 100);
-                Level taskLevel =
-                        ThreadLocalRandom.current().nextLong(0, 100) < 50 ? Level.LOW : Level.HIGH;
-                terminationInfos.add(new TerminationInfo(deadline, responseTime, taskLevel));
+                terminationInfos.add(new TerminationInfo(deadline, responseTime));
                if (deadline < responseTime) {
                    feasibile = false;
-                    if (taskLevel == Level.HIGH) {
-                        levelFailed = true;
-                    }
                }
            }

-            SchedulingInfo schedulingInfo =
-                    new SchedulingInfo(terminationInfos, 2, numberOfTerminationInfos);
-            assertTrue(schedulingInfo.checkLevelFail(Level.HIGH) == levelFailed);
-            assertTrue(schedulingInfo.checkTasksetFeasible() == feasibile);
+            SchedulingInfo schedulingInfo = new SchedulingInfo(terminationInfos);
+            assertTrue(schedulingInfo.getFeasibility() == (feasibile ? Feasiblity.SUCCEEDED
+                    : Feasiblity.FAILED));
        }
    }

-    @Test
-    @DisplayName("Check Getters and Setters.")
-    public void testGettersAndSetters() {
-        double taskRatio = 0.23;
-        double utilization = 0.49;
-        SchedulingInfo si = new SchedulingInfo(taskRatio, utilization);
-        Set<TerminationInfo> terminationInfos = new HashSet<>();
-        long numberOfTerminationInfos = ThreadLocalRandom.current().nextLong(5, 10);
-        for (int i = 0; i < numberOfTerminationInfos; ++i) {
-            TerminationInfo ti = mock(TerminationInfo.class);
-            terminationInfos.add(ti);
-            assertTrue(si.addTerminationInfo(ti));
-            assertFalse(si.addTerminationInfo(ti));
-        }
-        assertTrue(si.getParallelTaskRatio() == taskRatio);
-        assertTrue(si.getUtilization() == utilization);
-        assertTrue(si.getTerminationInfos().size() == numberOfTerminationInfos);
-        assertTrue(si.getTerminationInfos().equals(terminationInfos));
-        assertTrue(si.addTerminationInfo(null));
-        assertFalse(si.getFailedTerminationInfo().isPresent());
-        TerminationInfo ti = mock(TerminationInfo.class);
-        si.setFailedTerminationInfo(ti);
-        assertTrue(si.getFailedTerminationInfo().isPresent());
-        assertTrue(si.getFailedTerminationInfo().get().equals(ti));
-    }
}
src/test/java/mvd/jester/info/TestTerminationInfo.java
View file @ f0a948aa
...
@@ -3,7 +3,6 @@ package mvd.jester.info;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
-import mvd.jester.info.TerminationInfo.Level;

/**
 * TestTerminationInfo
...
@@ -16,11 +15,10 @@ public class TestTerminationInfo {
        long releaseTime = 20;
        long deadline = 40;
        long responseTime = 30;
-        Level taskLevel = Level.LOW;

        TerminationInfo ti = new TerminationInfo(releaseTime, deadline, responseTime);
-        TerminationInfo til = new TerminationInfo(deadline, responseTime, taskLevel);
-        TerminationInfo tirl = new TerminationInfo(releaseTime, deadline, responseTime, taskLevel);
+        TerminationInfo til = new TerminationInfo(deadline, responseTime);
+        TerminationInfo tirl = new TerminationInfo(releaseTime, deadline, responseTime);

        assertTrue(ti.getDeadline() == deadline);
...
@@ -31,8 +29,6 @@ public class TestTerminationInfo {
        assertTrue(tirl.getResponseTime() == responseTime);
        assertTrue(ti.getReleaseTime() == releaseTime);
        assertTrue(tirl.getReleaseTime() == releaseTime);
-        assertTrue(til.getTaskLevel() == taskLevel);
-        assertTrue(tirl.getTaskLevel() == taskLevel);
        assertTrue(ti.getLateness() == responseTime - deadline);
        assertTrue(til.getLateness() == responseTime - deadline);
        assertTrue(tirl.getLateness() == responseTime - deadline);
...
src/test/java/mvd/jester/model/TestDagUtils.java
View file @ f0a948aa
...
@@ -7,7 +7,7 @@ import org.jgrapht.experimental.dag.DirectedAcyclicGraph;
import org.jgrapht.graph.DefaultEdge;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
-import mvd.jester.model.DagTask.DagUtils;
+import mvd.jester.utils.DagUtils;
import mvd.jester.model.SystemSetup.DagTaskBuilder;
import mvd.jester.utils.BinaryDecompositionTree;
...