Games Task Scheduler (GTS)
A multi-processor scheduling framework for game engines
quick_start.h
1 /*******************************************************************************
2  * Copyright 2019 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a copy
5  * of this software and associated documentation files(the "Software"), to deal
6  * in the Software without restriction, including without limitation the rights
7  * to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
8  * copies of the Software, and to permit persons to whom the Software is
9  * furnished to do so, subject to the following conditions :
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
17  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20  * THE SOFTWARE.
21  ******************************************************************************/
22 #pragma once
23 
24 #include <iostream>
25 #include <vector>
26 
27 #include "gts/micro_scheduler/WorkerPool.h"
28 #include "gts/micro_scheduler/MicroScheduler.h"
29 #include "gts/micro_scheduler/patterns/ParallelFor.h"
30 
31 #include "gts/macro_scheduler/Node.h"
32 #include "gts/macro_scheduler/compute_resources/MicroScheduler_Workload.h"
33 #include "gts/macro_scheduler/compute_resources/MicroScheduler_ComputeResource.h"
34 #include "gts/macro_scheduler/schedulers/homogeneous/central_queue/CentralQueue_MacroScheduler.h"
35 
36 using namespace gts;
37 
38 namespace gts_examples {
39 
40 //------------------------------------------------------------------------------
41 void basicParallelFor()
42 {
43  printf ("================\n");
44  printf ("basicParallelFor\n");
45  printf ("================\n");
46 
47  // Create a worker pool for the MicroScheduler to run on.
48  WorkerPool workerPool;
49  bool result = workerPool.initialize();
50  GTS_ASSERT(result);
51 
52  // Create a micro scheduler and assign the worker pool to it.
53  MicroScheduler microScheduler;
54  result = microScheduler.initialize(&workerPool);
55  GTS_ASSERT(result);
56 
57  // Create a ParallelFor object attached to the scheduler.
58  ParallelFor parFor(microScheduler);
59 
60  // Increment a vector of items in parallel
61  std::vector<int> vec(1000000, 0);
62  parFor(vec.begin(), vec.end(), [](std::vector<int>::iterator iter) { (*iter)++; });
63 
64  // Verify results.
65  for (auto const& v : vec)
66  {
67  GTS_ASSERT(v == 1);
68  }
69 
70  // These resources can be shutdown explicitly or their destructor will
71  // shut them down implicitly.
72  microScheduler.shutdown();
73  workerPool.shutdown();
74 
75  printf("SUCCESS!\n\n");
76 }
77 
78 //------------------------------------------------------------------------------
79 void basicMacroSchedulerNodeGraph()
80 {
81  printf ("================\n");
82  printf ("basicMacroSchedulerNodeGraph\n");
83  printf ("================\n");
84 
85  // A MacroScheduler is a high level scheduler that maps a persistent task
86  // graph (DAG) of Nodes to set of ComputeResources. A CentralQueue_MacroScheduler
87  // is a MacroScheduler that executes a DAG exclusively on one or more
88  // MicroSchedulers. Each Node is converted to a Task when it is ready to be
89  // executed.
90 
91  //
92  // First, we create a MicroSchedulder and map it to a
93  // MicroScheduler_ComputeResource that can be consumed by
94  // a CentralQueue_MacroScheduler.
95 
96  WorkerPool workerPool;
97  workerPool.initialize();
98 
99  MicroScheduler microScheduler;
100  microScheduler.initialize(&workerPool);
101 
102  MicroScheduler_ComputeResource microSchedulerCompResource(&microScheduler, 0, 0);
103 
104  //
105  // Second, we create a CentralQueue_MacroScheduler and map the
106  // MicroScheduler_ComputeResource to it.
107 
108  MacroSchedulerDesc generalizedDagSchedulerDesc;
109  generalizedDagSchedulerDesc.computeResources.push_back(&microSchedulerCompResource);
110 
111  MacroScheduler* pMacroScheduler = new CentralQueue_MacroScheduler;
112  pMacroScheduler->init(generalizedDagSchedulerDesc);
113 
114  //
115  // Build a DAG of Nodes
116  /*
117  +---+
118  +---->| B |-----+
119  | +---+ |
120  | v
121  +---+ +---+
122  | A | | D |
123  +---+ +---+
124  | ^
125  | +---+ |
126  +---->| C |-----+
127  +---+
128  */
129 
130 
131  Node* pA = pMacroScheduler->allocateNode();
132  pA->addWorkload<MicroSchedulerLambda_Workload>([](WorkloadContext const&){ printf("A\n"); });
133 
134  Node* pB = pMacroScheduler->allocateNode();
135  pB->addWorkload<MicroSchedulerLambda_Workload>([](WorkloadContext const&){ printf("B\n"); });
136 
137  Node* pC = pMacroScheduler->allocateNode();
138  pC->addWorkload<MicroSchedulerLambda_Workload>([](WorkloadContext const&){ printf("C\n"); });
139 
140  Node* pD = pMacroScheduler->allocateNode();
141  pD->addWorkload<MicroSchedulerLambda_Workload>([](WorkloadContext const&){ printf("D\n"); });
142 
143  pA->addSuccessor(pB);
144  pA->addSuccessor(pC);
145  pB->addSuccessor(pD);
146  pC->addSuccessor(pD);
147 
148  //
149  // Build and execute the schedule
150 
151  Schedule* pSchedule = pMacroScheduler->buildSchedule(pA, pD);
152 
153  // Let's execute the DAG as if it were in a game loop.
154  for(size_t iLoop = 0; iLoop < 10; ++iLoop)
155  {
156  printf("--- Frame ---\n");
157  pMacroScheduler->executeSchedule(pSchedule, microSchedulerCompResource.id());
158  }
159 
160  printf("SUCCESS!\n\n");
161 }
162 
163 //------------------------------------------------------------------------------
164 void basicMacroSchedulerNodeGraphWithParallelFor()
165 {
166  printf ("================\n");
167  printf ("basicMacroSchedulerNodeGraphWithParallelFor\n");
168  printf ("================\n");
169 
170  //
171  // Setup CpuComputeResource
172 
173  WorkerPool workerPool;
174  workerPool.initialize();
175 
176  MicroScheduler microScheduler;
177  microScheduler.initialize(&workerPool);
178 
179  MicroScheduler_ComputeResource microSchedulerCompResource(&microScheduler, 0, 0);
180 
181  //
182  // Init MacroScheduler
183 
184  MacroSchedulerDesc generalizedDagSchedulerDesc;
185  generalizedDagSchedulerDesc.computeResources.push_back(&microSchedulerCompResource);
186 
187  MacroScheduler* pMacroScheduler = new CentralQueue_MacroScheduler;
188  pMacroScheduler->init(generalizedDagSchedulerDesc);
189 
190  //
191  // Build a DAG of Nodes
192  /*
193  +---+
194  +---->| B |-----+
195  | +---+ |
196  | v
197  +---+ +---+
198  | A | | D |
199  +---+ +---+
200  | ^
201  | +---+ |
202  +---->| C |-----+
203  +---+
204 
205  A: Increments all elements in a vector by 1.
206  B: Increments elements [0, n/2) by 2.
207  C: Increments elements [n/2, n) by 3.
208  D: Increments all elements by by 1.
209 
210  Result: { 4, 4, ..., 4, 5, 5, ..., 5}.
211  */
212 
213  ParallelFor parFor(microScheduler);
214  std::vector<int> vec(1000000, 0);
215 
216  Node* pA = pMacroScheduler->allocateNode();
217  pA->addWorkload<MicroSchedulerLambda_Workload>([&parFor, &vec](WorkloadContext const& ctx)
218  {
219  parFor(vec.begin(), vec.end(), [](std::vector<int>::iterator iter) { (*iter)++; });
220  printf("A\n");
221  });
222 
223  Node* pB = pMacroScheduler->allocateNode();
224  pB->addWorkload<MicroSchedulerLambda_Workload>([&parFor, &vec](WorkloadContext const&)
225  {
226  parFor(vec.begin(), vec.begin() + vec.size() / 2, [](std::vector<int>::iterator iter) { (*iter) += 2; });
227  printf("B\n");
228  });
229 
230  Node* pC = pMacroScheduler->allocateNode();
231  pC->addWorkload<MicroSchedulerLambda_Workload>([&parFor, &vec](WorkloadContext const&)
232  {
233  parFor(vec.begin() + vec.size() / 2, vec.end(), [](std::vector<int>::iterator iter) { (*iter) += 3; });
234  printf("C\n");
235  });
236 
237  Node* pD = pMacroScheduler->allocateNode();
238  pD->addWorkload<MicroSchedulerLambda_Workload>([&parFor, &vec](WorkloadContext const&)
239  {
240  parFor(vec.begin(), vec.end(), [](std::vector<int>::iterator iter) { (*iter)++; });
241  printf("D\n");
242 
243  });
244 
245  pA->addSuccessor(pB);
246  pA->addSuccessor(pC);
247  pB->addSuccessor(pD);
248  pC->addSuccessor(pD);
249 
250  // Build and execute the schedule
251  Schedule* pSchedule = pMacroScheduler->buildSchedule(pA, pD);
252  pMacroScheduler->executeSchedule(pSchedule, microSchedulerCompResource.id());
253 
254  // Validate
255  for (auto iter = vec.begin(); iter != vec.begin() + vec.size() / 2; ++iter)
256  {
257  GTS_ASSERT(*iter == 4);
258  }
259  for (auto iter = vec.begin() + vec.size() / 2; iter != vec.end(); ++iter)
260  {
261  GTS_ASSERT(*iter == 5);
262  }
263 
264 
265  printf("SUCCESS!\n\n");
266 }
267 
268 } // namespace gts_examples
A generalized DAG scheduler utilizing work stealing. This scheduler delegates its responsibilities to...
Definition: CentralQueue_MacroScheduler.h:58
A MacroScheduler builds ISchedules for a set of ComputeResources from a DAG of Node.
Definition: MacroScheduler.h:50
virtual Schedule * buildSchedule(Node *pStart, Node *pEnd)=0
virtual void executeSchedule(Schedule *pSchedule, ComputeResourceId id)=0
virtual bool init(MacroSchedulerDesc const &desc)=0
A ComputeResource that wraps a MicroScheduler.
Definition: MicroScheduler_ComputeResource.h:53
A work-stealing task scheduler. The scheduler is executed by the WorkerPool it is initialized with.
Definition: MicroScheduler.h:81
bool initialize(WorkerPool *pWorkerPool)
Initializes the MicroScheduler and attaches it to pWorkPool, where each worker in pWorkPool will exec...
void shutdown()
Stops the MicroScheduler and destroys all resources. The TaskScheduler is now in an unusable state....
A concrete lambda Workload that maps to the MicroScheduler.
Definition: MicroScheduler_Workload.h:116
A Node represents a task in a generalized task DAG. It contains Workloads that are scheduled onto a C...
Definition: Node.h:50
GTS_INLINE TWorkload * addWorkload(TArgs &&... args)
Allocates a new Workload object of type TWorkload.
Definition: Node.h:174
void addSuccessor(Node *pNode)
Adds the successor Node 'pNode'.
A construct that maps parallel-for behavior to a MicroScheduler.
Definition: ParallelFor.h:48
The execution schedule for all ComputeResources.
Definition: Schedule.h:45
A collection of running Worker threads that a MicroScheduler can be run on.
Definition: WorkerPool.h:54
bool initialize(uint32_t threadCount=0)
#define GTS_ASSERT(expr)
Causes execution to break when expr is false.
Definition: Assert.h:144
The description of a MacroSchedulerDesc to create.
Definition: MacroSchedulerTypes.h:60
gts::Vector< ComputeResource * > computeResources
The ComputeResource that the MacroScheduler can schedule to.
Definition: MacroSchedulerTypes.h:62
The context associated with the task being executed.
Definition: MacroSchedulerTypes.h:72