Games Task Scheduler (GTS)
A multi-processor scheduling framework for game engines
4_isolation_init.h
1 /*******************************************************************************
2  * Copyright 2019 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a copy
5  * of this software and associated documentation files(the "Software"), to deal
6  * in the Software without restriction, including without limitation the rights
7  * to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
8  * copies of the Software, and to permit persons to whom the Software is
9  * furnished to do so, subject to the following conditions :
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
17  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20  * THE SOFTWARE.
21  ******************************************************************************/
#include <cstdint>
#include <cstdio>
#include <iostream>
#include <vector>

#include "gts/platform/Thread.h"

#include "gts/micro_scheduler/WorkerPool.h"
#include "gts/micro_scheduler/MicroScheduler.h"
#include "gts/micro_scheduler/patterns/ParallelFor.h"
#include "gts/micro_scheduler/patterns/Range1d.h"
31 
32 using namespace gts;
33 
34 namespace gts_examples {
35 
36 void isolationInit()
37 {
38  printf ("================\n");
39  printf ("isolationInit\n");
40  printf ("================\n");
41 
42 
43  // Another way to view MicroSchedulers are as isolated parallel regions.
44  // Each microScheduler runs on the same workerPool, but their tasks are
45  // executed in isolation. This is achieved internally using a round robin
46  // strategy for each Worker in workerPool. When the currently executing
47  // Schedule on the Worker fails to find work, the Worker moves onto the
48  // next available Schedule.
49 
50  WorkerPool workerPool;
51  workerPool.initialize();
52 
53  // Let say we need two isolated regions.
54  constexpr uint32_t NUM_SCHEDUELRS = 2;
55  MicroScheduler microScheduler[NUM_SCHEDUELRS];
56 
57  // Attach each scheduler to the same WorkerPool.
58  for (uint32_t iScheduler = 0; iScheduler < NUM_SCHEDUELRS; ++iScheduler)
59  {
60  microScheduler[iScheduler].initialize(&workerPool);
61  }
62 
63  //
64  // Isolated workload:
65 
66  const uint32_t elementCount = 1000;
67 
68  // Create a value per thread.
69  std::vector<uint32_t> threadLocalValues(gts::Thread::getHardwareThreadCount(), 0);
70 
71  ParallelFor parallelFor(microScheduler[0]);
72  parallelFor(
73  Range1d<uint32_t>(0, elementCount, 1),
74  [&](Range1d<uint32_t>& range, void* pData, TaskContext const& ctx)
75  {
76  std::vector<uint32_t>& threadLocalValues = *(std::vector<uint32_t>*)pData;
77  threadLocalValues[ctx.workerId.localId()] = range.begin();
78 
79  // Using the isolate feature, the inner parallelFor cannot execute
80  // task spawned outside the scope of the isolation lambda.
81  ParallelFor parallelFor(microScheduler[1]);
82  parallelFor(
83  Range1d<uint32_t>(0, elementCount, 1),
84  [](Range1d<uint32_t>&, void*, TaskContext const&)
85  {},
87  nullptr
88  );
89 
90  // Will never print.
91  if (threadLocalValues[ctx.workerId.localId()] != range.begin())
92  {
93  std::cout << "Isolated: Executed an outer loop iteration will waiting on inner loop.\n";
94  }
95  },
97  &threadLocalValues);
98 
99  // If many isolated regions are needed, we recommend creating a pool instead
100  // of frequently creating and destroying MicroSchedulers.
101 }
102 
103 } // namespace gts_examples
A work-stealing task scheduler. The scheduler is executed by the WorkerPool it is initialized with.
Definition: MicroScheduler.h:81
bool initialize(WorkerPool *pWorkerPool)
Initializes the MicroScheduler and attaches it to pWorkPool, where each worker in pWorkPool will exec...
GTS_INLINE SubIdType localId() const
Definition: Utils.h:317
A construct that maps parallel-for behavior to a MicroScheduler.
Definition: ParallelFor.h:48
An iteration range over a 1D data set. Splits divide the range in two unless the minimum size is reached.
Definition: Range1d.h:56
Recursively splits a range and tries to limit number of splits to the number of workers in the execut...
Definition: Partitioners.h:114
A collection of running Worker threads that a MicroScheduler can be run on.
Definition: WorkerPool.h:54
bool initialize(uint32_t threadCount=0)
static GTS_INLINE uint32_t getHardwareThreadCount()
Gets the number of logical processor on this system.
Definition: Thread.h:447
The context associated with the task being executed.
Definition: MicroSchedulerTypes.h:54
OwnedId workerId
The ID of the current Worker.
Definition: MicroSchedulerTypes.h:65