Games Task Scheduler (GTS)
A multi-processor scheduling framework for games engines
1_parallel_for.h
/*******************************************************************************
 * Copyright 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 ******************************************************************************/
#pragma once

#include <vector>

#include "gts/micro_scheduler/WorkerPool.h"
#include "gts/micro_scheduler/MicroScheduler.h"
#include "gts/micro_scheduler/patterns/ParallelFor.h"
#include "gts/micro_scheduler/patterns/Partitioners.h"
#include "gts/micro_scheduler/patterns/Range1d.h"

using namespace gts;

namespace gts_examples {

//------------------------------------------------------------------------------
void simplestIndexedParallelFor()
{
    printf("================\n");
    printf("simplestIndexedParallelFor\n");
    printf("================\n");

    // Demonstrates the simplest ParallelFor interface with
    // iteration over a container using an index.

    // Init boilerplate.
    WorkerPool workerPool;
    bool result = workerPool.initialize();
    GTS_ASSERT(result);
    MicroScheduler microScheduler;
    result = microScheduler.initialize(&workerPool);
    GTS_ASSERT(result);

    size_t const elementCount = 1 << 16;

    // Make a ParallelFor object for this scheduler. We do this because
    // there can be multiple scheduler objects.
    ParallelFor parFor(microScheduler);

    // Increment a vector of items in parallel.
    std::vector<int> vec(elementCount, 0);
    parFor(size_t(0), size_t(vec.size()), [&vec](size_t idx) { vec[idx]++; });

    // Verify results.
    for (auto const& v : vec)
    {
        GTS_ASSERT(v == 1);
    }
}

//------------------------------------------------------------------------------
void simplerIteratorParallelFor()
{
    printf("================\n");
    printf("simplerIteratorParallelFor\n");
    printf("================\n");

    // Demonstrates the simpler ParallelFor interface with
    // iteration over a container using an iterator. The default
    // partitioning is used; see fullParallelFor for explicit control.

    // Init boilerplate.
    WorkerPool workerPool;
    bool result = workerPool.initialize();
    GTS_ASSERT(result);
    MicroScheduler microScheduler;
    result = microScheduler.initialize(&workerPool);
    GTS_ASSERT(result);

    size_t const elementCount = 1 << 16;

    ParallelFor parFor(microScheduler);

    // Increment a vector of items in parallel.
    std::vector<int> vec(elementCount, 0);
    parFor(
        vec.begin(),
        vec.end(),
        [](std::vector<int>::iterator iter) { (*iter)++; });

    // Verify results.
    for (auto const& v : vec)
    {
        GTS_ASSERT(v == 1);
    }
}

//------------------------------------------------------------------------------
void fullParallelFor()
{
    printf("================\n");
    printf("fullParallelFor\n");
    printf("================\n");

    // Demonstrates the full ParallelFor interface. Note that the ParallelFor
    // execution function gives full access to the iteration range.

    // Init boilerplate.
    WorkerPool workerPool;
    bool result = workerPool.initialize();
    GTS_ASSERT(result);
    MicroScheduler microScheduler;
    result = microScheduler.initialize(&workerPool);
    GTS_ASSERT(result);

    size_t const elementCount = 1 << 16;
    std::vector<int> vec(elementCount, 0);

    ParallelFor parallelFor(microScheduler);

    // The partitioner determines how the data in the Range1d is divided up
    // over all the workers. The AdaptivePartitioner type only divides when
    // other worker threads need more work.
    auto partitionerType = AdaptivePartitioner();

    // Since the partitioner is adaptive, a block size of 1 gives the
    // partitioner full control over division.
    size_t const blockSize = 1;

    parallelFor(

        // The 1D index range the parallel-for will iterate over.
        Range1d<size_t>(0, elementCount, blockSize),

        // The function the parallel-for will execute on each block of the range.
        [](Range1d<size_t>& range, void* pData, TaskContext const&)
        {
            // Unpack the user data.
            std::vector<int>& vec = *(std::vector<int>*)pData;

            // For each index in the block, increment the element.
            for (size_t idx = range.begin(); idx != range.end(); ++idx)
            {
                vec[idx]++;
            }
        },

        // The partitioner object.
        partitionerType,

        // The vec user data.
        &vec
    );

    // Verify results.
    for (auto const& v : vec)
    {
        GTS_ASSERT(v == 1);
    }
}

} // namespace gts_examples
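
For reference, a minimal driver that runs the three examples above might look like the sketch below. It is not part of the GTS example file; the translation unit name and entry point are assumptions. Each example function initializes and tears down its own WorkerPool and MicroScheduler, so no additional setup is required here.

// main.cpp (hypothetical): runs each ParallelFor example in turn.
#include "1_parallel_for.h"

int main()
{
    gts_examples::simplestIndexedParallelFor();
    gts_examples::simplerIteratorParallelFor();
    gts_examples::fullParallelFor();
    return 0;
}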