LLVM OpenMP* Runtime Library
kmp.h
1 
2 /*
3  * kmp.h -- KPTS runtime header file.
4  */
5 
6 //===----------------------------------------------------------------------===//
7 //
8 // The LLVM Compiler Infrastructure
9 //
10 // This file is dual licensed under the MIT and the University of Illinois Open
11 // Source Licenses. See LICENSE.txt for details.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef KMP_H
16 #define KMP_H
17 
18 #include "kmp_config.h"
19 
20 /* #define BUILD_PARALLEL_ORDERED 1 */
21 
22 /* This fix replaces gettimeofday with clock_gettime for better scalability on
23  the Altix. Requires user code to be linked with -lrt. */
24 //#define FIX_SGI_CLOCK
25 
26 /* Defines for OpenMP 3.0 tasking and auto scheduling */
27 
28 #ifndef KMP_STATIC_STEAL_ENABLED
29 #define KMP_STATIC_STEAL_ENABLED 1
30 #endif
31 
32 #define TASK_CURRENT_NOT_QUEUED 0
33 #define TASK_CURRENT_QUEUED 1
34 
35 #ifdef BUILD_TIED_TASK_STACK
36 #define TASK_STACK_EMPTY 0 // entries when the stack is empty
37 #define TASK_STACK_BLOCK_BITS 5 // Used in TASK_STACK_SIZE and TASK_STACK_MASK
38 // Number of entries in each task stack array
39 #define TASK_STACK_BLOCK_SIZE (1 << TASK_STACK_BLOCK_BITS)
40 // Mask for determining index into stack block
41 #define TASK_STACK_INDEX_MASK (TASK_STACK_BLOCK_SIZE - 1)
42 #endif // BUILD_TIED_TASK_STACK
43 
44 #define TASK_NOT_PUSHED 1
45 #define TASK_SUCCESSFULLY_PUSHED 0
46 #define TASK_TIED 1
47 #define TASK_UNTIED 0
48 #define TASK_EXPLICIT 1
49 #define TASK_IMPLICIT 0
50 #define TASK_PROXY 1
51 #define TASK_FULL 0
52 
53 #define KMP_CANCEL_THREADS
54 #define KMP_THREAD_ATTR
55 
56 // Android does not have pthread_cancel. Undefine KMP_CANCEL_THREADS when
57 // building for Android.
58 #if defined(__ANDROID__)
59 #undef KMP_CANCEL_THREADS
60 #endif
61 
62 #include <signal.h>
63 #include <stdarg.h>
64 #include <stddef.h>
65 #include <stdio.h>
66 #include <stdlib.h>
67 #include <string.h>
68 /* Do not include <ctype.h>; it causes problems with /MD on Windows* OS due to a
69  bad Microsoft library. Some macros are provided below to replace these functions. */
70 #ifndef __ABSOFT_WIN
71 #include <sys/types.h>
72 #endif
73 #include <limits.h>
74 #include <time.h>
75 
76 #include <errno.h>
77 
78 #include "kmp_os.h"
79 
80 #include "kmp_safe_c_api.h"
81 
82 #if KMP_STATS_ENABLED
83 class kmp_stats_list;
84 #endif
85 
86 #if KMP_USE_HIER_SCHED
87 // Only include hierarchical scheduling if affinity is supported
88 #undef KMP_USE_HIER_SCHED
89 #define KMP_USE_HIER_SCHED KMP_AFFINITY_SUPPORTED
90 #endif
91 
92 #if KMP_USE_HWLOC && KMP_AFFINITY_SUPPORTED
93 #include "hwloc.h"
94 #ifndef HWLOC_OBJ_NUMANODE
95 #define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
96 #endif
97 #ifndef HWLOC_OBJ_PACKAGE
98 #define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET
99 #endif
100 #endif
101 
102 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
103 #include <xmmintrin.h>
104 #endif
105 
106 #include "kmp_debug.h"
107 #include "kmp_lock.h"
108 #include "kmp_version.h"
109 #if USE_DEBUGGER
110 #include "kmp_debugger.h"
111 #endif
112 #include "kmp_i18n.h"
113 
114 #define KMP_HANDLE_SIGNALS (KMP_OS_UNIX || KMP_OS_WINDOWS)
115 
116 #include "kmp_wrapper_malloc.h"
117 #if KMP_OS_UNIX
118 #include <unistd.h>
119 #if !defined NSIG && defined _NSIG
120 #define NSIG _NSIG
121 #endif
122 #endif
123 
124 #if KMP_OS_LINUX
125 #pragma weak clock_gettime
126 #endif
127 
128 #if OMPT_SUPPORT
129 #include "ompt-internal.h"
130 #endif
131 
132 // 0 - no fast memory allocation, alignment: 8-byte on x86, 16-byte on x64.
133 // 3 - fast allocation using sync, non-sync free lists of any size, non-self
134 // free lists of limited size.
135 #ifndef USE_FAST_MEMORY
136 #define USE_FAST_MEMORY 3
137 #endif
138 
139 #ifndef KMP_NESTED_HOT_TEAMS
140 #define KMP_NESTED_HOT_TEAMS 0
141 #define USE_NESTED_HOT_ARG(x)
142 #else
143 #if KMP_NESTED_HOT_TEAMS
144 #if OMP_40_ENABLED
145 #define USE_NESTED_HOT_ARG(x) , x
146 #else
147 // Nested hot teams feature depends on omp 4.0, disable it for earlier versions
148 #undef KMP_NESTED_HOT_TEAMS
149 #define KMP_NESTED_HOT_TEAMS 0
150 #define USE_NESTED_HOT_ARG(x)
151 #endif
152 #else
153 #define USE_NESTED_HOT_ARG(x)
154 #endif
155 #endif
156 
157 // By default, assume BGET uses a compare_exchange instruction instead of a lock.
158 #ifndef USE_CMP_XCHG_FOR_BGET
159 #define USE_CMP_XCHG_FOR_BGET 1
160 #endif
161 
162 // Test to see if queuing lock is better than bootstrap lock for bget
163 // #ifndef USE_QUEUING_LOCK_FOR_BGET
164 // #define USE_QUEUING_LOCK_FOR_BGET
165 // #endif
166 
167 #define KMP_NSEC_PER_SEC 1000000000L
168 #define KMP_USEC_PER_SEC 1000000L
169 
178 enum {
183  /* 0x04 is no longer used */
192  KMP_IDENT_BARRIER_IMPL_MASK = 0x01C0,
193  KMP_IDENT_BARRIER_IMPL_FOR = 0x0040,
194  KMP_IDENT_BARRIER_IMPL_SECTIONS = 0x00C0,
195 
196  KMP_IDENT_BARRIER_IMPL_SINGLE = 0x0140,
197  KMP_IDENT_BARRIER_IMPL_WORKSHARE = 0x01C0,
198 
210  KMP_IDENT_ATOMIC_HINT_UNCONTENDED = 0x010000,
211  KMP_IDENT_ATOMIC_HINT_CONTENDED = 0x020000,
212  KMP_IDENT_ATOMIC_HINT_NONSPECULATIVE = 0x040000,
213  KMP_IDENT_ATOMIC_HINT_SPECULATIVE = 0x080000,
214 };
215 
219 typedef struct ident {
220  kmp_int32 reserved_1;
221  kmp_int32 flags;
223  kmp_int32 reserved_2;
224 #if USE_ITT_BUILD
225 /* but currently used for storing region-specific ITT */
226 /* contextual information. */
227 #endif /* USE_ITT_BUILD */
228  kmp_int32 reserved_3;
229  char const *psource;
233 } ident_t;
238 // Some forward declarations.
239 typedef union kmp_team kmp_team_t;
240 typedef struct kmp_taskdata kmp_taskdata_t;
241 typedef union kmp_task_team kmp_task_team_t;
242 typedef union kmp_team kmp_team_p;
243 typedef union kmp_info kmp_info_p;
244 typedef union kmp_root kmp_root_p;
245 
246 #ifdef __cplusplus
247 extern "C" {
248 #endif
249 
250 /* ------------------------------------------------------------------------ */
251 
252 /* Pack two 32-bit signed integers into a 64-bit signed integer */
253 /* ToDo: Fix word ordering for big-endian machines. */
254 #define KMP_PACK_64(HIGH_32, LOW_32) \
255  ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
256 
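/* Illustrative sketch (not part of the original header): packing two 32-bit
   values with KMP_PACK_64 and recovering them again. The helper name is
   hypothetical. Note that a negative LOW_32 would sign-extend and overwrite
   the high half, so only the non-negative case is shown. */
static inline void __kmp_example_pack_64(void) {
  kmp_int32 high = 7, low = 42;
  kmp_int64 packed = KMP_PACK_64(high, low); /* 0x000000070000002A */
  kmp_int32 high_out = (kmp_int32)((kmp_uint64)packed >> 32); /* 7 */
  kmp_int32 low_out = (kmp_int32)(packed & 0xFFFFFFFF); /* 42 */
  (void)high_out;
  (void)low_out;
}
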
257 // Generic string manipulation macros. Assume that _x is of type char *
258 #define SKIP_WS(_x) \
259  { \
260  while (*(_x) == ' ' || *(_x) == '\t') \
261  (_x)++; \
262  }
263 #define SKIP_DIGITS(_x) \
264  { \
265  while (*(_x) >= '0' && *(_x) <= '9') \
266  (_x)++; \
267  }
268 #define SKIP_TOKEN(_x) \
269  { \
270  while ((*(_x) >= '0' && *(_x) <= '9') || (*(_x) >= 'a' && *(_x) <= 'z') || \
271  (*(_x) >= 'A' && *(_x) <= 'Z') || *(_x) == '_') \
272  (_x)++; \
273  }
274 #define SKIP_TO(_x, _c) \
275  { \
276  while (*(_x) != '\0' && *(_x) != (_c)) \
277  (_x)++; \
278  }
279 
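/* Illustrative sketch (not part of the original header): a hypothetical parser
   fragment showing how the SKIP_* macros advance a char pointer, e.g. when
   scanning an environment-variable value such as "  16, dynamic". */
static inline void __kmp_example_skip_macros(char *buf) {
  char *scan = buf;
  SKIP_WS(scan); /* skip leading blanks and tabs */
  SKIP_DIGITS(scan); /* skip a decimal number, e.g. "16" */
  SKIP_TO(scan, ','); /* advance to the next ',' (or the terminating '\0') */
  if (*scan == ',')
    scan++;
  SKIP_WS(scan);
  SKIP_TOKEN(scan); /* skip an identifier-like token, e.g. "dynamic" */
  (void)scan;
}
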
280 /* ------------------------------------------------------------------------ */
281 
282 #define KMP_MAX(x, y) ((x) > (y) ? (x) : (y))
283 #define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
284 
285 /* ------------------------------------------------------------------------ */
286 /* Enumeration types */
287 
288 enum kmp_state_timer {
289  ts_stop,
290  ts_start,
291  ts_pause,
292 
293  ts_last_state
294 };
295 
296 enum dynamic_mode {
297  dynamic_default,
298 #ifdef USE_LOAD_BALANCE
299  dynamic_load_balance,
300 #endif /* USE_LOAD_BALANCE */
301  dynamic_random,
302  dynamic_thread_limit,
303  dynamic_max
304 };
305 
306 /* external schedule constants, duplicate enum omp_sched in omp.h in order to
307  * not include it here */
308 #ifndef KMP_SCHED_TYPE_DEFINED
309 #define KMP_SCHED_TYPE_DEFINED
310 typedef enum kmp_sched {
311  kmp_sched_lower = 0, // lower and upper bounds are for routine parameter check
312  // Note: need to adjust __kmp_sch_map global array in case enum is changed
313  kmp_sched_static = 1, // mapped to kmp_sch_static_chunked (33)
314  kmp_sched_dynamic = 2, // mapped to kmp_sch_dynamic_chunked (35)
315  kmp_sched_guided = 3, // mapped to kmp_sch_guided_chunked (36)
316  kmp_sched_auto = 4, // mapped to kmp_sch_auto (38)
317  kmp_sched_upper_std = 5, // upper bound for standard schedules
318  kmp_sched_lower_ext = 100, // lower bound of Intel extension schedules
319  kmp_sched_trapezoidal = 101, // mapped to kmp_sch_trapezoidal (39)
320 #if KMP_STATIC_STEAL_ENABLED
321  kmp_sched_static_steal = 102, // mapped to kmp_sch_static_steal (44)
322 #endif
323  kmp_sched_upper,
324  kmp_sched_default = kmp_sched_static // default scheduling
325 } kmp_sched_t;
326 #endif
327 
332 enum sched_type : kmp_int32 {
334  kmp_sch_static_chunked = 33,
336  kmp_sch_dynamic_chunked = 35,
338  kmp_sch_runtime = 37,
340  kmp_sch_trapezoidal = 39,
341 
342  /* accessible only through KMP_SCHEDULE environment variable */
343  kmp_sch_static_greedy = 40,
344  kmp_sch_static_balanced = 41,
345  /* accessible only through KMP_SCHEDULE environment variable */
346  kmp_sch_guided_iterative_chunked = 42,
347  kmp_sch_guided_analytical_chunked = 43,
348  /* accessible only through KMP_SCHEDULE environment variable */
349  kmp_sch_static_steal = 44,
350 
351 #if OMP_45_ENABLED
352  /* static with chunk adjustment (e.g., simd) */
353  kmp_sch_static_balanced_chunked = 45,
354  kmp_sch_guided_simd = 46,
355  kmp_sch_runtime_simd = 47,
356 #endif
357 
358  /* accessible only through KMP_SCHEDULE environment variable */
362  kmp_ord_static_chunked = 65,
364  kmp_ord_dynamic_chunked = 67,
365  kmp_ord_guided_chunked = 68,
366  kmp_ord_runtime = 69,
368  kmp_ord_trapezoidal = 71,
371 #if OMP_40_ENABLED
372  /* Schedules for Distribute construct */
375 #endif
376 
377  /* For the "nomerge" versions, kmp_dispatch_next*() will always return a
378  single iteration/chunk, even if the loop is serialized. For the schedule
379  types listed above, the entire iteration vector is returned if the loop is
380  serialized. This doesn't work for gcc/gcomp sections. */
381  kmp_nm_lower = 160,
383  kmp_nm_static_chunked =
384  (kmp_sch_static_chunked - kmp_sch_lower + kmp_nm_lower),
386  kmp_nm_dynamic_chunked = 163,
388  kmp_nm_runtime = 165,
389  kmp_nm_auto = 166,
390  kmp_nm_trapezoidal = 167,
391 
392  /* accessible only through KMP_SCHEDULE environment variable */
393  kmp_nm_static_greedy = 168,
394  kmp_nm_static_balanced = 169,
395  /* accessible only through KMP_SCHEDULE environment variable */
396  kmp_nm_guided_iterative_chunked = 170,
397  kmp_nm_guided_analytical_chunked = 171,
398  kmp_nm_static_steal =
399  172, /* accessible only through OMP_SCHEDULE environment variable */
400 
401  kmp_nm_ord_static_chunked = 193,
403  kmp_nm_ord_dynamic_chunked = 195,
404  kmp_nm_ord_guided_chunked = 196,
405  kmp_nm_ord_runtime = 197,
407  kmp_nm_ord_trapezoidal = 199,
410 #if OMP_45_ENABLED
411  /* Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers. Since
412  we need to distinguish the three possible cases (no modifier, monotonic
413  modifier, nonmonotonic modifier), we need separate bits for each modifier.
414  The absence of monotonic does not imply nonmonotonic, especially since 4.5
415  says that the behaviour of the "no modifier" case is implementation defined,
416  but it will become "nonmonotonic" in 5.0.
417 
418  Since we're passing a full 32 bit value, we can use a couple of high bits
419  for these flags; out of paranoia we avoid the sign bit.
420 
421  These modifiers can be or-ed into non-static schedules by the compiler to
422  pass the additional information. They will be stripped early in the
423  processing in __kmp_dispatch_init when setting up schedules, so most of the
424  code won't ever see schedules with these bits set. */
425  kmp_sch_modifier_monotonic =
426  (1 << 29),
427  kmp_sch_modifier_nonmonotonic =
428  (1 << 30),
430 #define SCHEDULE_WITHOUT_MODIFIERS(s) \
431  (enum sched_type)( \
432  (s) & ~(kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic))
433 #define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sch_modifier_monotonic) != 0)
434 #define SCHEDULE_HAS_NONMONOTONIC(s) (((s)&kmp_sch_modifier_nonmonotonic) != 0)
435 #define SCHEDULE_HAS_NO_MODIFIERS(s) \
436  (((s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)) == 0)
437 #else
438 /* By doing this we hope to avoid multiple tests on OMP_45_ENABLED. Compilers
439  can now eliminate tests on compile time constants and dead code that results
440  from them, so we can leave code guarded by such an if in place. */
441 #define SCHEDULE_WITHOUT_MODIFIERS(s) (s)
442 #define SCHEDULE_HAS_MONOTONIC(s) false
443 #define SCHEDULE_HAS_NONMONOTONIC(s) false
444 #define SCHEDULE_HAS_NO_MODIFIERS(s) true
445 #endif
446 
448 };
449 
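/* Illustrative sketch (not part of the original header): inspecting and
   stripping the modifier bits from a schedule word the compiler may have
   tagged, e.g. (kmp_sch_dynamic_chunked | kmp_sch_modifier_nonmonotonic), as
   __kmp_dispatch_init does early on. The function name is hypothetical. */
static inline enum sched_type
__kmp_example_strip_modifiers(enum sched_type schedule, int *is_nonmonotonic) {
  *is_nonmonotonic = SCHEDULE_HAS_NONMONOTONIC(schedule);
  return SCHEDULE_WITHOUT_MODIFIERS(schedule);
}
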
450 /* Type to keep runtime schedule set via OMP_SCHEDULE or omp_set_schedule() */
451 typedef union kmp_r_sched {
452  struct {
453  enum sched_type r_sched_type;
454  int chunk;
455  };
456  kmp_int64 sched;
457 } kmp_r_sched_t;
458 
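/* Illustrative sketch (not part of the original header): because the struct
   and the kmp_int64 overlay the same storage, a runtime schedule (kind +
   chunk) can be compared in one 64-bit operation. The helper name is
   hypothetical. */
static inline int __kmp_example_same_run_sched(kmp_r_sched_t a,
                                               kmp_r_sched_t b) {
  return a.sched == b.sched; /* compares r_sched_type and chunk at once */
}
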
459 extern enum sched_type __kmp_sch_map[]; // map OMP 3.0 schedule types to our
460 // internal schedule types
461 
462 enum library_type {
463  library_none,
464  library_serial,
465  library_turnaround,
466  library_throughput
467 };
468 
469 #if KMP_OS_LINUX
470 enum clock_function_type {
471  clock_function_gettimeofday,
472  clock_function_clock_gettime
473 };
474 #endif /* KMP_OS_LINUX */
475 
476 #if KMP_MIC_SUPPORTED
477 enum mic_type { non_mic, mic1, mic2, mic3, dummy };
478 #endif
479 
480 /* -- fast reduction stuff ------------------------------------------------ */
481 
482 #undef KMP_FAST_REDUCTION_BARRIER
483 #define KMP_FAST_REDUCTION_BARRIER 1
484 
485 #undef KMP_FAST_REDUCTION_CORE_DUO
486 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
487 #define KMP_FAST_REDUCTION_CORE_DUO 1
488 #endif
489 
490 enum _reduction_method {
491  reduction_method_not_defined = 0,
492  critical_reduce_block = (1 << 8),
493  atomic_reduce_block = (2 << 8),
494  tree_reduce_block = (3 << 8),
495  empty_reduce_block = (4 << 8)
496 };
497 
498 // Description of the packed_reduction_method variable:
499 // The packed_reduction_method variable consists of two enum values that are
500 // packed together into byte 0 and byte 1:
501 // byte 0: (packed_reduction_method & 0x000000FF) is the 'enum barrier_type'
502 // value of the barrier that will be used in fast reduction: bs_plain_barrier
503 // or bs_reduction_barrier
504 // byte 1: (packed_reduction_method & 0x0000FF00) is the reduction method that
505 // will be used in fast reduction;
506 // the reduction method is of type 'enum _reduction_method' and is defined so
507 // that the bits of byte 0 are empty; no shift instruction is needed when
508 // packing/unpacking
509 
510 #if KMP_FAST_REDUCTION_BARRIER
511 #define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
512  ((reduction_method) | (barrier_type))
513 
514 #define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
515  ((enum _reduction_method)((packed_reduction_method) & (0x0000FF00)))
516 
517 #define UNPACK_REDUCTION_BARRIER(packed_reduction_method) \
518  ((enum barrier_type)((packed_reduction_method) & (0x000000FF)))
519 #else
520 #define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
521  (reduction_method)
522 
523 #define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
524  (packed_reduction_method)
525 
526 #define UNPACK_REDUCTION_BARRIER(packed_reduction_method) (bs_plain_barrier)
527 #endif
528 
529 #define TEST_REDUCTION_METHOD(packed_reduction_method, which_reduction_block) \
530  ((UNPACK_REDUCTION_METHOD(packed_reduction_method)) == \
531  (which_reduction_block))
532 
533 #if KMP_FAST_REDUCTION_BARRIER
534 #define TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER \
535  (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier))
536 
537 #define TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER \
538  (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_plain_barrier))
539 #endif
540 
541 typedef int PACKED_REDUCTION_METHOD_T;
542 
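/* Illustrative sketch (not part of the original header): testing which
   reduction method is packed into a PACKED_REDUCTION_METHOD_T value. The
   function name is hypothetical; bs_reduction_barrier refers to enum
   barrier_type, which is declared elsewhere in the runtime. */
static inline int
__kmp_example_uses_tree_reduce(PACKED_REDUCTION_METHOD_T packed) {
  /* a caller would have built 'packed' with something like
     PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier) */
  return TEST_REDUCTION_METHOD(packed, tree_reduce_block);
}
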
543 /* -- end of fast reduction stuff ----------------------------------------- */
544 
545 #if KMP_OS_WINDOWS
546 #define USE_CBLKDATA
547 #pragma warning(push)
548 #pragma warning(disable : 271 310)
549 #include <windows.h>
550 #pragma warning(pop)
551 #endif
552 
553 #if KMP_OS_UNIX
554 #include <dlfcn.h>
555 #include <pthread.h>
556 #endif
557 
558 /* Only Linux* OS and Windows* OS support thread affinity. */
559 #if KMP_AFFINITY_SUPPORTED
560 
561 // GROUP_AFFINITY is already defined for _MSC_VER>=1600 (VS2010 and later).
562 #if KMP_OS_WINDOWS
563 #if _MSC_VER < 1600
564 typedef struct GROUP_AFFINITY {
565  KAFFINITY Mask;
566  WORD Group;
567  WORD Reserved[3];
568 } GROUP_AFFINITY;
569 #endif /* _MSC_VER < 1600 */
570 #if KMP_GROUP_AFFINITY
571 extern int __kmp_num_proc_groups;
572 #else
573 static const int __kmp_num_proc_groups = 1;
574 #endif /* KMP_GROUP_AFFINITY */
575 typedef DWORD (*kmp_GetActiveProcessorCount_t)(WORD);
576 extern kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount;
577 
578 typedef WORD (*kmp_GetActiveProcessorGroupCount_t)(void);
579 extern kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount;
580 
581 typedef BOOL (*kmp_GetThreadGroupAffinity_t)(HANDLE, GROUP_AFFINITY *);
582 extern kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity;
583 
584 typedef BOOL (*kmp_SetThreadGroupAffinity_t)(HANDLE, const GROUP_AFFINITY *,
585  GROUP_AFFINITY *);
586 extern kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity;
587 #endif /* KMP_OS_WINDOWS */
588 
589 #if KMP_USE_HWLOC
590 extern hwloc_topology_t __kmp_hwloc_topology;
591 extern int __kmp_hwloc_error;
592 extern int __kmp_numa_detected;
593 extern int __kmp_tile_depth;
594 #endif
595 
596 extern size_t __kmp_affin_mask_size;
597 #define KMP_AFFINITY_CAPABLE() (__kmp_affin_mask_size > 0)
598 #define KMP_AFFINITY_DISABLE() (__kmp_affin_mask_size = 0)
599 #define KMP_AFFINITY_ENABLE(mask_size) (__kmp_affin_mask_size = mask_size)
600 #define KMP_CPU_SET_ITERATE(i, mask) \
601  for (i = (mask)->begin(); (int)i != (mask)->end(); i = (mask)->next(i))
602 #define KMP_CPU_SET(i, mask) (mask)->set(i)
603 #define KMP_CPU_ISSET(i, mask) (mask)->is_set(i)
604 #define KMP_CPU_CLR(i, mask) (mask)->clear(i)
605 #define KMP_CPU_ZERO(mask) (mask)->zero()
606 #define KMP_CPU_COPY(dest, src) (dest)->copy(src)
607 #define KMP_CPU_AND(dest, src) (dest)->bitwise_and(src)
608 #define KMP_CPU_COMPLEMENT(max_bit_number, mask) (mask)->bitwise_not()
609 #define KMP_CPU_UNION(dest, src) (dest)->bitwise_or(src)
610 #define KMP_CPU_ALLOC(ptr) (ptr = __kmp_affinity_dispatch->allocate_mask())
611 #define KMP_CPU_FREE(ptr) __kmp_affinity_dispatch->deallocate_mask(ptr)
612 #define KMP_CPU_ALLOC_ON_STACK(ptr) KMP_CPU_ALLOC(ptr)
613 #define KMP_CPU_FREE_FROM_STACK(ptr) KMP_CPU_FREE(ptr)
614 #define KMP_CPU_INTERNAL_ALLOC(ptr) KMP_CPU_ALLOC(ptr)
615 #define KMP_CPU_INTERNAL_FREE(ptr) KMP_CPU_FREE(ptr)
616 #define KMP_CPU_INDEX(arr, i) __kmp_affinity_dispatch->index_mask_array(arr, i)
617 #define KMP_CPU_ALLOC_ARRAY(arr, n) \
618  (arr = __kmp_affinity_dispatch->allocate_mask_array(n))
619 #define KMP_CPU_FREE_ARRAY(arr, n) \
620  __kmp_affinity_dispatch->deallocate_mask_array(arr)
621 #define KMP_CPU_INTERNAL_ALLOC_ARRAY(arr, n) KMP_CPU_ALLOC_ARRAY(arr, n)
622 #define KMP_CPU_INTERNAL_FREE_ARRAY(arr, n) KMP_CPU_FREE_ARRAY(arr, n)
623 #define __kmp_get_system_affinity(mask, abort_bool) \
624  (mask)->get_system_affinity(abort_bool)
625 #define __kmp_set_system_affinity(mask, abort_bool) \
626  (mask)->set_system_affinity(abort_bool)
627 #define __kmp_get_proc_group(mask) (mask)->get_proc_group()
628 
629 class KMPAffinity {
630 public:
631  class Mask {
632  public:
633  void *operator new(size_t n);
634  void operator delete(void *p);
635  void *operator new[](size_t n);
636  void operator delete[](void *p);
637  virtual ~Mask() {}
638  // Set bit i to 1
639  virtual void set(int i) {}
640  // Return bit i
641  virtual bool is_set(int i) const { return false; }
642  // Set bit i to 0
643  virtual void clear(int i) {}
644  // Zero out entire mask
645  virtual void zero() {}
646  // Copy src into this mask
647  virtual void copy(const Mask *src) {}
648  // this &= rhs
649  virtual void bitwise_and(const Mask *rhs) {}
650  // this |= rhs
651  virtual void bitwise_or(const Mask *rhs) {}
652  // this = ~this
653  virtual void bitwise_not() {}
654  // API for iterating over an affinity mask
655  // for (int i = mask->begin(); i != mask->end(); i = mask->next(i))
656  virtual int begin() const { return 0; }
657  virtual int end() const { return 0; }
658  virtual int next(int previous) const { return 0; }
659  // Set the system's affinity to this affinity mask's value
660  virtual int set_system_affinity(bool abort_on_error) const { return -1; }
661  // Set this affinity mask to the current system affinity
662  virtual int get_system_affinity(bool abort_on_error) { return -1; }
663  // Only 1 DWORD in the mask should have any procs set.
664  // Return the appropriate index, or -1 for an invalid mask.
665  virtual int get_proc_group() const { return -1; }
666  };
667  void *operator new(size_t n);
668  void operator delete(void *p);
669  // Need virtual destructor
670  virtual ~KMPAffinity() = default;
671  // Determine if affinity is capable
672  virtual void determine_capable(const char *env_var) {}
673  // Bind the current thread to os proc
674  virtual void bind_thread(int proc) {}
675  // Factory functions to allocate/deallocate a mask
676  virtual Mask *allocate_mask() { return nullptr; }
677  virtual void deallocate_mask(Mask *m) {}
678  virtual Mask *allocate_mask_array(int num) { return nullptr; }
679  virtual void deallocate_mask_array(Mask *m) {}
680  virtual Mask *index_mask_array(Mask *m, int index) { return nullptr; }
681  static void pick_api();
682  static void destroy_api();
683  enum api_type {
684  NATIVE_OS
685 #if KMP_USE_HWLOC
686  ,
687  HWLOC
688 #endif
689  };
690  virtual api_type get_api_type() const {
691  KMP_ASSERT(0);
692  return NATIVE_OS;
693  }
694 
695 private:
696  static bool picked_api;
697 };
698 
699 typedef KMPAffinity::Mask kmp_affin_mask_t;
700 extern KMPAffinity *__kmp_affinity_dispatch;
701 
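/* Illustrative sketch (not part of the original header): typical use of the
   KMP_CPU_* wrappers to build a single-proc mask and apply it to the calling
   thread. The function name and the proc argument are hypothetical. */
static inline void __kmp_example_bind_to_proc(int proc) {
  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC(mask); /* -> __kmp_affinity_dispatch->allocate_mask() */
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(proc, mask);
  __kmp_set_system_affinity(mask, /* abort_on_error = */ 1);
  KMP_CPU_FREE(mask);
}
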
702 // Declare local char buffers with this size for printing debug and info
703 // messages, using __kmp_affinity_print_mask().
704 #define KMP_AFFIN_MASK_PRINT_LEN 1024
705 
706 enum affinity_type {
707  affinity_none = 0,
708  affinity_physical,
709  affinity_logical,
710  affinity_compact,
711  affinity_scatter,
712  affinity_explicit,
713  affinity_balanced,
714  affinity_disabled, // not used outside the env var parser
715  affinity_default
716 };
717 
718 enum affinity_gran {
719  affinity_gran_fine = 0,
720  affinity_gran_thread,
721  affinity_gran_core,
722  affinity_gran_tile,
723  affinity_gran_numa,
724  affinity_gran_package,
725  affinity_gran_node,
726 #if KMP_GROUP_AFFINITY
727  // The "group" granularity isn't necessarily coarser than all of the
728  // other levels, but we put it last in the enum.
729  affinity_gran_group,
730 #endif /* KMP_GROUP_AFFINITY */
731  affinity_gran_default
732 };
733 
734 enum affinity_top_method {
735  affinity_top_method_all = 0, // try all (supported) methods, in order
736 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
737  affinity_top_method_apicid,
738  affinity_top_method_x2apicid,
739 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
740  affinity_top_method_cpuinfo, // KMP_CPUINFO_FILE is usable on Windows* OS, too
741 #if KMP_GROUP_AFFINITY
742  affinity_top_method_group,
743 #endif /* KMP_GROUP_AFFINITY */
744  affinity_top_method_flat,
745 #if KMP_USE_HWLOC
746  affinity_top_method_hwloc,
747 #endif
748  affinity_top_method_default
749 };
750 
751 #define affinity_respect_mask_default (-1)
752 
753 extern enum affinity_type __kmp_affinity_type; /* Affinity type */
754 extern enum affinity_gran __kmp_affinity_gran; /* Affinity granularity */
755 extern int __kmp_affinity_gran_levels; /* corresponding int value */
756 extern int __kmp_affinity_dups; /* Affinity duplicate masks */
757 extern enum affinity_top_method __kmp_affinity_top_method;
758 extern int __kmp_affinity_compact; /* Affinity 'compact' value */
759 extern int __kmp_affinity_offset; /* Affinity offset value */
760 extern int __kmp_affinity_verbose; /* Was verbose specified for KMP_AFFINITY? */
761 extern int __kmp_affinity_warnings; /* KMP_AFFINITY warnings enabled ? */
762 extern int __kmp_affinity_respect_mask; // Respect process' init affinity mask?
763 extern char *__kmp_affinity_proclist; /* proc ID list */
764 extern kmp_affin_mask_t *__kmp_affinity_masks;
765 extern unsigned __kmp_affinity_num_masks;
766 extern void __kmp_affinity_bind_thread(int which);
767 
768 extern kmp_affin_mask_t *__kmp_affin_fullMask;
769 extern char *__kmp_cpuinfo_file;
770 
771 #endif /* KMP_AFFINITY_SUPPORTED */
772 
773 #if OMP_40_ENABLED
774 
775 // This needs to be kept in sync with the values in omp.h !!!
776 typedef enum kmp_proc_bind_t {
777  proc_bind_false = 0,
778  proc_bind_true,
779  proc_bind_master,
780  proc_bind_close,
781  proc_bind_spread,
782  proc_bind_intel, // use KMP_AFFINITY interface
783  proc_bind_default
784 } kmp_proc_bind_t;
785 
786 typedef struct kmp_nested_proc_bind_t {
787  kmp_proc_bind_t *bind_types;
788  int size;
789  int used;
790 } kmp_nested_proc_bind_t;
791 
792 extern kmp_nested_proc_bind_t __kmp_nested_proc_bind;
793 
794 #endif /* OMP_40_ENABLED */
795 
796 #if KMP_AFFINITY_SUPPORTED
797 #define KMP_PLACE_ALL (-1)
798 #define KMP_PLACE_UNDEFINED (-2)
799 // Is KMP_AFFINITY being used instead of OMP_PROC_BIND/OMP_PLACES?
800 #define KMP_AFFINITY_NON_PROC_BIND \
801  ((__kmp_nested_proc_bind.bind_types[0] == proc_bind_false || \
802  __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) && \
803  (__kmp_affinity_num_masks > 0 || __kmp_affinity_type == affinity_balanced))
804 #endif /* KMP_AFFINITY_SUPPORTED */
805 
806 extern int __kmp_affinity_num_places;
807 
808 #if OMP_40_ENABLED
809 typedef enum kmp_cancel_kind_t {
810  cancel_noreq = 0,
811  cancel_parallel = 1,
812  cancel_loop = 2,
813  cancel_sections = 3,
814  cancel_taskgroup = 4
815 } kmp_cancel_kind_t;
816 #endif // OMP_40_ENABLED
817 
818 // KMP_HW_SUBSET support:
819 typedef struct kmp_hws_item {
820  int num;
821  int offset;
822 } kmp_hws_item_t;
823 
824 extern kmp_hws_item_t __kmp_hws_socket;
825 extern kmp_hws_item_t __kmp_hws_node;
826 extern kmp_hws_item_t __kmp_hws_tile;
827 extern kmp_hws_item_t __kmp_hws_core;
828 extern kmp_hws_item_t __kmp_hws_proc;
829 extern int __kmp_hws_requested;
830 extern int __kmp_hws_abs_flag; // absolute or per-item number requested
831 
832 /* ------------------------------------------------------------------------ */
833 
834 #define KMP_PAD(type, sz) \
835  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
836 
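/* Illustrative sketch (not part of the original header): KMP_PAD rounds
   sizeof(type) up to the next multiple of sz, so a 4-byte kmp_int32 padded to
   a (hypothetical) 64-byte boundary occupies 64 bytes. */
KMP_BUILD_ASSERT(KMP_PAD(kmp_int32, 64) == 64);
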
837 // We need to avoid using -1 as a GTID as +1 is added to the gtid
838 // when storing it in a lock, and the value 0 is reserved.
839 #define KMP_GTID_DNE (-2) /* Does not exist */
840 #define KMP_GTID_SHUTDOWN (-3) /* Library is shutting down */
841 #define KMP_GTID_MONITOR (-4) /* Monitor thread ID */
842 #define KMP_GTID_UNKNOWN (-5) /* Is not known */
843 #define KMP_GTID_MIN (-6) /* Minimal gtid for low bound check in DEBUG */
844 
845 #if OMP_50_ENABLED
846 /* OpenMP 5.0 Memory Management support */
847 extern int __kmp_memkind_available;
848 extern int __kmp_hbw_mem_available;
849 typedef void *omp_allocator_t;
850 extern const omp_allocator_t *OMP_NULL_ALLOCATOR;
851 extern const omp_allocator_t *omp_default_mem_alloc;
852 extern const omp_allocator_t *omp_large_cap_mem_alloc;
853 extern const omp_allocator_t *omp_const_mem_alloc;
854 extern const omp_allocator_t *omp_high_bw_mem_alloc;
855 extern const omp_allocator_t *omp_low_lat_mem_alloc;
856 extern const omp_allocator_t *omp_cgroup_mem_alloc;
857 extern const omp_allocator_t *omp_pteam_mem_alloc;
858 extern const omp_allocator_t *omp_thread_mem_alloc;
859 extern const omp_allocator_t *__kmp_def_allocator;
860 
861 extern void __kmpc_set_default_allocator(int gtid, const omp_allocator_t *al);
862 extern const omp_allocator_t *__kmpc_get_default_allocator(int gtid);
863 extern void *__kmpc_alloc(int gtid, size_t sz, const omp_allocator_t *al);
864 extern void __kmpc_free(int gtid, void *ptr, const omp_allocator_t *al);
865 
866 extern void __kmp_init_memkind();
867 extern void __kmp_fini_memkind();
868 #endif // OMP_50_ENABLED
869 
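#if OMP_50_ENABLED
/* Illustrative sketch (not part of the original header): pairing __kmpc_alloc
   with __kmpc_free using one of the predefined allocators declared above. The
   gtid argument and the choice of omp_high_bw_mem_alloc are hypothetical. */
static inline void __kmp_example_omp50_alloc(int gtid, size_t sz) {
  void *p = __kmpc_alloc(gtid, sz, omp_high_bw_mem_alloc);
  /* ... use the allocation ... */
  __kmpc_free(gtid, p, omp_high_bw_mem_alloc);
}
#endif // OMP_50_ENABLED
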
870 /* ------------------------------------------------------------------------ */
871 
872 #define KMP_UINT64_MAX \
873  (~((kmp_uint64)1 << ((sizeof(kmp_uint64) * (1 << 3)) - 1)))
874 
875 #define KMP_MIN_NTH 1
876 
877 #ifndef KMP_MAX_NTH
878 #if defined(PTHREAD_THREADS_MAX) && PTHREAD_THREADS_MAX < INT_MAX
879 #define KMP_MAX_NTH PTHREAD_THREADS_MAX
880 #else
881 #define KMP_MAX_NTH INT_MAX
882 #endif
883 #endif /* KMP_MAX_NTH */
884 
885 #ifdef PTHREAD_STACK_MIN
886 #define KMP_MIN_STKSIZE PTHREAD_STACK_MIN
887 #else
888 #define KMP_MIN_STKSIZE ((size_t)(32 * 1024))
889 #endif
890 
891 #define KMP_MAX_STKSIZE (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
892 
893 #if KMP_ARCH_X86
894 #define KMP_DEFAULT_STKSIZE ((size_t)(2 * 1024 * 1024))
895 #elif KMP_ARCH_X86_64
896 #define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
897 #define KMP_BACKUP_STKSIZE ((size_t)(2 * 1024 * 1024))
898 #else
899 #define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
900 #endif
901 
902 #define KMP_DEFAULT_MALLOC_POOL_INCR ((size_t)(1024 * 1024))
903 #define KMP_MIN_MALLOC_POOL_INCR ((size_t)(4 * 1024))
904 #define KMP_MAX_MALLOC_POOL_INCR \
905  (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
906 
907 #define KMP_MIN_STKOFFSET (0)
908 #define KMP_MAX_STKOFFSET KMP_MAX_STKSIZE
909 #if KMP_OS_DARWIN
910 #define KMP_DEFAULT_STKOFFSET KMP_MIN_STKOFFSET
911 #else
912 #define KMP_DEFAULT_STKOFFSET CACHE_LINE
913 #endif
914 
915 #define KMP_MIN_STKPADDING (0)
916 #define KMP_MAX_STKPADDING (2 * 1024 * 1024)
917 
918 #define KMP_BLOCKTIME_MULTIPLIER \
919  (1000) /* number of blocktime units per second */
920 #define KMP_MIN_BLOCKTIME (0)
921 #define KMP_MAX_BLOCKTIME \
922  (INT_MAX) /* Must be this value for the "infinite" setting to work */
923 #define KMP_DEFAULT_BLOCKTIME (200) /* __kmp_blocktime is in milliseconds */
924 
925 #if KMP_USE_MONITOR
926 #define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024))
927 #define KMP_MIN_MONITOR_WAKEUPS (1) // min times monitor wakes up per second
928 #define KMP_MAX_MONITOR_WAKEUPS (1000) // max times monitor can wake up per sec
929 
930 /* Calculate new number of monitor wakeups for a specific block time based on
931  previous monitor_wakeups. Only allow increasing number of wakeups */
932 #define KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
933  (((blocktime) == KMP_MAX_BLOCKTIME) \
934  ? (monitor_wakeups) \
935  : ((blocktime) == KMP_MIN_BLOCKTIME) \
936  ? KMP_MAX_MONITOR_WAKEUPS \
937  : ((monitor_wakeups) > (KMP_BLOCKTIME_MULTIPLIER / (blocktime))) \
938  ? (monitor_wakeups) \
939  : (KMP_BLOCKTIME_MULTIPLIER) / (blocktime))
940 
941 /* Calculate number of intervals for a specific block time based on
942  monitor_wakeups */
943 #define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
944  (((blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1) / \
945  (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)))
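
/* Illustrative worked example (not part of the original header), assuming the
   default 200 ms blocktime: KMP_BLOCKTIME_MULTIPLIER / 200 gives 5 monitor
   wakeups per second, and 200 ms then spans a single monitor interval. */
static inline void __kmp_example_monitor_math(void) {
  int wakeups = KMP_WAKEUPS_FROM_BLOCKTIME(KMP_DEFAULT_BLOCKTIME,
                                           KMP_MIN_MONITOR_WAKEUPS); /* -> 5 */
  int intervals =
      KMP_INTERVALS_FROM_BLOCKTIME(KMP_DEFAULT_BLOCKTIME, wakeups); /* -> 1 */
  (void)intervals;
}
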
946 #else
947 #define KMP_BLOCKTIME(team, tid) \
948  (get__bt_set(team, tid) ? get__blocktime(team, tid) : __kmp_dflt_blocktime)
949 #if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
950 // HW TSC is used to reduce overhead (clock tick instead of nanosecond).
951 extern kmp_uint64 __kmp_ticks_per_msec;
952 #if KMP_COMPILER_ICC
953 #define KMP_NOW() ((kmp_uint64)_rdtsc())
954 #else
955 #define KMP_NOW() __kmp_hardware_timestamp()
956 #endif
957 #define KMP_NOW_MSEC() (KMP_NOW() / __kmp_ticks_per_msec)
958 #define KMP_BLOCKTIME_INTERVAL(team, tid) \
959  (KMP_BLOCKTIME(team, tid) * __kmp_ticks_per_msec)
960 #define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
961 #else
962 // System time is retrieved sporadically while blocking.
963 extern kmp_uint64 __kmp_now_nsec();
964 #define KMP_NOW() __kmp_now_nsec()
965 #define KMP_NOW_MSEC() (KMP_NOW() / KMP_USEC_PER_SEC)
966 #define KMP_BLOCKTIME_INTERVAL(team, tid) \
967  (KMP_BLOCKTIME(team, tid) * KMP_USEC_PER_SEC)
968 #define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
969 #endif
970 #define KMP_YIELD_NOW() \
971  (KMP_NOW_MSEC() / KMP_MAX(__kmp_dflt_blocktime, 1) % \
972  (__kmp_yield_on_count + __kmp_yield_off_count) < \
973  (kmp_uint32)__kmp_yield_on_count)
974 #endif // KMP_USE_MONITOR
975 
976 #define KMP_MIN_STATSCOLS 40
977 #define KMP_MAX_STATSCOLS 4096
978 #define KMP_DEFAULT_STATSCOLS 80
979 
980 #define KMP_MIN_INTERVAL 0
981 #define KMP_MAX_INTERVAL (INT_MAX - 1)
982 #define KMP_DEFAULT_INTERVAL 0
983 
984 #define KMP_MIN_CHUNK 1
985 #define KMP_MAX_CHUNK (INT_MAX - 1)
986 #define KMP_DEFAULT_CHUNK 1
987 
988 #define KMP_MIN_INIT_WAIT 1
989 #define KMP_MAX_INIT_WAIT (INT_MAX / 2)
990 #define KMP_DEFAULT_INIT_WAIT 2048U
991 
992 #define KMP_MIN_NEXT_WAIT 1
993 #define KMP_MAX_NEXT_WAIT (INT_MAX / 2)
994 #define KMP_DEFAULT_NEXT_WAIT 1024U
995 
996 #define KMP_DFLT_DISP_NUM_BUFF 7
997 #define KMP_MAX_ORDERED 8
998 
999 #define KMP_MAX_FIELDS 32
1000 
1001 #define KMP_MAX_BRANCH_BITS 31
1002 
1003 #define KMP_MAX_ACTIVE_LEVELS_LIMIT INT_MAX
1004 
1005 #define KMP_MAX_DEFAULT_DEVICE_LIMIT INT_MAX
1006 
1007 #define KMP_MAX_TASK_PRIORITY_LIMIT INT_MAX
1008 
1009 /* Minimum number of threads before switch to TLS gtid (experimentally
1010  determined) */
1011 /* josh TODO: what about OS X* tuning? */
1012 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1013 #define KMP_TLS_GTID_MIN 5
1014 #else
1015 #define KMP_TLS_GTID_MIN INT_MAX
1016 #endif
1017 
1018 #define KMP_MASTER_TID(tid) ((tid) == 0)
1019 #define KMP_WORKER_TID(tid) ((tid) != 0)
1020 
1021 #define KMP_MASTER_GTID(gtid) (__kmp_tid_from_gtid((gtid)) == 0)
1022 #define KMP_WORKER_GTID(gtid) (__kmp_tid_from_gtid((gtid)) != 0)
1023 #define KMP_INITIAL_GTID(gtid) ((gtid) == 0)
1024 
1025 #ifndef TRUE
1026 #define FALSE 0
1027 #define TRUE (!FALSE)
1028 #endif
1029 
1030 /* NOTE: all of the following constants must be even */
1031 
1032 #if KMP_OS_WINDOWS
1033 #define KMP_INIT_WAIT 64U /* initial number of spin-tests */
1034 #define KMP_NEXT_WAIT 32U /* subsequent number of spin-tests */
1035 #elif KMP_OS_CNK
1036 #define KMP_INIT_WAIT 16U /* initial number of spin-tests */
1037 #define KMP_NEXT_WAIT 8U /* subsequent number of spin-tests */
1038 #elif KMP_OS_LINUX
1039 #define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
1040 #define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
1041 #elif KMP_OS_DARWIN
1042 /* TODO: tune for KMP_OS_DARWIN */
1043 #define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
1044 #define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
1045 #elif KMP_OS_FREEBSD
1046 /* TODO: tune for KMP_OS_FREEBSD */
1047 #define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
1048 #define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
1049 #elif KMP_OS_NETBSD
1050 /* TODO: tune for KMP_OS_NETBSD */
1051 #define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
1052 #define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
1053 #endif
1054 
1055 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1056 typedef struct kmp_cpuid {
1057  kmp_uint32 eax;
1058  kmp_uint32 ebx;
1059  kmp_uint32 ecx;
1060  kmp_uint32 edx;
1061 } kmp_cpuid_t;
1062 extern void __kmp_x86_cpuid(int mode, int mode2, struct kmp_cpuid *p);
1063 #if KMP_ARCH_X86
1064 extern void __kmp_x86_pause(void);
1065 #elif KMP_MIC
1066 // Performance testing on KNC (C0QS-7120 P/A/X/D, 61-core, 16 GB Memory) showed
1067 // regression after removal of extra PAUSE from KMP_YIELD_SPIN(). Changing
1068 // the delay from 100 to 300 showed even better performance than double PAUSE
1069 // on Spec OMP2001 and LCPC tasking tests, no regressions on EPCC.
1070 static inline void __kmp_x86_pause(void) { _mm_delay_32(300); }
1071 #else
1072 static inline void __kmp_x86_pause(void) { _mm_pause(); }
1073 #endif
1074 #define KMP_CPU_PAUSE() __kmp_x86_pause()
1075 #elif KMP_ARCH_PPC64
1076 #define KMP_PPC64_PRI_LOW() __asm__ volatile("or 1, 1, 1")
1077 #define KMP_PPC64_PRI_MED() __asm__ volatile("or 2, 2, 2")
1078 #define KMP_PPC64_PRI_LOC_MB() __asm__ volatile("" : : : "memory")
1079 #define KMP_CPU_PAUSE() \
1080  do { \
1081  KMP_PPC64_PRI_LOW(); \
1082  KMP_PPC64_PRI_MED(); \
1083  KMP_PPC64_PRI_LOC_MB(); \
1084  } while (0)
1085 #else
1086 #define KMP_CPU_PAUSE() /* nothing to do */
1087 #endif
1088 
1089 #define KMP_INIT_YIELD(count) \
1090  { (count) = __kmp_yield_init; }
1091 
1092 #define KMP_YIELD(cond) \
1093  { \
1094  KMP_CPU_PAUSE(); \
1095  __kmp_yield((cond)); \
1096  }
1097 
1098 // Note the decrement of 2 in the following macros. With KMP_LIBRARY=turnaround,
1099 // there should be no yielding since the initial value from KMP_INIT_YIELD() is odd.
1100 
1101 #define KMP_YIELD_WHEN(cond, count) \
1102  { \
1103  KMP_CPU_PAUSE(); \
1104  (count) -= 2; \
1105  if (!(count)) { \
1106  __kmp_yield(cond); \
1107  (count) = __kmp_yield_next; \
1108  } \
1109  }
1110 #define KMP_YIELD_SPIN(count) \
1111  { \
1112  KMP_CPU_PAUSE(); \
1113  (count) -= 2; \
1114  if (!(count)) { \
1115  __kmp_yield(1); \
1116  (count) = __kmp_yield_next; \
1117  } \
1118  }
1119 
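/* Illustrative sketch (not part of the original header): the spin-wait pattern
   these macros are built for. 'flag' and the helper name are hypothetical, and
   the sketch assumes the declarations of __kmp_yield_init, __kmp_yield_next
   and __kmp_yield() that appear later in this header. With
   KMP_LIBRARY=turnaround the counter starts odd, so decrementing it by 2 never
   reaches zero and the yield is skipped. */
static inline void __kmp_example_spin_wait(volatile kmp_int32 *flag) {
  kmp_uint32 spins;
  KMP_INIT_YIELD(spins); /* spins = __kmp_yield_init */
  while (*flag == 0) {
    KMP_YIELD_SPIN(spins); /* pause; yield once the counter reaches zero */
  }
}
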
1120 /* ------------------------------------------------------------------------ */
1121 /* Support datatypes for the orphaned construct nesting checks. */
1122 /* ------------------------------------------------------------------------ */
1123 
1124 enum cons_type {
1125  ct_none,
1126  ct_parallel,
1127  ct_pdo,
1128  ct_pdo_ordered,
1129  ct_psections,
1130  ct_psingle,
1131 
1132  /* the following must be left in order and not split up */
1133  ct_taskq,
1134  ct_task, // really task inside non-ordered taskq, considered worksharing type
1135  ct_task_ordered, /* really task inside ordered taskq, considered a worksharing
1136  type */
1137  /* the preceding must be left in order and not split up */
1138 
1139  ct_critical,
1140  ct_ordered_in_parallel,
1141  ct_ordered_in_pdo,
1142  ct_ordered_in_taskq,
1143  ct_master,
1144  ct_reduce,
1145  ct_barrier
1146 };
1147 
1148 /* test to see if we are in a taskq construct */
1149 #define IS_CONS_TYPE_TASKQ(ct) \
1150  (((int)(ct)) >= ((int)ct_taskq) && ((int)(ct)) <= ((int)ct_task_ordered))
1151 #define IS_CONS_TYPE_ORDERED(ct) \
1152  ((ct) == ct_pdo_ordered || (ct) == ct_task_ordered)
1153 
1154 struct cons_data {
1155  ident_t const *ident;
1156  enum cons_type type;
1157  int prev;
1158  kmp_user_lock_p
1159  name; /* address exclusively for critical section name comparison */
1160 };
1161 
1162 struct cons_header {
1163  int p_top, w_top, s_top;
1164  int stack_size, stack_top;
1165  struct cons_data *stack_data;
1166 };
1167 
1168 struct kmp_region_info {
1169  char *text;
1170  int offset[KMP_MAX_FIELDS];
1171  int length[KMP_MAX_FIELDS];
1172 };
1173 
1174 /* ---------------------------------------------------------------------- */
1175 /* ---------------------------------------------------------------------- */
1176 
1177 #if KMP_OS_WINDOWS
1178 typedef HANDLE kmp_thread_t;
1179 typedef DWORD kmp_key_t;
1180 #endif /* KMP_OS_WINDOWS */
1181 
1182 #if KMP_OS_UNIX
1183 typedef pthread_t kmp_thread_t;
1184 typedef pthread_key_t kmp_key_t;
1185 #endif
1186 
1187 extern kmp_key_t __kmp_gtid_threadprivate_key;
1188 
1189 typedef struct kmp_sys_info {
1190  long maxrss; /* the maximum resident set size utilized (in kilobytes) */
1191  long minflt; /* the number of page faults serviced without any I/O */
1192  long majflt; /* the number of page faults serviced that required I/O */
1193  long nswap; /* the number of times a process was "swapped" out of memory */
1194  long inblock; /* the number of times the file system had to perform input */
1195  long oublock; /* the number of times the file system had to perform output */
1196  long nvcsw; /* the number of times a context switch was performed voluntarily */
1197  long nivcsw; /* the number of times a context switch was forced */
1198 } kmp_sys_info_t;
1199 
1200 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1201 typedef struct kmp_cpuinfo {
1202  int initialized; // If 0, other fields are not initialized.
1203  int signature; // CPUID(1).EAX
1204  int family; // CPUID(1).EAX[27:20]+CPUID(1).EAX[11:8] (Extended Family+Family)
1205  int model; // ( CPUID(1).EAX[19:16] << 4 ) + CPUID(1).EAX[7:4] ( ( Extended
1206  // Model << 4 ) + Model)
1207  int stepping; // CPUID(1).EAX[3:0] ( Stepping )
1208  int sse2; // 0 if SSE2 instructions are not supported, 1 otherwise.
1209  int rtm; // 0 if RTM instructions are not supported, 1 otherwise.
1210  int cpu_stackoffset;
1211  int apic_id;
1212  int physical_id;
1213  int logical_id;
1214  kmp_uint64 frequency; // Nominal CPU frequency in Hz.
1215  char name[3 * sizeof(kmp_cpuid_t)]; // CPUID(0x80000002,0x80000003,0x80000004)
1216 } kmp_cpuinfo_t;
1217 #endif
1218 
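/* Illustrative sketch (not part of the original header): decoding the family
   and model fields from CPUID(1).EAX exactly as described by the comments
   above. The helper name is hypothetical. */
static inline void __kmp_example_decode_signature(int eax, int *family,
                                                  int *model) {
  *family = ((eax >> 20) & 0xff) + ((eax >> 8) & 0x0f); /* ext. family + family */
  *model = (((eax >> 16) & 0x0f) << 4) + ((eax >> 4) & 0x0f); /* (ext. model << 4) + model */
}
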
1219 #if USE_ITT_BUILD
1220 // We cannot include "kmp_itt.h" due to circular dependency. Declare the only
1221 // required type here. Later we will check the type meets requirements.
1222 typedef int kmp_itt_mark_t;
1223 #define KMP_ITT_DEBUG 0
1224 #endif /* USE_ITT_BUILD */
1225 
1226 /* Taskq data structures */
1227 
1228 #define HIGH_WATER_MARK(nslots) (((nslots)*3) / 4)
1229 // num thunks that each thread can simultaneously execute from a task queue
1230 #define __KMP_TASKQ_THUNKS_PER_TH 1
1231 
1232 /* flags for taskq_global_flags, kmp_task_queue_t tq_flags, kmpc_thunk_t
1233  th_flags */
1234 
1235 #define TQF_IS_ORDERED 0x0001 // __kmpc_taskq interface, taskq ordered
1236 // __kmpc_taskq interface, taskq with lastprivate list
1237 #define TQF_IS_LASTPRIVATE 0x0002
1238 #define TQF_IS_NOWAIT 0x0004 // __kmpc_taskq interface, end taskq nowait
1239 // __kmpc_taskq interface, use heuristics to decide task queue size
1240 #define TQF_HEURISTICS 0x0008
1241 
1242 // __kmpc_taskq interface, reserved for future use
1243 #define TQF_INTERFACE_RESERVED1 0x0010
1244 // __kmpc_taskq interface, reserved for future use
1245 #define TQF_INTERFACE_RESERVED2 0x0020
1246 // __kmpc_taskq interface, reserved for future use
1247 #define TQF_INTERFACE_RESERVED3 0x0040
1248 // __kmpc_taskq interface, reserved for future use
1249 #define TQF_INTERFACE_RESERVED4 0x0080
1250 
1251 #define TQF_INTERFACE_FLAGS 0x00ff // all the __kmpc_taskq interface flags
1252 // internal/read by instrumentation; only used with TQF_IS_LASTPRIVATE
1253 #define TQF_IS_LAST_TASK 0x0100
1254 // internal use only; this thunk->th_task is the taskq_task
1255 #define TQF_TASKQ_TASK 0x0200
1256 // internal use only; must release worker threads once ANY queued task
1257 // exists (global)
1258 #define TQF_RELEASE_WORKERS 0x0400
1259 // internal use only; notify workers that master has finished enqueuing tasks
1260 #define TQF_ALL_TASKS_QUEUED 0x0800
1261 // internal use only: this queue encountered in parallel context: not serialized
1262 #define TQF_PARALLEL_CONTEXT 0x1000
1263 // internal use only; this queue is on the freelist and not in use
1264 #define TQF_DEALLOCATED 0x2000
1265 
1266 #define TQF_INTERNAL_FLAGS 0x3f00 // all the internal use only flags
1267 
1268 typedef struct KMP_ALIGN_CACHE kmpc_aligned_int32_t {
1269  kmp_int32 ai_data;
1270 } kmpc_aligned_int32_t;
1271 
1272 typedef struct KMP_ALIGN_CACHE kmpc_aligned_queue_slot_t {
1273  struct kmpc_thunk_t *qs_thunk;
1274 } kmpc_aligned_queue_slot_t;
1275 
1276 typedef struct kmpc_task_queue_t {
1277  /* task queue linkage fields for n-ary tree of queues (locked with global
1278  taskq_tree_lck) */
1279  kmp_lock_t tq_link_lck; /* lock for child link, child next/prev links and
1280  child ref counts */
1281  union {
1282  struct kmpc_task_queue_t *tq_parent; // pointer to parent taskq, not locked
1283  // for taskq internal freelists, locked with global taskq_freelist_lck
1284  struct kmpc_task_queue_t *tq_next_free;
1285  } tq;
1286  // pointer to linked-list of children, locked by tq's tq_link_lck
1287  volatile struct kmpc_task_queue_t *tq_first_child;
1288  // next child in linked-list, locked by parent tq's tq_link_lck
1289  struct kmpc_task_queue_t *tq_next_child;
1290  // previous child in linked-list, locked by parent tq's tq_link_lck
1291  struct kmpc_task_queue_t *tq_prev_child;
1292  // reference count of threads with access to this task queue
1293  volatile kmp_int32 tq_ref_count;
1294  /* (other than the thread executing the kmpc_end_taskq call) */
1295  /* locked by parent tq's tq_link_lck */
1296 
1297  /* shared data for task queue */
1298  /* per-thread array of pointers to shared variable structures */
1299  struct kmpc_aligned_shared_vars_t *tq_shareds;
1300  /* only one array element exists for all but outermost taskq */
1301 
1302  /* bookkeeping for ordered task queue */
1303  kmp_uint32 tq_tasknum_queuing; // ordered task # assigned while queuing tasks
1304  // ordered number of next task to be served (executed)
1305  volatile kmp_uint32 tq_tasknum_serving;
1306 
1307  /* thunk storage management for task queue */
1308  kmp_lock_t tq_free_thunks_lck; /* lock for thunk freelist manipulation */
1309  // thunk freelist, chained via th.th_next_free
1310  struct kmpc_thunk_t *tq_free_thunks;
1311  // space allocated for thunks for this task queue
1312  struct kmpc_thunk_t *tq_thunk_space;
1313 
1314  /* data fields for queue itself */
1315  kmp_lock_t tq_queue_lck; /* lock for [de]enqueue operations: tq_queue,
1316  tq_head, tq_tail, tq_nfull */
1317  /* array of queue slots to hold thunks for tasks */
1318  kmpc_aligned_queue_slot_t *tq_queue;
1319  volatile struct kmpc_thunk_t *tq_taskq_slot; /* special slot for taskq task
1320  thunk, occupied if not NULL */
1321  kmp_int32 tq_nslots; /* # of tq_thunk_space thunks alloc'd (not incl.
1322  tq_taskq_slot space) */
1323  kmp_int32 tq_head; // enqueue puts item here (index into tq_queue array)
1324  kmp_int32 tq_tail; // dequeue takes item from here (index into tq_queue array)
1325  volatile kmp_int32 tq_nfull; // # of occupied entries in task queue right now
1326  kmp_int32 tq_hiwat; /* high-water mark for tq_nfull and queue scheduling */
1327  volatile kmp_int32 tq_flags; /* TQF_xxx */
1328 
1329  /* bookkeeping for outstanding thunks */
1330 
1331  /* per-thread array for # of regular thunks currently being executed */
1332  struct kmpc_aligned_int32_t *tq_th_thunks;
1333  kmp_int32 tq_nproc; /* number of thunks in the th_thunks array */
1334 
1335  /* statistics library bookkeeping */
1336  ident_t *tq_loc; /* source location information for taskq directive */
1337 } kmpc_task_queue_t;
1338 
1339 typedef void (*kmpc_task_t)(kmp_int32 global_tid, struct kmpc_thunk_t *thunk);
1340 
1341 /* sizeof_shareds passed as arg to __kmpc_taskq call */
1342 typedef struct kmpc_shared_vars_t { /* aligned during dynamic allocation */
1343  kmpc_task_queue_t *sv_queue; /* (pointers to) shared vars */
1344 } kmpc_shared_vars_t;
1345 
1346 typedef struct KMP_ALIGN_CACHE kmpc_aligned_shared_vars_t {
1347  volatile struct kmpc_shared_vars_t *ai_data;
1348 } kmpc_aligned_shared_vars_t;
1349 
1350 /* sizeof_thunk passed as arg to kmpc_taskq call */
1351 typedef struct kmpc_thunk_t { /* aligned during dynamic allocation */
1352  union { /* field used for internal freelists too */
1353  kmpc_shared_vars_t *th_shareds;
1354  struct kmpc_thunk_t *th_next_free; /* freelist of individual thunks within
1355  queue, head at tq_free_thunks */
1356  } th;
1357  kmpc_task_t th_task; /* taskq_task if flags & TQF_TASKQ_TASK */
1358  struct kmpc_thunk_t *th_encl_thunk; /* pointer to dynamically enclosing thunk
1359  on this thread's call stack */
1360  // TQF_xxx (tq_flags interface plus possible internal flags)
1361  kmp_int32 th_flags;
1362 
1363  kmp_int32 th_status;
1364  kmp_uint32 th_tasknum; /* task number assigned in order of queuing, used for
1365  ordered sections */
1366  /* private vars */
1367 } kmpc_thunk_t;
1368 
1369 typedef struct KMP_ALIGN_CACHE kmp_taskq {
1370  int tq_curr_thunk_capacity;
1371 
1372  kmpc_task_queue_t *tq_root;
1373  kmp_int32 tq_global_flags;
1374 
1375  kmp_lock_t tq_freelist_lck;
1376  kmpc_task_queue_t *tq_freelist;
1377 
1378  kmpc_thunk_t **tq_curr_thunk;
1379 } kmp_taskq_t;
1380 
1381 /* END Taskq data structures */
1382 
1383 typedef kmp_int32 kmp_critical_name[8];
1384 
1394 typedef void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);
1395 typedef void (*kmpc_micro_bound)(kmp_int32 *bound_tid, kmp_int32 *bound_nth,
1396  ...);
1397 
1402 /* ---------------------------------------------------------------------------
1403  */
1404 /* Threadprivate initialization/finalization function declarations */
1405 
1406 /* for non-array objects: __kmpc_threadprivate_register() */
1407 
1412 typedef void *(*kmpc_ctor)(void *);
1413 
1418 typedef void (*kmpc_dtor)(
1419  void * /*, size_t */); /* 2nd arg: magic number for KCC unused by Intel
1420  compiler */
1425 typedef void *(*kmpc_cctor)(void *, void *);
1426 
1427 /* for array objects: __kmpc_threadprivate_register_vec() */
1428 /* First arg: "this" pointer */
1429 /* Last arg: number of array elements */
1435 typedef void *(*kmpc_ctor_vec)(void *, size_t);
1441 typedef void (*kmpc_dtor_vec)(void *, size_t);
1447 typedef void *(*kmpc_cctor_vec)(void *, void *,
1448  size_t); /* function unused by compiler */
1449 
1454 /* keeps tracked of threadprivate cache allocations for cleanup later */
1455 typedef struct kmp_cached_addr {
1456  void **addr; /* address of allocated cache */
1457  void ***compiler_cache; /* pointer to compiler's cache */
1458  void *data; /* pointer to global data */
1459  struct kmp_cached_addr *next; /* pointer to next cached address */
1460 } kmp_cached_addr_t;
1461 
1462 struct private_data {
1463  struct private_data *next; /* The next descriptor in the list */
1464  void *data; /* The data buffer for this descriptor */
1465  int more; /* The repeat count for this descriptor */
1466  size_t size; /* The data size for this descriptor */
1467 };
1468 
1469 struct private_common {
1470  struct private_common *next;
1471  struct private_common *link;
1472  void *gbl_addr;
1473  void *par_addr; /* par_addr == gbl_addr for MASTER thread */
1474  size_t cmn_size;
1475 };
1476 
1477 struct shared_common {
1478  struct shared_common *next;
1479  struct private_data *pod_init;
1480  void *obj_init;
1481  void *gbl_addr;
1482  union {
1483  kmpc_ctor ctor;
1484  kmpc_ctor_vec ctorv;
1485  } ct;
1486  union {
1487  kmpc_cctor cctor;
1488  kmpc_cctor_vec cctorv;
1489  } cct;
1490  union {
1491  kmpc_dtor dtor;
1492  kmpc_dtor_vec dtorv;
1493  } dt;
1494  size_t vec_len;
1495  int is_vec;
1496  size_t cmn_size;
1497 };
1498 
1499 #define KMP_HASH_TABLE_LOG2 9 /* log2 of the hash table size */
1500 #define KMP_HASH_TABLE_SIZE \
1501  (1 << KMP_HASH_TABLE_LOG2) /* size of the hash table */
1502 #define KMP_HASH_SHIFT 3 /* throw away this many low bits from the address */
1503 #define KMP_HASH(x) \
1504  ((((kmp_uintptr_t)x) >> KMP_HASH_SHIFT) & (KMP_HASH_TABLE_SIZE - 1))
1505 
1506 struct common_table {
1507  struct private_common *data[KMP_HASH_TABLE_SIZE];
1508 };
1509 
1510 struct shared_table {
1511  struct shared_common *data[KMP_HASH_TABLE_SIZE];
1512 };
1513 
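/* Illustrative sketch (not part of the original header): how a threadprivate
   global's address is hashed into the 512-entry tables above when looking up
   its per-thread copy. The function name is hypothetical. */
static inline struct private_common *
__kmp_example_lookup(struct common_table *tbl, void *gbl_addr) {
  struct private_common *tn = tbl->data[KMP_HASH(gbl_addr)];
  while (tn != NULL && tn->gbl_addr != gbl_addr)
    tn = tn->next;
  return tn; /* NULL if this address has not been registered */
}
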
1514 /* ------------------------------------------------------------------------ */
1515 
1516 #if KMP_USE_HIER_SCHED
1517 // Shared barrier data that exists inside a single unit of the scheduling
1518 // hierarchy
1519 typedef struct kmp_hier_private_bdata_t {
1520  kmp_int32 num_active;
1521  kmp_uint64 index;
1522  kmp_uint64 wait_val[2];
1523 } kmp_hier_private_bdata_t;
1524 #endif
1525 
1526 typedef struct kmp_sched_flags {
1527  unsigned ordered : 1;
1528  unsigned nomerge : 1;
1529  unsigned contains_last : 1;
1530 #if KMP_USE_HIER_SCHED
1531  unsigned use_hier : 1;
1532  unsigned unused : 28;
1533 #else
1534  unsigned unused : 29;
1535 #endif
1536 } kmp_sched_flags_t;
1537 
1538 KMP_BUILD_ASSERT(sizeof(kmp_sched_flags_t) == 4);
1539 
1540 #if KMP_STATIC_STEAL_ENABLED
1541 typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
1542  kmp_int32 count;
1543  kmp_int32 ub;
1544  /* Adding KMP_ALIGN_CACHE here doesn't help / can hurt performance */
1545  kmp_int32 lb;
1546  kmp_int32 st;
1547  kmp_int32 tc;
1548  kmp_int32 static_steal_counter; /* for static_steal only; maybe better to put
1549  after ub */
1550 
1551  // KMP_ALIGN( 16 ) ensures ( if the KMP_ALIGN macro is turned on )
1552  // a) parm3 is properly aligned and
1553  // b) all parm1-4 are in the same cache line.
1554  // Because parm1-4 are used together, performance seems to be better
1555  // if they are in the same line (not measured though).
1556 
1557  struct KMP_ALIGN(32) { // AC: changed 16 to 32 in order to simplify template
1558  kmp_int32 parm1; // structures in kmp_dispatch.cpp. This should
1559  kmp_int32 parm2; // make no real change at least while padding is off.
1560  kmp_int32 parm3;
1561  kmp_int32 parm4;
1562  };
1563 
1564  kmp_uint32 ordered_lower;
1565  kmp_uint32 ordered_upper;
1566 #if KMP_OS_WINDOWS
1567  // This var can be placed in the hole between 'tc' and 'parm1', instead of
1568  // 'static_steal_counter'. It would be nice to measure execution times.
1569  // The conditional if/endif could be removed altogether.
1570  kmp_int32 last_upper;
1571 #endif /* KMP_OS_WINDOWS */
1572 } dispatch_private_info32_t;
1573 
1574 typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
1575  kmp_int64 count; // current chunk number for static & static-steal scheduling
1576  kmp_int64 ub; /* upper-bound */
1577  /* Adding KMP_ALIGN_CACHE here doesn't help / can hurt performance */
1578  kmp_int64 lb; /* lower-bound */
1579  kmp_int64 st; /* stride */
1580  kmp_int64 tc; /* trip count (number of iterations) */
1581  kmp_int64 static_steal_counter; /* for static_steal only; maybe better to put
1582  after ub */
1583 
1584  /* parm[1-4] are used in different ways by different scheduling algorithms */
1585 
1586  // KMP_ALIGN( 32 ) ensures ( if the KMP_ALIGN macro is turned on )
1587  // a) parm3 is properly aligned and
1588  // b) all parm1-4 are in the same cache line.
1589  // Because parm1-4 are used together, performance seems to be better
1590  // if they are in the same line (not measured though).
1591 
1592  struct KMP_ALIGN(32) {
1593  kmp_int64 parm1;
1594  kmp_int64 parm2;
1595  kmp_int64 parm3;
1596  kmp_int64 parm4;
1597  };
1598 
1599  kmp_uint64 ordered_lower;
1600  kmp_uint64 ordered_upper;
1601 #if KMP_OS_WINDOWS
1602  // This var can be placed in the hole between 'tc' and 'parm1', instead of
1603  // 'static_steal_counter'. It would be nice to measure execution times.
1604  // The conditional if/endif could be removed altogether.
1605  kmp_int64 last_upper;
1606 #endif /* KMP_OS_WINDOWS */
1607 } dispatch_private_info64_t;
1608 #else /* KMP_STATIC_STEAL_ENABLED */
1609 typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
1610  kmp_int32 lb;
1611  kmp_int32 ub;
1612  kmp_int32 st;
1613  kmp_int32 tc;
1614 
1615  kmp_int32 parm1;
1616  kmp_int32 parm2;
1617  kmp_int32 parm3;
1618  kmp_int32 parm4;
1619 
1620  kmp_int32 count;
1621 
1622  kmp_uint32 ordered_lower;
1623  kmp_uint32 ordered_upper;
1624 #if KMP_OS_WINDOWS
1625  kmp_int32 last_upper;
1626 #endif /* KMP_OS_WINDOWS */
1627 } dispatch_private_info32_t;
1628 
1629 typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
1630  kmp_int64 lb; /* lower-bound */
1631  kmp_int64 ub; /* upper-bound */
1632  kmp_int64 st; /* stride */
1633  kmp_int64 tc; /* trip count (number of iterations) */
1634 
1635  /* parm[1-4] are used in different ways by different scheduling algorithms */
1636  kmp_int64 parm1;
1637  kmp_int64 parm2;
1638  kmp_int64 parm3;
1639  kmp_int64 parm4;
1640 
1641  kmp_int64 count; /* current chunk number for static scheduling */
1642 
1643  kmp_uint64 ordered_lower;
1644  kmp_uint64 ordered_upper;
1645 #if KMP_OS_WINDOWS
1646  kmp_int64 last_upper;
1647 #endif /* KMP_OS_WINDOWS */
1648 } dispatch_private_info64_t;
1649 #endif /* KMP_STATIC_STEAL_ENABLED */
1650 
1651 typedef struct KMP_ALIGN_CACHE dispatch_private_info {
1652  union private_info {
1653  dispatch_private_info32_t p32;
1654  dispatch_private_info64_t p64;
1655  } u;
1656  enum sched_type schedule; /* scheduling algorithm */
1657  kmp_sched_flags_t flags; /* flags (e.g., ordered, nomerge, etc.) */
1658  kmp_int32 ordered_bumped;
1659  // To retain the structure size after making ordered_iteration scalar
1660  kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 3];
1661  // Stack of buffers for nest of serial regions
1662  struct dispatch_private_info *next;
1663  kmp_int32 type_size; /* the size of types in private_info */
1664 #if KMP_USE_HIER_SCHED
1665  kmp_int32 hier_id;
1666  void *parent; /* hierarchical scheduling parent pointer */
1667 #endif
1668  enum cons_type pushed_ws;
1669 } dispatch_private_info_t;
1670 
1671 typedef struct dispatch_shared_info32 {
1672  /* chunk index under dynamic, number of idle threads under static-steal;
1673  iteration index otherwise */
1674  volatile kmp_uint32 iteration;
1675  volatile kmp_uint32 num_done;
1676  volatile kmp_uint32 ordered_iteration;
1677  // Dummy to retain the structure size after making ordered_iteration scalar
1678  kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 1];
1679 } dispatch_shared_info32_t;
1680 
1681 typedef struct dispatch_shared_info64 {
1682  /* chunk index under dynamic, number of idle threads under static-steal;
1683  iteration index otherwise */
1684  volatile kmp_uint64 iteration;
1685  volatile kmp_uint64 num_done;
1686  volatile kmp_uint64 ordered_iteration;
1687  // Dummy to retain the structure size after making ordered_iteration scalar
1688  kmp_int64 ordered_dummy[KMP_MAX_ORDERED - 3];
1689 } dispatch_shared_info64_t;
1690 
1691 typedef struct dispatch_shared_info {
1692  union shared_info {
1693  dispatch_shared_info32_t s32;
1694  dispatch_shared_info64_t s64;
1695  } u;
1696  volatile kmp_uint32 buffer_index;
1697 #if OMP_45_ENABLED
1698  volatile kmp_int32 doacross_buf_idx; // teamwise index
1699  volatile kmp_uint32 *doacross_flags; // shared array of iteration flags (0/1)
1700  kmp_int32 doacross_num_done; // count finished threads
1701 #endif
1702 #if KMP_USE_HIER_SCHED
1703  void *hier;
1704 #endif
1705 #if KMP_USE_HWLOC
1706  // When linking with libhwloc, the ORDERED EPCC test slows down on big
1707  // machines (> 48 cores). Performance analysis showed that a cache thrash
1708  // was occurring and this padding helps alleviate the problem.
1709  char padding[64];
1710 #endif
1711 } dispatch_shared_info_t;
1712 
1713 typedef struct kmp_disp {
1714  /* Vector for ORDERED SECTION */
1715  void (*th_deo_fcn)(int *gtid, int *cid, ident_t *);
1716  /* Vector for END ORDERED SECTION */
1717  void (*th_dxo_fcn)(int *gtid, int *cid, ident_t *);
1718 
1719  dispatch_shared_info_t *th_dispatch_sh_current;
1720  dispatch_private_info_t *th_dispatch_pr_current;
1721 
1722  dispatch_private_info_t *th_disp_buffer;
1723  kmp_int32 th_disp_index;
1724 #if OMP_45_ENABLED
1725  kmp_int32 th_doacross_buf_idx; // thread's doacross buffer index
1726  volatile kmp_uint32 *th_doacross_flags; // pointer to shared array of flags
1727  union { // we can use a union here because doacross cannot be used in
1728  // nonmonotonic loops
1729  kmp_int64 *th_doacross_info; // info on loop bounds
1730  kmp_lock_t *th_steal_lock; // lock used for chunk stealing (8-byte variable)
1731  };
1732 #else
1733 #if KMP_STATIC_STEAL_ENABLED
1734  kmp_lock_t *th_steal_lock; // lock used for chunk stealing (8-byte variable)
1735  void *dummy_padding[1]; // make it 64 bytes on Intel(R) 64
1736 #else
1737  void *dummy_padding[2]; // make it 64 bytes on Intel(R) 64
1738 #endif
1739 #endif
1740 #if KMP_USE_INTERNODE_ALIGNMENT
1741  char more_padding[INTERNODE_CACHE_LINE];
1742 #endif
1743 } kmp_disp_t;
1744 
1745 /* ------------------------------------------------------------------------ */
1746 /* Barrier stuff */
1747 
1748 /* constants for barrier state update */
1749 #define KMP_INIT_BARRIER_STATE 0 /* should probably start from zero */
1750 #define KMP_BARRIER_SLEEP_BIT 0 /* bit used for suspend/sleep part of state */
1751 #define KMP_BARRIER_UNUSED_BIT 1 // bit that must never be set for valid state
1752 #define KMP_BARRIER_BUMP_BIT 2 /* lsb used for bump of go/arrived state */
1753 
1754 #define KMP_BARRIER_SLEEP_STATE (1 << KMP_BARRIER_SLEEP_BIT)
1755 #define KMP_BARRIER_UNUSED_STATE (1 << KMP_BARRIER_UNUSED_BIT)
1756 #define KMP_BARRIER_STATE_BUMP (1 << KMP_BARRIER_BUMP_BIT)
1757 
1758 #if (KMP_BARRIER_SLEEP_BIT >= KMP_BARRIER_BUMP_BIT)
1759 #error "Barrier sleep bit must be smaller than barrier bump bit"
1760 #endif
1761 #if (KMP_BARRIER_UNUSED_BIT >= KMP_BARRIER_BUMP_BIT)
1762 #error "Barrier unused bit must be smaller than barrier bump bit"
1763 #endif
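// Illustrative sketch (not part of the runtime sources): the low bits of a
// barrier state word are reserved for flags, so the go/arrived counter is
// advanced in units of KMP_BARRIER_STATE_BUMP while the sleep bit can be
// tested independently, e.g.
//   kmp_uint64 next_state = old_state + KMP_BARRIER_STATE_BUMP; // next phase
//   int sleeping = (old_state & KMP_BARRIER_SLEEP_STATE) != 0;  // suspend bit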
1764 
1765 // Constants for release barrier wait state: currently, hierarchical only
1766 #define KMP_BARRIER_NOT_WAITING 0 // Normal state; worker not in wait_sleep
1767 #define KMP_BARRIER_OWN_FLAG \
1768  1 // Normal state; worker waiting on own b_go flag in release
1769 #define KMP_BARRIER_PARENT_FLAG \
1770  2 // Special state; worker waiting on parent's b_go flag in release
1771 #define KMP_BARRIER_SWITCH_TO_OWN_FLAG \
1772  3 // Special state; tells worker to shift from parent to own b_go
1773 #define KMP_BARRIER_SWITCHING \
1774  4 // Special state; worker resets appropriate flag on wake-up
1775 
1776 #define KMP_NOT_SAFE_TO_REAP \
1777  0 // Thread th_reap_state: not safe to reap (tasking)
1778 #define KMP_SAFE_TO_REAP 1 // Thread th_reap_state: safe to reap (not tasking)
1779 
1780 enum barrier_type {
1781  bs_plain_barrier = 0, /* 0, All non-fork/join barriers (except reduction
1782  barriers if enabled) */
1783  bs_forkjoin_barrier, /* 1, All fork/join (parallel region) barriers */
1784 #if KMP_FAST_REDUCTION_BARRIER
1785  bs_reduction_barrier, /* 2, All barriers that are used in reduction */
1786 #endif // KMP_FAST_REDUCTION_BARRIER
1787  bs_last_barrier /* Just a placeholder to mark the end */
1788 };
1789 
1790 // to work with reduction barriers just like with plain barriers
1791 #if !KMP_FAST_REDUCTION_BARRIER
1792 #define bs_reduction_barrier bs_plain_barrier
1793 #endif // KMP_FAST_REDUCTION_BARRIER
1794 
1795 typedef enum kmp_bar_pat { /* Barrier communication patterns */
1796  bp_linear_bar =
1797  0, /* Single level (degenerate) tree */
1798  bp_tree_bar =
1799  1, /* Balanced tree with branching factor 2^n */
1800  bp_hyper_bar =
1801  2, /* Hypercube-embedded tree with min branching
1802  factor 2^n */
1803  bp_hierarchical_bar = 3, /* Machine hierarchy tree */
1804  bp_last_bar /* Placeholder to mark the end */
1805 } kmp_bar_pat_e;
1806 
1807 #define KMP_BARRIER_ICV_PUSH 1
1808 
1809 /* Record for holding the values of the internal control stack records */
1810 typedef struct kmp_internal_control {
1811  int serial_nesting_level; /* corresponds to the value of the
1812  th_team_serialized field */
1813  kmp_int8 nested; /* internal control for nested parallelism (per thread) */
1814  kmp_int8 dynamic; /* internal control for dynamic adjustment of threads (per
1815  thread) */
1816  kmp_int8
1817  bt_set; /* internal control for whether blocktime is explicitly set */
1818  int blocktime; /* internal control for blocktime */
1819 #if KMP_USE_MONITOR
1820  int bt_intervals; /* internal control for blocktime intervals */
1821 #endif
1822  int nproc; /* internal control for #threads for next parallel region (per
1823  thread) */
1824  int max_active_levels; /* internal control for max_active_levels */
1825  kmp_r_sched_t
1826  sched; /* internal control for runtime schedule {sched,chunk} pair */
1827 #if OMP_40_ENABLED
1828  kmp_proc_bind_t proc_bind; /* internal control for affinity */
1829  kmp_int32 default_device; /* internal control for default device */
1830 #endif // OMP_40_ENABLED
1831  struct kmp_internal_control *next;
1832 } kmp_internal_control_t;
1833 
1834 static inline void copy_icvs(kmp_internal_control_t *dst,
1835  kmp_internal_control_t *src) {
1836  *dst = *src;
1837 }
1838 
1839 /* Thread barrier needs volatile barrier fields */
1840 typedef struct KMP_ALIGN_CACHE kmp_bstate {
1841  // th_fixed_icvs is aligned by virtue of kmp_bstate being aligned (and all
1842  // uses of it). It is not explicitly aligned below, because we *don't* want
1843  // it to be padded -- instead, we fit b_go into the same cache line with
1844  // th_fixed_icvs, enabling NGO cache-line stores in the hierarchical barrier.
1845  kmp_internal_control_t th_fixed_icvs; // Initial ICVs for the thread
1846  // Tuck b_go into end of th_fixed_icvs cache line, so it can be stored with
1847  // same NGO store
1848  volatile kmp_uint64 b_go; // STATE => task should proceed (hierarchical)
1849  KMP_ALIGN_CACHE volatile kmp_uint64
1850  b_arrived; // STATE => task reached synch point.
1851  kmp_uint32 *skip_per_level;
1852  kmp_uint32 my_level;
1853  kmp_int32 parent_tid;
1854  kmp_int32 old_tid;
1855  kmp_uint32 depth;
1856  struct kmp_bstate *parent_bar;
1857  kmp_team_t *team;
1858  kmp_uint64 leaf_state;
1859  kmp_uint32 nproc;
1860  kmp_uint8 base_leaf_kids;
1861  kmp_uint8 leaf_kids;
1862  kmp_uint8 offset;
1863  kmp_uint8 wait_flag;
1864  kmp_uint8 use_oncore_barrier;
1865 #if USE_DEBUGGER
1866  // The following field is intended solely for the debugger. Only the worker
1867  // thread itself accesses this field: the worker increments it by 1 when it
1868  // arrives at a barrier.
1869  KMP_ALIGN_CACHE kmp_uint b_worker_arrived;
1870 #endif /* USE_DEBUGGER */
1871 } kmp_bstate_t;
1872 
1873 union KMP_ALIGN_CACHE kmp_barrier_union {
1874  double b_align; /* use worst case alignment */
1875  char b_pad[KMP_PAD(kmp_bstate_t, CACHE_LINE)];
1876  kmp_bstate_t bb;
1877 };
1878 
1879 typedef union kmp_barrier_union kmp_balign_t;
1880 
1881 /* Team barrier needs only non-volatile arrived counter */
1882 union KMP_ALIGN_CACHE kmp_barrier_team_union {
1883  double b_align; /* use worst case alignment */
1884  char b_pad[CACHE_LINE];
1885  struct {
1886  kmp_uint64 b_arrived; /* STATE => task reached synch point. */
1887 #if USE_DEBUGGER
1888  // The following two fields are intended solely for the debugger. Only the
1889  // master of the team accesses these fields: the first one is incremented by
1890  // 1 when the master arrives at a barrier, the second one is incremented
1891  // when all the threads have arrived.
1892  kmp_uint b_master_arrived;
1893  kmp_uint b_team_arrived;
1894 #endif
1895  };
1896 };
1897 
1898 typedef union kmp_barrier_team_union kmp_balign_team_t;
1899 
1900 /* Padding for Linux* OS pthreads condition variables and mutexes used to signal
1901  threads when a condition changes. This works around an NPTL bug where
1902  padding was added to pthread_cond_t, which caused the initialization routine
1903  to write outside of the structure if compiled on pre-NPTL threads. */
1904 #if KMP_OS_WINDOWS
1905 typedef struct kmp_win32_mutex {
1906  /* The Lock */
1907  CRITICAL_SECTION cs;
1908 } kmp_win32_mutex_t;
1909 
1910 typedef struct kmp_win32_cond {
1911  /* Count of the number of waiters. */
1912  int waiters_count_;
1913 
1914  /* Serialize access to <waiters_count_> */
1915  kmp_win32_mutex_t waiters_count_lock_;
1916 
1917  /* Number of threads to release via a <cond_broadcast> or a <cond_signal> */
1918  int release_count_;
1919 
1920  /* Keeps track of the current "generation" so that we don't allow */
1921  /* one thread to steal all the "releases" from the broadcast. */
1922  int wait_generation_count_;
1923 
1924  /* A manual-reset event that's used to block and release waiting threads. */
1925  HANDLE event_;
1926 } kmp_win32_cond_t;
1927 #endif
1928 
1929 #if KMP_OS_UNIX
1930 
1931 union KMP_ALIGN_CACHE kmp_cond_union {
1932  double c_align;
1933  char c_pad[CACHE_LINE];
1934  pthread_cond_t c_cond;
1935 };
1936 
1937 typedef union kmp_cond_union kmp_cond_align_t;
1938 
1939 union KMP_ALIGN_CACHE kmp_mutex_union {
1940  double m_align;
1941  char m_pad[CACHE_LINE];
1942  pthread_mutex_t m_mutex;
1943 };
1944 
1945 typedef union kmp_mutex_union kmp_mutex_align_t;
1946 
1947 #endif /* KMP_OS_UNIX */
1948 
1949 typedef struct kmp_desc_base {
1950  void *ds_stackbase;
1951  size_t ds_stacksize;
1952  int ds_stackgrow;
1953  kmp_thread_t ds_thread;
1954  volatile int ds_tid;
1955  int ds_gtid;
1956 #if KMP_OS_WINDOWS
1957  volatile int ds_alive;
1958  DWORD ds_thread_id;
1959 /* ds_thread keeps the thread handle on Windows* OS. It is enough for RTL
1960  purposes. However, debugger support (libomp_db) cannot work with handles,
1961  because they are not comparable. For example, the debugger requests info
1962  about a thread with handle h. h is valid within the debugger process but
1963  meaningless within the debuggee process. Even if h is duplicated by a call
1964  to DuplicateHandle() so that the result h' is valid in the debuggee process,
1965  it is a *new* handle which does *not* equal any other handle in the
1966  debuggee... The only way to compare handles is to convert them to
1967  system-wide ids. GetThreadId() is available only in Longhorn and Server
1968  2003. :-( In contrast, GetCurrentThreadId() is available on all Windows* OS
1969  flavours (including Windows* 95). Thus, we have to get the thread id by
1970  calling GetCurrentThreadId() from within the thread and save it to let libomp_db identify threads. */
1971 #endif /* KMP_OS_WINDOWS */
1972 } kmp_desc_base_t;
1973 
1974 typedef union KMP_ALIGN_CACHE kmp_desc {
1975  double ds_align; /* use worst case alignment */
1976  char ds_pad[KMP_PAD(kmp_desc_base_t, CACHE_LINE)];
1977  kmp_desc_base_t ds;
1978 } kmp_desc_t;
1979 
1980 typedef struct kmp_local {
1981  volatile int this_construct; /* count of single's encountered by thread */
1982  void *reduce_data;
1983 #if KMP_USE_BGET
1984  void *bget_data;
1985  void *bget_list;
1986 #if !USE_CMP_XCHG_FOR_BGET
1987 #ifdef USE_QUEUING_LOCK_FOR_BGET
1988  kmp_lock_t bget_lock; /* Lock for accessing bget free list */
1989 #else
1990  kmp_bootstrap_lock_t bget_lock; // Lock for accessing bget free list. Must be
1991 // bootstrap lock so we can use it at library
1992 // shutdown.
1993 #endif /* USE_QUEUING_LOCK_FOR_BGET */
1994 #endif /* ! USE_CMP_XCHG_FOR_BGET */
1995 #endif /* KMP_USE_BGET */
1996 
1997  PACKED_REDUCTION_METHOD_T
1998  packed_reduction_method; /* stored by __kmpc_reduce*(), used by
1999  __kmpc_end_reduce*() */
2000 
2001 } kmp_local_t;
2002 
2003 #define KMP_CHECK_UPDATE(a, b) \
2004  if ((a) != (b)) \
2005  (a) = (b)
2006 #define KMP_CHECK_UPDATE_SYNC(a, b) \
2007  if ((a) != (b)) \
2008  TCW_SYNC_PTR((a), (b))
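// Usage sketch (illustrative only; the fields and variables below are just
// examples): these helpers store a value only when it differs from the current
// one, avoiding needless dirtying of shared cache lines; the _SYNC variant
// performs the conditional store with the ordered TCW_SYNC_PTR write.
//   KMP_CHECK_UPDATE(team->t.t_argc, argc);            // plain conditional store
//   KMP_CHECK_UPDATE_SYNC(team->t.t_pkfn, microtask);  // ordered conditional store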
2009 
2010 #define get__blocktime(xteam, xtid) \
2011  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime)
2012 #define get__bt_set(xteam, xtid) \
2013  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set)
2014 #if KMP_USE_MONITOR
2015 #define get__bt_intervals(xteam, xtid) \
2016  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals)
2017 #endif
2018 
2019 #define get__nested_2(xteam, xtid) \
2020  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nested)
2021 #define get__dynamic_2(xteam, xtid) \
2022  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.dynamic)
2023 #define get__nproc_2(xteam, xtid) \
2024  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nproc)
2025 #define get__sched_2(xteam, xtid) \
2026  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.sched)
2027 
2028 #define set__blocktime_team(xteam, xtid, xval) \
2029  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime) = \
2030  (xval))
2031 
2032 #if KMP_USE_MONITOR
2033 #define set__bt_intervals_team(xteam, xtid, xval) \
2034  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals) = \
2035  (xval))
2036 #endif
2037 
2038 #define set__bt_set_team(xteam, xtid, xval) \
2039  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set) = (xval))
2040 
2041 #define set__nested(xthread, xval) \
2042  (((xthread)->th.th_current_task->td_icvs.nested) = (xval))
2043 #define get__nested(xthread) \
2044  (((xthread)->th.th_current_task->td_icvs.nested) ? (FTN_TRUE) : (FTN_FALSE))
2045 
2046 #define set__dynamic(xthread, xval) \
2047  (((xthread)->th.th_current_task->td_icvs.dynamic) = (xval))
2048 #define get__dynamic(xthread) \
2049  (((xthread)->th.th_current_task->td_icvs.dynamic) ? (FTN_TRUE) : (FTN_FALSE))
2050 
2051 #define set__nproc(xthread, xval) \
2052  (((xthread)->th.th_current_task->td_icvs.nproc) = (xval))
2053 
2054 #define set__max_active_levels(xthread, xval) \
2055  (((xthread)->th.th_current_task->td_icvs.max_active_levels) = (xval))
2056 
2057 #define set__sched(xthread, xval) \
2058  (((xthread)->th.th_current_task->td_icvs.sched) = (xval))
2059 
2060 #if OMP_40_ENABLED
2061 
2062 #define set__proc_bind(xthread, xval) \
2063  (((xthread)->th.th_current_task->td_icvs.proc_bind) = (xval))
2064 #define get__proc_bind(xthread) \
2065  ((xthread)->th.th_current_task->td_icvs.proc_bind)
2066 
2067 #endif /* OMP_40_ENABLED */
2068 
2069 // OpenMP tasking data structures
2070 
2071 typedef enum kmp_tasking_mode {
2072  tskm_immediate_exec = 0,
2073  tskm_extra_barrier = 1,
2074  tskm_task_teams = 2,
2075  tskm_max = 2
2076 } kmp_tasking_mode_t;
2077 
2078 extern kmp_tasking_mode_t
2079  __kmp_tasking_mode; /* determines how/when to execute tasks */
2080 extern int __kmp_task_stealing_constraint;
2081 #if OMP_40_ENABLED
2082 extern kmp_int32 __kmp_default_device; // Set via OMP_DEFAULT_DEVICE if
2083 // specified, defaults to 0 otherwise
2084 #endif
2085 #if OMP_45_ENABLED
2086 // Set via OMP_MAX_TASK_PRIORITY if specified, defaults to 0 otherwise
2087 extern kmp_int32 __kmp_max_task_priority;
2088 // Set via KMP_TASKLOOP_MIN_TASKS if specified, defaults to 0 otherwise
2089 extern kmp_uint64 __kmp_taskloop_min_tasks;
2090 #endif
2091 
2092 /* NOTE: kmp_taskdata_t and kmp_task_t structures allocated in single block with
2093  taskdata first */
2094 #define KMP_TASK_TO_TASKDATA(task) (((kmp_taskdata_t *)task) - 1)
2095 #define KMP_TASKDATA_TO_TASK(taskdata) (kmp_task_t *)(taskdata + 1)
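// Layout sketch (illustrative, not part of the runtime sources): one
// allocation holds [ kmp_taskdata_t | kmp_task_t | task private data ], so the
// two macros above are plain pointer arithmetic over that block, e.g.
//   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task); // header precedes task
//   kmp_task_t *same_task = KMP_TASKDATA_TO_TASK(taskdata);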
2096 
2097 // The tt_found_tasks flag is a signal to all threads in the team that tasks
2098 // were spawned and queued since the previous barrier release.
2099 #define KMP_TASKING_ENABLED(task_team) \
2100  (TCR_SYNC_4((task_team)->tt.tt_found_tasks) == TRUE)
2101 
2108 typedef kmp_int32 (*kmp_routine_entry_t)(kmp_int32, void *);
2109 
2110 #if OMP_40_ENABLED || OMP_45_ENABLED
2111 typedef union kmp_cmplrdata {
2112 #if OMP_45_ENABLED
2113  kmp_int32 priority;
2114 #endif // OMP_45_ENABLED
2115 #if OMP_40_ENABLED
2116  kmp_routine_entry_t
2117  destructors; /* pointer to function to invoke destructors of
2118  firstprivate C++ objects */
2119 #endif // OMP_40_ENABLED
2120  /* future data */
2121 } kmp_cmplrdata_t;
2122 #endif
2123 
2124 /* sizeof_kmp_task_t passed as arg to kmpc_omp_task call */
2127 typedef struct kmp_task { /* GEH: Shouldn't this be aligned somehow? */
2128  void *shareds;
2129  kmp_routine_entry_t
2130  routine;
2131  kmp_int32 part_id;
2132 #if OMP_40_ENABLED || OMP_45_ENABLED
2133  kmp_cmplrdata_t
2134  data1; /* Two known optional additions: destructors and priority */
2135  kmp_cmplrdata_t data2; /* Process destructors first, priority second */
2136 /* future data */
2137 #endif
2138  /* private vars */
2139 } kmp_task_t;
2140 
2145 #if OMP_40_ENABLED
2146 typedef struct kmp_taskgroup {
2147  std::atomic<kmp_int32> count; // number of allocated and incomplete tasks
2148  std::atomic<kmp_int32>
2149  cancel_request; // request for cancellation of this taskgroup
2150  struct kmp_taskgroup *parent; // parent taskgroup
2151 // TODO: change to OMP_50_ENABLED, need to change build tools for this to work
2152 #if OMP_45_ENABLED
2153  // Block of data to perform task reduction
2154  void *reduce_data; // reduction related info
2155  kmp_int32 reduce_num_data; // number of data items to reduce
2156 #endif
2157 } kmp_taskgroup_t;
2158 
2159 // forward declarations
2160 typedef union kmp_depnode kmp_depnode_t;
2161 typedef struct kmp_depnode_list kmp_depnode_list_t;
2162 typedef struct kmp_dephash_entry kmp_dephash_entry_t;
2163 
2164 typedef struct kmp_depend_info {
2165  kmp_intptr_t base_addr;
2166  size_t len;
2167  struct {
2168  bool in : 1;
2169  bool out : 1;
2170  } flags;
2171 } kmp_depend_info_t;
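// Illustrative sketch (not part of the runtime sources): each depend() item of
// a task is described by one kmp_depend_info_t; for example, an inout
// dependence on a variable x could be encoded roughly as
//   kmp_depend_info_t dep = {(kmp_intptr_t)&x, sizeof(x), {/*in*/ 1, /*out*/ 1}};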
2172 
2173 struct kmp_depnode_list {
2174  kmp_depnode_t *node;
2175  kmp_depnode_list_t *next;
2176 };
2177 
2178 typedef struct kmp_base_depnode {
2179  kmp_depnode_list_t *successors;
2180  kmp_task_t *task;
2181 
2182  kmp_lock_t lock;
2183 
2184 #if KMP_SUPPORT_GRAPH_OUTPUT
2185  kmp_uint32 id;
2186 #endif
2187 
2188  std::atomic<kmp_int32> npredecessors;
2189  std::atomic<kmp_int32> nrefs;
2190 } kmp_base_depnode_t;
2191 
2192 union KMP_ALIGN_CACHE kmp_depnode {
2193  double dn_align; /* use worst case alignment */
2194  char dn_pad[KMP_PAD(kmp_base_depnode_t, CACHE_LINE)];
2195  kmp_base_depnode_t dn;
2196 };
2197 
2198 struct kmp_dephash_entry {
2199  kmp_intptr_t addr;
2200  kmp_depnode_t *last_out;
2201  kmp_depnode_list_t *last_ins;
2202  kmp_dephash_entry_t *next_in_bucket;
2203 };
2204 
2205 typedef struct kmp_dephash {
2206  kmp_dephash_entry_t **buckets;
2207  size_t size;
2208 #ifdef KMP_DEBUG
2209  kmp_uint32 nelements;
2210  kmp_uint32 nconflicts;
2211 #endif
2212 } kmp_dephash_t;
2213 
2214 #endif
2215 
2216 #ifdef BUILD_TIED_TASK_STACK
2217 
2218 /* Tied Task stack definitions */
2219 typedef struct kmp_stack_block {
2220  kmp_taskdata_t *sb_block[TASK_STACK_BLOCK_SIZE];
2221  struct kmp_stack_block *sb_next;
2222  struct kmp_stack_block *sb_prev;
2223 } kmp_stack_block_t;
2224 
2225 typedef struct kmp_task_stack {
2226  kmp_stack_block_t ts_first_block; // first block of stack entries
2227  kmp_taskdata_t **ts_top; // pointer to the top of stack
2228  kmp_int32 ts_entries; // number of entries on the stack
2229 } kmp_task_stack_t;
2230 
2231 #endif // BUILD_TIED_TASK_STACK
2232 
2233 typedef struct kmp_tasking_flags { /* Total struct must be exactly 32 bits */
2234  /* Compiler flags */ /* Total compiler flags must be 16 bits */
2235  unsigned tiedness : 1; /* task is either tied (1) or untied (0) */
2236  unsigned final : 1; /* task is final(1) so execute immediately */
2237  unsigned merged_if0 : 1; /* no __kmpc_task_{begin/complete}_if0 calls in if0
2238  code path */
2239 #if OMP_40_ENABLED
2240  unsigned destructors_thunk : 1; /* set if the compiler creates a thunk to
2241  invoke destructors from the runtime */
2242 #if OMP_45_ENABLED
2243  unsigned proxy : 1; /* task is a proxy task (it will be executed outside the
2244  context of the RTL) */
2245  unsigned priority_specified : 1; /* set if the compiler provides priority
2246  setting for the task */
2247  unsigned reserved : 10; /* reserved for compiler use */
2248 #else
2249  unsigned reserved : 12; /* reserved for compiler use */
2250 #endif
2251 #else // OMP_40_ENABLED
2252  unsigned reserved : 13; /* reserved for compiler use */
2253 #endif // OMP_40_ENABLED
2254 
2255  /* Library flags */ /* Total library flags must be 16 bits */
2256  unsigned tasktype : 1; /* task is either explicit(1) or implicit (0) */
2257  unsigned task_serial : 1; // task is executed immediately (1) or deferred (0)
2258  unsigned tasking_ser : 1; // all tasks in team are either executed immediately
2259  // (1) or may be deferred (0)
2260  unsigned team_serial : 1; // entire team is serial (1) [1 thread] or parallel
2261  // (0) [>= 2 threads]
2262  /* If either team_serial or tasking_ser is set, task team may be NULL */
2263  /* Task State Flags: */
2264  unsigned started : 1; /* 1==started, 0==not started */
2265  unsigned executing : 1; /* 1==executing, 0==not executing */
2266  unsigned complete : 1; /* 1==complete, 0==not complete */
2267  unsigned freed : 1; /* 1==freed, 0==allocated */
2268  unsigned native : 1; /* 1==gcc-compiled task, 0==intel */
2269  unsigned reserved31 : 7; /* reserved for library use */
2270 
2271 } kmp_tasking_flags_t;
2272 
2273 struct kmp_taskdata { /* aligned during dynamic allocation */
2274  kmp_int32 td_task_id; /* id, assigned by debugger */
2275  kmp_tasking_flags_t td_flags; /* task flags */
2276  kmp_team_t *td_team; /* team for this task */
2277  kmp_info_p *td_alloc_thread; /* thread that allocated data structures */
2278  /* Currently not used except for perhaps IDB */
2279  kmp_taskdata_t *td_parent; /* parent task */
2280  kmp_int32 td_level; /* task nesting level */
2281  std::atomic<kmp_int32> td_untied_count; // untied task active parts counter
2282  ident_t *td_ident; /* task identifier */
2283  // Taskwait data.
2284  ident_t *td_taskwait_ident;
2285  kmp_uint32 td_taskwait_counter;
2286  kmp_int32 td_taskwait_thread; /* gtid + 1 of thread encountered taskwait */
2287  KMP_ALIGN_CACHE kmp_internal_control_t
2288  td_icvs; /* Internal control variables for the task */
2289  KMP_ALIGN_CACHE std::atomic<kmp_int32>
2290  td_allocated_child_tasks; /* Child tasks (+ current task) not yet
2291  deallocated */
2292  std::atomic<kmp_int32>
2293  td_incomplete_child_tasks; /* Child tasks not yet complete */
2294 #if OMP_40_ENABLED
2295  kmp_taskgroup_t
2296  *td_taskgroup; // Each task keeps pointer to its current taskgroup
2297  kmp_dephash_t
2298  *td_dephash; // Dependencies for children tasks are tracked from here
2299  kmp_depnode_t
2300  *td_depnode; // Pointer to graph node if this task has dependencies
2301 #endif // OMP_40_ENABLED
2302 #if OMP_45_ENABLED
2303  kmp_task_team_t *td_task_team;
2304  kmp_int32 td_size_alloc; // The size of task structure, including shareds etc.
2305 #if defined(KMP_GOMP_COMPAT)
2306  // 4 or 8 byte integers for the loop bounds in GOMP_taskloop
2307  kmp_int32 td_size_loop_bounds;
2308 #endif
2309 #endif // OMP_45_ENABLED
2310  kmp_taskdata_t *td_last_tied; // keep tied task for task scheduling constraint
2311 #if defined(KMP_GOMP_COMPAT) && OMP_45_ENABLED
2312  // GOMP sends in a copy function for copy constructors
2313  void (*td_copy_func)(void *, void *);
2314 #endif
2315 #if OMPT_SUPPORT
2316  ompt_task_info_t ompt_task_info;
2317 #endif
2318 }; // struct kmp_taskdata
2319 
2320 // Make sure padding above worked
2321 KMP_BUILD_ASSERT(sizeof(kmp_taskdata_t) % sizeof(void *) == 0);
2322 
2323 // Data for task team but per thread
2324 typedef struct kmp_base_thread_data {
2325  kmp_info_p *td_thr; // Pointer back to thread info
2326  // Used only in __kmp_execute_tasks_template, maybe not avail until task is
2327  // queued?
2328  kmp_bootstrap_lock_t td_deque_lock; // Lock for accessing deque
2329  kmp_taskdata_t *
2330  *td_deque; // Deque of tasks encountered by td_thr, dynamically allocated
2331  kmp_int32 td_deque_size; // Size of deque
2332  kmp_uint32 td_deque_head; // Head of deque (will wrap)
2333  kmp_uint32 td_deque_tail; // Tail of deque (will wrap)
2334  kmp_int32 td_deque_ntasks; // Number of tasks in deque
2335  // GEH: shouldn't this be volatile since used in while-spin?
2336  kmp_int32 td_deque_last_stolen; // Thread number of last successful steal
2337 #ifdef BUILD_TIED_TASK_STACK
2338  kmp_task_stack_t td_susp_tied_tasks; // Stack of suspended tied tasks for task
2339 // scheduling constraint
2340 #endif // BUILD_TIED_TASK_STACK
2341 } kmp_base_thread_data_t;
2342 
2343 #define TASK_DEQUE_BITS 8 // Used solely to define INITIAL_TASK_DEQUE_SIZE
2344 #define INITIAL_TASK_DEQUE_SIZE (1 << TASK_DEQUE_BITS)
2345 
2346 #define TASK_DEQUE_SIZE(td) ((td).td_deque_size)
2347 #define TASK_DEQUE_MASK(td) ((td).td_deque_size - 1)
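// Because the deque size is kept as a power of two, TASK_DEQUE_MASK lets the
// head/tail indices wrap with a mask instead of a modulo. Illustrative sketch
// (not runtime code):
//   thread_data->td.td_deque_tail =
//       (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);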
2348 
2349 typedef union KMP_ALIGN_CACHE kmp_thread_data {
2350  kmp_base_thread_data_t td;
2351  double td_align; /* use worst case alignment */
2352  char td_pad[KMP_PAD(kmp_base_thread_data_t, CACHE_LINE)];
2353 } kmp_thread_data_t;
2354 
2355 // Data for task teams which are used when tasking is enabled for the team
2356 typedef struct kmp_base_task_team {
2357  kmp_bootstrap_lock_t
2358  tt_threads_lock; /* Lock used to allocate per-thread part of task team */
2359  /* must be bootstrap lock since used at library shutdown */
2360  kmp_task_team_t *tt_next; /* For linking the task team free list */
2361  kmp_thread_data_t
2362  *tt_threads_data; /* Array of per-thread structures for task team */
2363  /* Data survives task team deallocation */
2364  kmp_int32 tt_found_tasks; /* Have we found tasks and queued them while
2365  executing this team? */
2366  /* TRUE means tt_threads_data is set up and initialized */
2367  kmp_int32 tt_nproc; /* #threads in team */
2368  kmp_int32
2369  tt_max_threads; /* number of entries allocated for threads_data array */
2370 #if OMP_45_ENABLED
2371  kmp_int32
2372  tt_found_proxy_tasks; /* Have we found proxy tasks since last barrier */
2373 #endif
2374  kmp_int32 tt_untied_task_encountered;
2375 
2376  KMP_ALIGN_CACHE
2377  std::atomic<kmp_int32> tt_unfinished_threads; /* #threads still active */
2378 
2379  KMP_ALIGN_CACHE
2380  volatile kmp_uint32
2381  tt_active; /* is the team still actively executing tasks */
2382 } kmp_base_task_team_t;
2383 
2384 union KMP_ALIGN_CACHE kmp_task_team {
2385  kmp_base_task_team_t tt;
2386  double tt_align; /* use worst case alignment */
2387  char tt_pad[KMP_PAD(kmp_base_task_team_t, CACHE_LINE)];
2388 };
2389 
2390 #if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
2391 // Free lists keep same-size free memory slots for fast memory allocation
2392 // routines
2393 typedef struct kmp_free_list {
2394  void *th_free_list_self; // Self-allocated tasks free list
2395  void *th_free_list_sync; // Self-allocated tasks stolen/returned by other
2396  // threads
2397  void *th_free_list_other; // Non-self free list (to be returned to owner's
2398  // sync list)
2399 } kmp_free_list_t;
2400 #endif
2401 #if KMP_NESTED_HOT_TEAMS
2402 // Hot teams array keeps hot teams and their sizes for given thread. Hot teams
2403 // are not put in teams pool, and they don't put threads in threads pool.
2404 typedef struct kmp_hot_team_ptr {
2405  kmp_team_p *hot_team; // pointer to hot_team of given nesting level
2406  kmp_int32 hot_team_nth; // number of threads allocated for the hot_team
2407 } kmp_hot_team_ptr_t;
2408 #endif
2409 #if OMP_40_ENABLED
2410 typedef struct kmp_teams_size {
2411  kmp_int32 nteams; // number of teams in a league
2412  kmp_int32 nth; // number of threads in each team of the league
2413 } kmp_teams_size_t;
2414 #endif
2415 
2416 // OpenMP thread data structures
2417 
2418 typedef struct KMP_ALIGN_CACHE kmp_base_info {
2419  /* Start with the read-only data, which is cache aligned and padded. This is
2420  written by the master before the thread starts working. Uber masters may
2421  update themselves later. Usage does not consider serialized regions. */
2422  kmp_desc_t th_info;
2423  kmp_team_p *th_team; /* team we belong to */
2424  kmp_root_p *th_root; /* pointer to root of task hierarchy */
2425  kmp_info_p *th_next_pool; /* next available thread in the pool */
2426  kmp_disp_t *th_dispatch; /* thread's dispatch data */
2427  int th_in_pool; /* in thread pool (32 bits for TCR/TCW) */
2428 
2429  /* The following are cached from the team info structure */
2430  /* TODO use these in more places as determined to be needed via profiling */
2431  int th_team_nproc; /* number of threads in a team */
2432  kmp_info_p *th_team_master; /* the team's master thread */
2433  int th_team_serialized; /* team is serialized */
2434 #if OMP_40_ENABLED
2435  microtask_t th_teams_microtask; /* save entry address for teams construct */
2436  int th_teams_level; /* save initial level of teams construct */
2437 /* it is 0 on the device but may be any value on the host */
2438 #endif
2439 
2440 /* The blocktime info is copied from the team struct to the thread struct */
2441 /* at the start of a barrier, and the values stored in the team are used */
2442 /* at points in the code where the team struct is no longer guaranteed */
2443 /* to exist (from the POV of worker threads). */
2444 #if KMP_USE_MONITOR
2445  int th_team_bt_intervals;
2446  int th_team_bt_set;
2447 #else
2448  kmp_uint64 th_team_bt_intervals;
2449 #endif
2450 
2451 #if KMP_AFFINITY_SUPPORTED
2452  kmp_affin_mask_t *th_affin_mask; /* thread's current affinity mask */
2453 #endif
2454 #if OMP_50_ENABLED
2455  void *const *th_def_allocator; /* per implicit task default allocator */
2456 #endif
2457  /* The data set by the master at reinit, then R/W by the worker */
2458  KMP_ALIGN_CACHE int
2459  th_set_nproc; /* if > 0, then only use this request for the next fork */
2460 #if KMP_NESTED_HOT_TEAMS
2461  kmp_hot_team_ptr_t *th_hot_teams; /* array of hot teams */
2462 #endif
2463 #if OMP_40_ENABLED
2464  kmp_proc_bind_t
2465  th_set_proc_bind; /* if != proc_bind_default, use request for next fork */
2466  kmp_teams_size_t
2467  th_teams_size; /* number of teams/threads in teams construct */
2468 #if KMP_AFFINITY_SUPPORTED
2469  int th_current_place; /* place currently bound to */
2470  int th_new_place; /* place to bind to in par reg */
2471  int th_first_place; /* first place in partition */
2472  int th_last_place; /* last place in partition */
2473 #endif
2474 #endif
2475 #if USE_ITT_BUILD
2476  kmp_uint64 th_bar_arrive_time; /* arrival to barrier timestamp */
2477  kmp_uint64 th_bar_min_time; /* minimum arrival time at the barrier */
2478  kmp_uint64 th_frame_time; /* frame timestamp */
2479 #endif /* USE_ITT_BUILD */
2480  kmp_local_t th_local;
2481  struct private_common *th_pri_head;
2482 
2483  /* Now the data only used by the worker (after initial allocation) */
2484  /* TODO the first serial team should actually be stored in the info_t
2485  structure. this will help reduce initial allocation overhead */
2486  KMP_ALIGN_CACHE kmp_team_p
2487  *th_serial_team; /*serialized team held in reserve*/
2488 
2489 #if OMPT_SUPPORT
2490  ompt_thread_info_t ompt_thread_info;
2491 #endif
2492 
2493  /* The following are also read by the master during reinit */
2494  struct common_table *th_pri_common;
2495 
2496  volatile kmp_uint32 th_spin_here; /* thread-local location for spinning */
2497  /* while awaiting queuing lock acquire */
2498 
2499  volatile void *th_sleep_loc; // this points at a kmp_flag<T>
2500 
2501  ident_t *th_ident;
2502  unsigned th_x; // Random number generator data
2503  unsigned th_a; // Random number generator data
2504 
2505  /* Tasking-related data for the thread */
2506  kmp_task_team_t *th_task_team; // Task team struct
2507  kmp_taskdata_t *th_current_task; // Innermost Task being executed
2508  kmp_uint8 th_task_state; // alternating 0/1 for task team identification
2509  kmp_uint8 *th_task_state_memo_stack; // Stack holding memos of th_task_state
2510  // at nested levels
2511  kmp_uint32 th_task_state_top; // Top element of th_task_state_memo_stack
2512  kmp_uint32 th_task_state_stack_sz; // Size of th_task_state_memo_stack
2513  kmp_uint32 th_reap_state; // Non-zero indicates thread is not
2514  // tasking, thus safe to reap
2515 
2516  /* More stuff for keeping track of active/sleeping threads (this part is
2517  written by the worker thread) */
2518  kmp_uint8 th_active_in_pool; // included in count of #active threads in pool
2519  int th_active; // ! sleeping; 32 bits for TCR/TCW
2520  struct cons_header *th_cons; // used for consistency check
2521 #if KMP_USE_HIER_SCHED
2522  // used for hierarchical scheduling
2523  kmp_hier_private_bdata_t *th_hier_bar_data;
2524 #endif
2525 
2526  /* Add the synchronizing data which is cache aligned and padded. */
2527  KMP_ALIGN_CACHE kmp_balign_t th_bar[bs_last_barrier];
2528 
2529  KMP_ALIGN_CACHE volatile kmp_int32
2530  th_next_waiting; /* gtid+1 of next thread on lock wait queue, 0 if none */
2531 
2532 #if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
2533 #define NUM_LISTS 4
2534  kmp_free_list_t th_free_lists[NUM_LISTS]; // Free lists for fast memory
2535 // allocation routines
2536 #endif
2537 
2538 #if KMP_OS_WINDOWS
2539  kmp_win32_cond_t th_suspend_cv;
2540  kmp_win32_mutex_t th_suspend_mx;
2541  int th_suspend_init;
2542 #endif
2543 #if KMP_OS_UNIX
2544  kmp_cond_align_t th_suspend_cv;
2545  kmp_mutex_align_t th_suspend_mx;
2546  int th_suspend_init_count;
2547 #endif
2548 
2549 #if USE_ITT_BUILD
2550  kmp_itt_mark_t th_itt_mark_single;
2551 // alignment ???
2552 #endif /* USE_ITT_BUILD */
2553 #if KMP_STATS_ENABLED
2554  kmp_stats_list *th_stats;
2555 #endif
2556 #if KMP_OS_UNIX
2557  std::atomic<bool> th_blocking;
2558 #endif
2559 } kmp_base_info_t;
2560 
2561 typedef union KMP_ALIGN_CACHE kmp_info {
2562  double th_align; /* use worst case alignment */
2563  char th_pad[KMP_PAD(kmp_base_info_t, CACHE_LINE)];
2564  kmp_base_info_t th;
2565 } kmp_info_t;
2566 
2567 // OpenMP thread team data structures
2568 
2569 typedef struct kmp_base_data { volatile kmp_uint32 t_value; } kmp_base_data_t;
2570 
2571 typedef union KMP_ALIGN_CACHE kmp_sleep_team {
2572  double dt_align; /* use worst case alignment */
2573  char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
2574  kmp_base_data_t dt;
2575 } kmp_sleep_team_t;
2576 
2577 typedef union KMP_ALIGN_CACHE kmp_ordered_team {
2578  double dt_align; /* use worst case alignment */
2579  char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
2580  kmp_base_data_t dt;
2581 } kmp_ordered_team_t;
2582 
2583 typedef int (*launch_t)(int gtid);
2584 
2585 /* Minimum number of ARGV entries to malloc if necessary */
2586 #define KMP_MIN_MALLOC_ARGV_ENTRIES 100
2587 
2588 // Set up how many argv pointers will fit in cache lines containing
2589 // t_inline_argv. Historically, we have supported at least 96 bytes. Using a
2590 // larger value, leaving more space between the master-write/worker-read and
2591 // read/write-by-all sections, seems to buy more performance on EPCC PARALLEL.
2592 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2593 #define KMP_INLINE_ARGV_BYTES \
2594  (4 * CACHE_LINE - \
2595  ((3 * KMP_PTR_SKIP + 2 * sizeof(int) + 2 * sizeof(kmp_int8) + \
2596  sizeof(kmp_int16) + sizeof(kmp_uint32)) % \
2597  CACHE_LINE))
2598 #else
2599 #define KMP_INLINE_ARGV_BYTES \
2600  (2 * CACHE_LINE - ((3 * KMP_PTR_SKIP + 2 * sizeof(int)) % CACHE_LINE))
2601 #endif
2602 #define KMP_INLINE_ARGV_ENTRIES (int)(KMP_INLINE_ARGV_BYTES / KMP_PTR_SKIP)
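// Worked example (assuming CACHE_LINE == 64, KMP_PTR_SKIP == 8, and the usual
// 4/1/2/4-byte sizes of int/kmp_int8/kmp_int16/kmp_uint32 on Intel(R) 64):
//   KMP_INLINE_ARGV_BYTES   = 4 * 64 - ((24 + 8 + 2 + 2 + 4) % 64) = 216
//   KMP_INLINE_ARGV_ENTRIES = 216 / 8 = 27 inline argv slots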
2603 
2604 typedef struct KMP_ALIGN_CACHE kmp_base_team {
2605  // Synchronization Data
2606  // ---------------------------------------------------------------------------
2607  KMP_ALIGN_CACHE kmp_ordered_team_t t_ordered;
2608  kmp_balign_team_t t_bar[bs_last_barrier];
2609  std::atomic<int> t_construct; // count of single directive encountered by team
2610  char pad[sizeof(kmp_lock_t)]; // padding to maintain performance on big iron
2611 
2612  // Master only
2613  // ---------------------------------------------------------------------------
2614  KMP_ALIGN_CACHE int t_master_tid; // tid of master in parent team
2615  int t_master_this_cons; // "this_construct" single counter of master in parent
2616  // team
2617  ident_t *t_ident; // if volatile, have to change too much other crud to
2618  // volatile too
2619  kmp_team_p *t_parent; // parent team
2620  kmp_team_p *t_next_pool; // next free team in the team pool
2621  kmp_disp_t *t_dispatch; // thread's dispatch data
2622  kmp_task_team_t *t_task_team[2]; // Task team struct; switch between 2
2623 #if OMP_40_ENABLED
2624  kmp_proc_bind_t t_proc_bind; // bind type for par region
2625 #endif // OMP_40_ENABLED
2626 #if USE_ITT_BUILD
2627  kmp_uint64 t_region_time; // region begin timestamp
2628 #endif /* USE_ITT_BUILD */
2629 
2630  // Master write, workers read
2631  // --------------------------------------------------------------------------
2632  KMP_ALIGN_CACHE void **t_argv;
2633  int t_argc;
2634  int t_nproc; // number of threads in team
2635  microtask_t t_pkfn;
2636  launch_t t_invoke; // procedure to launch the microtask
2637 
2638 #if OMPT_SUPPORT
2639  ompt_team_info_t ompt_team_info;
2640  ompt_lw_taskteam_t *ompt_serialized_team_info;
2641 #endif
2642 
2643 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2644  kmp_int8 t_fp_control_saved;
2645  kmp_int8 t_pad2b;
2646  kmp_int16 t_x87_fpu_control_word; // FP control regs
2647  kmp_uint32 t_mxcsr;
2648 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
2649 
2650  void *t_inline_argv[KMP_INLINE_ARGV_ENTRIES];
2651 
2652  KMP_ALIGN_CACHE kmp_info_t **t_threads;
2653  kmp_taskdata_t
2654  *t_implicit_task_taskdata; // Taskdata for the thread's implicit task
2655  int t_level; // nested parallel level
2656 
2657  KMP_ALIGN_CACHE int t_max_argc;
2658  int t_max_nproc; // max threads this team can handle (dynamically expandable)
2659  int t_serialized; // levels deep of serialized teams
2660  dispatch_shared_info_t *t_disp_buffer; // buffers for dispatch system
2661  int t_id; // team's id, assigned by debugger.
2662  int t_active_level; // nested active parallel level
2663  kmp_r_sched_t t_sched; // run-time schedule for the team
2664 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
2665  int t_first_place; // first & last place in parent thread's partition.
2666  int t_last_place; // Restore these values to master after par region.
2667 #endif // OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
2668  int t_size_changed; // team size was changed?: 0: no, 1: yes, -1: changed via
2669 // omp_set_num_threads() call
2670 #if OMP_50_ENABLED
2671  void *const *t_def_allocator; /* per implicit task default allocator */
2672 #endif
2673 
2674 // Read/write by workers as well
2675 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
2676  // Using CACHE_LINE=64 reduces memory footprint, but causes a big perf
2677  // regression of epcc 'parallel' and 'barrier' on fxe256lin01. This extra
2678  // padding serves to fix the performance of epcc 'parallel' and 'barrier' when
2679  // CACHE_LINE=64. TODO: investigate more and get rid of this padding.
2680  char dummy_padding[1024];
2681 #endif
2682  // Internal control stack for additional nested teams.
2683  KMP_ALIGN_CACHE kmp_internal_control_t *t_control_stack_top;
2684 // for SERIALIZED teams nested 2 or more levels deep
2685 #if OMP_40_ENABLED
2686  // typed flag to store request state of cancellation
2687  std::atomic<kmp_int32> t_cancel_request;
2688 #endif
2689  int t_master_active; // save on fork, restore on join
2690  kmp_taskq_t t_taskq; // this team's task queue
2691  void *t_copypriv_data; // team specific pointer to copyprivate data array
2692 #if KMP_OS_WINDOWS
2693  std::atomic<kmp_uint32> t_copyin_counter;
2694 #endif
2695 #if USE_ITT_BUILD
2696  void *t_stack_id; // team specific stack stitching id (for ittnotify)
2697 #endif /* USE_ITT_BUILD */
2698 } kmp_base_team_t;
2699 
2700 union KMP_ALIGN_CACHE kmp_team {
2701  kmp_base_team_t t;
2702  double t_align; /* use worst case alignment */
2703  char t_pad[KMP_PAD(kmp_base_team_t, CACHE_LINE)];
2704 };
2705 
2706 typedef union KMP_ALIGN_CACHE kmp_time_global {
2707  double dt_align; /* use worst case alignment */
2708  char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
2709  kmp_base_data_t dt;
2710 } kmp_time_global_t;
2711 
2712 typedef struct kmp_base_global {
2713  /* cache-aligned */
2714  kmp_time_global_t g_time;
2715 
2716  /* non cache-aligned */
2717  volatile int g_abort;
2718  volatile int g_done;
2719 
2720  int g_dynamic;
2721  enum dynamic_mode g_dynamic_mode;
2722 } kmp_base_global_t;
2723 
2724 typedef union KMP_ALIGN_CACHE kmp_global {
2725  kmp_base_global_t g;
2726  double g_align; /* use worst case alignment */
2727  char g_pad[KMP_PAD(kmp_base_global_t, CACHE_LINE)];
2728 } kmp_global_t;
2729 
2730 typedef struct kmp_base_root {
2731  // TODO: GEH - combine r_active with r_in_parallel then r_active ==
2732  // (r_in_parallel >= 0)
2733  // TODO: GEH - then replace r_active with t_active_levels if we can to reduce
2734  // the synch overhead of keeping r_active
2735  volatile int r_active; /* TRUE if some region in a nest has > 1 thread */
2736  // GEH: This is misnamed, should be r_in_parallel
2737  volatile int r_nested; // TODO: GEH - This is unused, just remove it entirely.
2738  // keeps a count of active parallel regions per root
2739  std::atomic<int> r_in_parallel;
2740  // GEH: This is misnamed, should be r_active_levels
2741  kmp_team_t *r_root_team;
2742  kmp_team_t *r_hot_team;
2743  kmp_info_t *r_uber_thread;
2744  kmp_lock_t r_begin_lock;
2745  volatile int r_begin;
2746  int r_blocktime; /* blocktime for this root and descendants */
2747  int r_cg_nthreads; // count of active threads in a contention group
2748 } kmp_base_root_t;
2749 
2750 typedef union KMP_ALIGN_CACHE kmp_root {
2751  kmp_base_root_t r;
2752  double r_align; /* use worst case alignment */
2753  char r_pad[KMP_PAD(kmp_base_root_t, CACHE_LINE)];
2754 } kmp_root_t;
2755 
2756 struct fortran_inx_info {
2757  kmp_int32 data;
2758 };
2759 
2760 /* ------------------------------------------------------------------------ */
2761 
2762 extern int __kmp_settings;
2763 extern int __kmp_duplicate_library_ok;
2764 #if USE_ITT_BUILD
2765 extern int __kmp_forkjoin_frames;
2766 extern int __kmp_forkjoin_frames_mode;
2767 #endif
2768 extern PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method;
2769 extern int __kmp_determ_red;
2770 
2771 #ifdef KMP_DEBUG
2772 extern int kmp_a_debug;
2773 extern int kmp_b_debug;
2774 extern int kmp_c_debug;
2775 extern int kmp_d_debug;
2776 extern int kmp_e_debug;
2777 extern int kmp_f_debug;
2778 #endif /* KMP_DEBUG */
2779 
2780 /* For debug information logging using rotating buffer */
2781 #define KMP_DEBUG_BUF_LINES_INIT 512
2782 #define KMP_DEBUG_BUF_LINES_MIN 1
2783 
2784 #define KMP_DEBUG_BUF_CHARS_INIT 128
2785 #define KMP_DEBUG_BUF_CHARS_MIN 2
2786 
2787 extern int
2788  __kmp_debug_buf; /* TRUE means use buffer, FALSE means print to stderr */
2789 extern int __kmp_debug_buf_lines; /* How many lines of debug stored in buffer */
2790 extern int
2791  __kmp_debug_buf_chars; /* How many characters allowed per line in buffer */
2792 extern int __kmp_debug_buf_atomic; /* TRUE means use atomic update of buffer
2793  entry pointer */
2794 
2795 extern char *__kmp_debug_buffer; /* Debug buffer itself */
2796 extern std::atomic<int> __kmp_debug_count; /* Counter for number of lines
2797  printed in buffer so far */
2798 extern int __kmp_debug_buf_warn_chars; /* Keep track of char increase
2799  recommended in warnings */
2800 /* end rotating debug buffer */
2801 
2802 #ifdef KMP_DEBUG
2803 extern int __kmp_par_range; /* +1 => only go par for constructs in range */
2804 
2805 #define KMP_PAR_RANGE_ROUTINE_LEN 1024
2806 extern char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN];
2807 #define KMP_PAR_RANGE_FILENAME_LEN 1024
2808 extern char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN];
2809 extern int __kmp_par_range_lb;
2810 extern int __kmp_par_range_ub;
2811 #endif
2812 
2813 /* For printing out dynamic storage map for threads and teams */
2814 extern int
2815  __kmp_storage_map; /* True means print storage map for threads and teams */
2816 extern int __kmp_storage_map_verbose; /* True means storage map includes
2817  placement info */
2818 extern int __kmp_storage_map_verbose_specified;
2819 
2820 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2821 extern kmp_cpuinfo_t __kmp_cpuinfo;
2822 #endif
2823 
2824 extern volatile int __kmp_init_serial;
2825 extern volatile int __kmp_init_gtid;
2826 extern volatile int __kmp_init_common;
2827 extern volatile int __kmp_init_middle;
2828 extern volatile int __kmp_init_parallel;
2829 #if KMP_USE_MONITOR
2830 extern volatile int __kmp_init_monitor;
2831 #endif
2832 extern volatile int __kmp_init_user_locks;
2833 extern int __kmp_init_counter;
2834 extern int __kmp_root_counter;
2835 extern int __kmp_version;
2836 
2837 /* list of address of allocated caches for commons */
2838 extern kmp_cached_addr_t *__kmp_threadpriv_cache_list;
2839 
2840 /* Barrier algorithm types and options */
2841 extern kmp_uint32 __kmp_barrier_gather_bb_dflt;
2842 extern kmp_uint32 __kmp_barrier_release_bb_dflt;
2843 extern kmp_bar_pat_e __kmp_barrier_gather_pat_dflt;
2844 extern kmp_bar_pat_e __kmp_barrier_release_pat_dflt;
2845 extern kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier];
2846 extern kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier];
2847 extern kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier];
2848 extern kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier];
2849 extern char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier];
2850 extern char const *__kmp_barrier_pattern_env_name[bs_last_barrier];
2851 extern char const *__kmp_barrier_type_name[bs_last_barrier];
2852 extern char const *__kmp_barrier_pattern_name[bp_last_bar];
2853 
2854 /* Global Locks */
2855 extern kmp_bootstrap_lock_t __kmp_initz_lock; /* control initialization */
2856 extern kmp_bootstrap_lock_t __kmp_forkjoin_lock; /* control fork/join access */
2857 extern kmp_bootstrap_lock_t __kmp_task_team_lock;
2858 extern kmp_bootstrap_lock_t
2859  __kmp_exit_lock; /* exit() is not always thread-safe */
2860 #if KMP_USE_MONITOR
2861 extern kmp_bootstrap_lock_t
2862  __kmp_monitor_lock; /* control monitor thread creation */
2863 #endif
2864 extern kmp_bootstrap_lock_t
2865  __kmp_tp_cached_lock; /* used for the hack to allow threadprivate cache and
2866  __kmp_threads expansion to co-exist */
2867 
2868 extern kmp_lock_t __kmp_global_lock; /* control OS/global access */
2869 extern kmp_queuing_lock_t __kmp_dispatch_lock; /* control dispatch access */
2870 extern kmp_lock_t __kmp_debug_lock; /* control I/O access for KMP_DEBUG */
2871 
2872 /* used for yielding spin-waits */
2873 extern unsigned int __kmp_init_wait; /* initial number of spin-tests */
2874 extern unsigned int __kmp_next_wait; /* subsequent number of spin-tests */
2875 
2876 extern enum library_type __kmp_library;
2877 
2878 extern enum sched_type __kmp_sched; /* default runtime scheduling */
2879 extern enum sched_type __kmp_static; /* default static scheduling method */
2880 extern enum sched_type __kmp_guided; /* default guided scheduling method */
2881 extern enum sched_type __kmp_auto; /* default auto scheduling method */
2882 extern int __kmp_chunk; /* default runtime chunk size */
2883 
2884 extern size_t __kmp_stksize; /* stack size per thread */
2885 #if KMP_USE_MONITOR
2886 extern size_t __kmp_monitor_stksize; /* stack size for monitor thread */
2887 #endif
2888 extern size_t __kmp_stkoffset; /* stack offset per thread */
2889 extern int __kmp_stkpadding; /* Should we pad root thread(s) stack */
2890 
2891 extern size_t
2892  __kmp_malloc_pool_incr; /* incremental size of pool for kmp_malloc() */
2893 extern int __kmp_env_stksize; /* was KMP_STACKSIZE specified? */
2894 extern int __kmp_env_blocktime; /* was KMP_BLOCKTIME specified? */
2895 extern int __kmp_env_checks; /* was KMP_CHECKS specified? */
2896 extern int __kmp_env_consistency_check; // was KMP_CONSISTENCY_CHECK specified?
2897 extern int __kmp_generate_warnings; /* should we issue warnings? */
2898 extern int __kmp_reserve_warn; /* have we issued reserve_threads warning? */
2899 
2900 #ifdef DEBUG_SUSPEND
2901 extern int __kmp_suspend_count; /* count inside __kmp_suspend_template() */
2902 #endif
2903 
2904 extern kmp_uint32 __kmp_yield_init;
2905 extern kmp_uint32 __kmp_yield_next;
2906 
2907 #if KMP_USE_MONITOR
2908 extern kmp_uint32 __kmp_yielding_on;
2909 #endif
2910 extern kmp_uint32 __kmp_yield_cycle;
2911 extern kmp_int32 __kmp_yield_on_count;
2912 extern kmp_int32 __kmp_yield_off_count;
2913 
2914 /* ------------------------------------------------------------------------- */
2915 extern int __kmp_allThreadsSpecified;
2916 
2917 extern size_t __kmp_align_alloc;
2918 /* following data protected by initialization routines */
2919 extern int __kmp_xproc; /* number of processors in the system */
2920 extern int __kmp_avail_proc; /* number of processors available to the process */
2921 extern size_t __kmp_sys_min_stksize; /* system-defined minimum stack size */
2922 extern int __kmp_sys_max_nth; /* system-imposed maximum number of threads */
2923 // maximum total number of concurrently-existing threads on device
2924 extern int __kmp_max_nth;
2925 // maximum total number of concurrently-existing threads in a contention group
2926 extern int __kmp_cg_max_nth;
2927 extern int __kmp_teams_max_nth; // max threads used in a teams construct
2928 extern int __kmp_threads_capacity; /* capacity of the arrays __kmp_threads and
2929  __kmp_root */
2930 extern int __kmp_dflt_team_nth; /* default number of threads in a parallel
2931  region a la OMP_NUM_THREADS */
2932 extern int __kmp_dflt_team_nth_ub; /* upper bound on "" determined at serial
2933  initialization */
2934 extern int __kmp_tp_capacity; /* capacity of __kmp_threads if threadprivate is
2935  used (fixed) */
2936 extern int __kmp_tp_cached; /* whether threadprivate cache has been created
2937  (__kmpc_threadprivate_cached()) */
2938 extern int __kmp_dflt_nested; /* nested parallelism enabled by default a la
2939  OMP_NESTED */
2940 extern int __kmp_dflt_blocktime; /* number of milliseconds to wait before
2941  blocking (env setting) */
2942 #if KMP_USE_MONITOR
2943 extern int
2944  __kmp_monitor_wakeups; /* number of times monitor wakes up per second */
2945 extern int __kmp_bt_intervals; /* number of monitor timestamp intervals before
2946  blocking */
2947 #endif
2948 #ifdef KMP_ADJUST_BLOCKTIME
2949 extern int __kmp_zero_bt; /* whether blocktime has been forced to zero */
2950 #endif /* KMP_ADJUST_BLOCKTIME */
2951 #ifdef KMP_DFLT_NTH_CORES
2952 extern int __kmp_ncores; /* Total number of cores for threads placement */
2953 #endif
2954 /* Number of millisecs to delay on abort for Intel(R) VTune(TM) tools */
2955 extern int __kmp_abort_delay;
2956 
2957 extern int __kmp_need_register_atfork_specified;
2958 extern int
2959  __kmp_need_register_atfork; /* At initialization, call pthread_atfork to
2960  install fork handler */
2961 extern int __kmp_gtid_mode; /* Method of getting gtid, values:
2962  0 - not set, will be set at runtime
2963  1 - using stack search
2964  2 - dynamic TLS (pthread_getspecific(Linux* OS/OS
2965  X*) or TlsGetValue(Windows* OS))
2966  3 - static TLS (__declspec(thread) __kmp_gtid),
2967  Linux* OS .so only. */
2968 extern int
2969  __kmp_adjust_gtid_mode; /* If true, adjust method based on #threads */
2970 #ifdef KMP_TDATA_GTID
2971 extern KMP_THREAD_LOCAL int __kmp_gtid;
2972 #endif
2973 extern int __kmp_tls_gtid_min; /* #threads below which use sp search for gtid */
2974 extern int __kmp_foreign_tp; // If true, separate TP var for each foreign thread
2975 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2976 extern int __kmp_inherit_fp_control; // copy fp creg(s) parent->workers at fork
2977 extern kmp_int16 __kmp_init_x87_fpu_control_word; // init thread's FP ctrl reg
2978 extern kmp_uint32 __kmp_init_mxcsr; /* init thread's mxscr */
2979 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
2980 
2981 extern int __kmp_dflt_max_active_levels; /* max_active_levels for nested
2982  parallelism enabled by default via
2983  OMP_MAX_ACTIVE_LEVELS */
2984 extern int __kmp_dispatch_num_buffers; /* max possible dynamic loops in
2985  concurrent execution per team */
2986 #if KMP_NESTED_HOT_TEAMS
2987 extern int __kmp_hot_teams_mode;
2988 extern int __kmp_hot_teams_max_level;
2989 #endif
2990 
2991 #if KMP_OS_LINUX
2992 extern enum clock_function_type __kmp_clock_function;
2993 extern int __kmp_clock_function_param;
2994 #endif /* KMP_OS_LINUX */
2995 
2996 #if KMP_MIC_SUPPORTED
2997 extern enum mic_type __kmp_mic_type;
2998 #endif
2999 
3000 #ifdef USE_LOAD_BALANCE
3001 extern double __kmp_load_balance_interval; // load balance algorithm interval
3002 #endif /* USE_LOAD_BALANCE */
3003 
3004 // OpenMP 3.1 - Nested num threads array
3005 typedef struct kmp_nested_nthreads_t {
3006  int *nth;
3007  int size;
3008  int used;
3009 } kmp_nested_nthreads_t;
3010 
3011 extern kmp_nested_nthreads_t __kmp_nested_nth;
3012 
3013 #if KMP_USE_ADAPTIVE_LOCKS
3014 
3015 // Parameters for the speculative lock backoff system.
3016 struct kmp_adaptive_backoff_params_t {
3017  // Number of soft retries before it counts as a hard retry.
3018  kmp_uint32 max_soft_retries;
3019  // Badness is a bit mask : 0,1,3,7,15,... on each hard failure we move one to
3020  // the right
3021  kmp_uint32 max_badness;
3022 };
3023 
3024 extern kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params;
3025 
3026 #if KMP_DEBUG_ADAPTIVE_LOCKS
3027 extern const char *__kmp_speculative_statsfile;
3028 #endif
3029 
3030 #endif // KMP_USE_ADAPTIVE_LOCKS
3031 
3032 #if OMP_40_ENABLED
3033 extern int __kmp_display_env; /* TRUE or FALSE */
3034 extern int __kmp_display_env_verbose; /* TRUE if OMP_DISPLAY_ENV=VERBOSE */
3035 extern int __kmp_omp_cancellation; /* TRUE or FALSE */
3036 #endif
3037 
3038 /* ------------------------------------------------------------------------- */
3039 
3040 /* the following are protected by the fork/join lock */
3041 /* write: lock read: anytime */
3042 extern kmp_info_t **__kmp_threads; /* Descriptors for the threads */
3043 /* read/write: lock */
3044 extern volatile kmp_team_t *__kmp_team_pool;
3045 extern volatile kmp_info_t *__kmp_thread_pool;
3046 extern kmp_info_t *__kmp_thread_pool_insert_pt;
3047 
3048 // total num threads reachable from some root thread including all root threads
3049 extern volatile int __kmp_nth;
3050 /* total number of threads reachable from some root thread including all root
3051  threads, and those in the thread pool */
3052 extern volatile int __kmp_all_nth;
3053 extern int __kmp_thread_pool_nth;
3054 extern std::atomic<int> __kmp_thread_pool_active_nth;
3055 
3056 extern kmp_root_t **__kmp_root; /* root of thread hierarchy */
3057 /* end data protected by fork/join lock */
3058 /* ------------------------------------------------------------------------- */
3059 
3060 #define __kmp_get_gtid() __kmp_get_global_thread_id()
3061 #define __kmp_entry_gtid() __kmp_get_global_thread_id_reg()
3062 #define __kmp_get_tid() (__kmp_tid_from_gtid(__kmp_get_gtid()))
3063 #define __kmp_get_team() (__kmp_threads[(__kmp_get_gtid())]->th.th_team)
3064 #define __kmp_get_thread() (__kmp_thread_from_gtid(__kmp_get_gtid()))
3065 
3066 // AT: Which way is correct?
3067 // AT: 1. nproc = __kmp_threads[ ( gtid ) ] -> th.th_team -> t.t_nproc;
3068 // AT: 2. nproc = __kmp_threads[ ( gtid ) ] -> th.th_team_nproc;
3069 #define __kmp_get_team_num_threads(gtid) \
3070  (__kmp_threads[(gtid)]->th.th_team->t.t_nproc)
3071 
3072 static inline bool KMP_UBER_GTID(int gtid) {
3073  KMP_DEBUG_ASSERT(gtid >= KMP_GTID_MIN);
3074  KMP_DEBUG_ASSERT(gtid < __kmp_threads_capacity);
3075  return (gtid >= 0 && __kmp_root[gtid] && __kmp_threads[gtid] &&
3076  __kmp_threads[gtid] == __kmp_root[gtid]->r.r_uber_thread);
3077 }
3078 
3079 static inline int __kmp_tid_from_gtid(int gtid) {
3080  KMP_DEBUG_ASSERT(gtid >= 0);
3081  return __kmp_threads[gtid]->th.th_info.ds.ds_tid;
3082 }
3083 
3084 static inline int __kmp_gtid_from_tid(int tid, const kmp_team_t *team) {
3085  KMP_DEBUG_ASSERT(tid >= 0 && team);
3086  return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;
3087 }
3088 
3089 static inline int __kmp_gtid_from_thread(const kmp_info_t *thr) {
3090  KMP_DEBUG_ASSERT(thr);
3091  return thr->th.th_info.ds.ds_gtid;
3092 }
3093 
3094 static inline kmp_info_t *__kmp_thread_from_gtid(int gtid) {
3095  KMP_DEBUG_ASSERT(gtid >= 0);
3096  return __kmp_threads[gtid];
3097 }
3098 
3099 static inline kmp_team_t *__kmp_team_from_gtid(int gtid) {
3100  KMP_DEBUG_ASSERT(gtid >= 0);
3101  return __kmp_threads[gtid]->th.th_team;
3102 }
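/* Illustrative usage (editor's addition, not part of kmp.h): round-tripping
   between a global thread id and a team-local id with the helpers above. The
   function name is hypothetical. */
#if 0
static void example_id_mapping(int gtid) {
  kmp_team_t *team = __kmp_team_from_gtid(gtid);
  int tid = __kmp_tid_from_gtid(gtid);       // id within the current team
  KMP_DEBUG_ASSERT(__kmp_gtid_from_tid(tid, team) == gtid); // maps back
}
#endif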
3103 
3104 /* ------------------------------------------------------------------------- */
3105 
3106 extern kmp_global_t __kmp_global; /* global status */
3107 
3108 extern kmp_info_t __kmp_monitor;
3109 // For Debugging Support Library
3110 extern std::atomic<kmp_uint32> __kmp_team_counter;
3111 // For Debugging Support Library
3112 extern std::atomic<kmp_uint32> __kmp_task_counter;
3113 
3114 #if USE_DEBUGGER
3115 #define _KMP_GEN_ID(counter) \
3116  (__kmp_debugging ? KMP_ATOMIC_INC(&counter) + 1 : ~0)
3117 #else
3118 #define _KMP_GEN_ID(counter) (~0)
3119 #endif /* USE_DEBUGGER */
3120 
3121 #define KMP_GEN_TASK_ID() _KMP_GEN_ID(__kmp_task_counter)
3122 #define KMP_GEN_TEAM_ID() _KMP_GEN_ID(__kmp_team_counter)
3123 
3124 /* ------------------------------------------------------------------------ */
3125 
3126 extern void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2,
3127  size_t size, char const *format, ...);
3128 
3129 extern void __kmp_serial_initialize(void);
3130 extern void __kmp_middle_initialize(void);
3131 extern void __kmp_parallel_initialize(void);
3132 
3133 extern void __kmp_internal_begin(void);
3134 extern void __kmp_internal_end_library(int gtid);
3135 extern void __kmp_internal_end_thread(int gtid);
3136 extern void __kmp_internal_end_atexit(void);
3137 extern void __kmp_internal_end_fini(void);
3138 extern void __kmp_internal_end_dtor(void);
3139 extern void __kmp_internal_end_dest(void *);
3140 
3141 extern int __kmp_register_root(int initial_thread);
3142 extern void __kmp_unregister_root(int gtid);
3143 
3144 extern int __kmp_ignore_mppbeg(void);
3145 extern int __kmp_ignore_mppend(void);
3146 
3147 extern int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws);
3148 extern void __kmp_exit_single(int gtid);
3149 
3150 extern void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
3151 extern void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
3152 
3153 #ifdef USE_LOAD_BALANCE
3154 extern int __kmp_get_load_balance(int);
3155 #endif
3156 
3157 extern int __kmp_get_global_thread_id(void);
3158 extern int __kmp_get_global_thread_id_reg(void);
3159 extern void __kmp_exit_thread(int exit_status);
3160 extern void __kmp_abort(char const *format, ...);
3161 extern void __kmp_abort_thread(void);
3162 KMP_NORETURN extern void __kmp_abort_process(void);
3163 extern void __kmp_warn(char const *format, ...);
3164 
3165 extern void __kmp_set_num_threads(int new_nth, int gtid);
3166 
3167 // Returns current thread (pointer to kmp_info_t). Current thread *must* be
3168 // registered.
3169 static inline kmp_info_t *__kmp_entry_thread() {
3170  int gtid = __kmp_entry_gtid();
3171 
3172  return __kmp_threads[gtid];
3173 }
3174 
3175 extern void __kmp_set_max_active_levels(int gtid, int new_max_active_levels);
3176 extern int __kmp_get_max_active_levels(int gtid);
3177 extern int __kmp_get_ancestor_thread_num(int gtid, int level);
3178 extern int __kmp_get_team_size(int gtid, int level);
3179 extern void __kmp_set_schedule(int gtid, kmp_sched_t new_sched, int chunk);
3180 extern void __kmp_get_schedule(int gtid, kmp_sched_t *sched, int *chunk);
3181 
3182 extern unsigned short __kmp_get_random(kmp_info_t *thread);
3183 extern void __kmp_init_random(kmp_info_t *thread);
3184 
3185 extern kmp_r_sched_t __kmp_get_schedule_global(void);
3186 extern void __kmp_adjust_num_threads(int new_nproc);
3187 
3188 extern void *___kmp_allocate(size_t size KMP_SRC_LOC_DECL);
3189 extern void *___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL);
3190 extern void ___kmp_free(void *ptr KMP_SRC_LOC_DECL);
3191 #define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)
3192 #define __kmp_page_allocate(size) ___kmp_page_allocate((size)KMP_SRC_LOC_CURR)
3193 #define __kmp_free(ptr) ___kmp_free((ptr)KMP_SRC_LOC_CURR)
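/* Illustrative usage (editor's addition, not part of kmp.h): call sites use
   only the wrapper macros, which append the caller's source location
   (KMP_SRC_LOC_CURR) when source-location tracking is compiled in. */
#if 0
static void example_internal_alloc(void) {
  int *buf = (int *)__kmp_allocate(64 * sizeof(int));
  buf[0] = 42;
  __kmp_free(buf);
}
#endif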
3194 
3195 #if USE_FAST_MEMORY
3196 extern void *___kmp_fast_allocate(kmp_info_t *this_thr,
3197  size_t size KMP_SRC_LOC_DECL);
3198 extern void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL);
3199 extern void __kmp_free_fast_memory(kmp_info_t *this_thr);
3200 extern void __kmp_initialize_fast_memory(kmp_info_t *this_thr);
3201 #define __kmp_fast_allocate(this_thr, size) \
3202  ___kmp_fast_allocate((this_thr), (size)KMP_SRC_LOC_CURR)
3203 #define __kmp_fast_free(this_thr, ptr) \
3204  ___kmp_fast_free((this_thr), (ptr)KMP_SRC_LOC_CURR)
3205 #endif
3206 
3207 extern void *___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL);
3208 extern void *___kmp_thread_calloc(kmp_info_t *th, size_t nelem,
3209  size_t elsize KMP_SRC_LOC_DECL);
3210 extern void *___kmp_thread_realloc(kmp_info_t *th, void *ptr,
3211  size_t size KMP_SRC_LOC_DECL);
3212 extern void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL);
3213 #define __kmp_thread_malloc(th, size) \
3214  ___kmp_thread_malloc((th), (size)KMP_SRC_LOC_CURR)
3215 #define __kmp_thread_calloc(th, nelem, elsize) \
3216  ___kmp_thread_calloc((th), (nelem), (elsize)KMP_SRC_LOC_CURR)
3217 #define __kmp_thread_realloc(th, ptr, size) \
3218  ___kmp_thread_realloc((th), (ptr), (size)KMP_SRC_LOC_CURR)
3219 #define __kmp_thread_free(th, ptr) \
3220  ___kmp_thread_free((th), (ptr)KMP_SRC_LOC_CURR)
3221 
3222 #define KMP_INTERNAL_MALLOC(sz) malloc(sz)
3223 #define KMP_INTERNAL_FREE(p) free(p)
3224 #define KMP_INTERNAL_REALLOC(p, sz) realloc((p), (sz))
3225 #define KMP_INTERNAL_CALLOC(n, sz) calloc((n), (sz))
3226 
3227 extern void __kmp_push_num_threads(ident_t *loc, int gtid, int num_threads);
3228 
3229 #if OMP_40_ENABLED
3230 extern void __kmp_push_proc_bind(ident_t *loc, int gtid,
3231  kmp_proc_bind_t proc_bind);
3232 extern void __kmp_push_num_teams(ident_t *loc, int gtid, int num_teams,
3233  int num_threads);
3234 #endif
3235 
3236 extern void __kmp_yield(int cond);
3237 
3238 extern void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
3239  enum sched_type schedule, kmp_int32 lb,
3240  kmp_int32 ub, kmp_int32 st, kmp_int32 chunk);
3241 extern void __kmpc_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
3242  enum sched_type schedule, kmp_uint32 lb,
3243  kmp_uint32 ub, kmp_int32 st,
3244  kmp_int32 chunk);
3245 extern void __kmpc_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
3246  enum sched_type schedule, kmp_int64 lb,
3247  kmp_int64 ub, kmp_int64 st, kmp_int64 chunk);
3248 extern void __kmpc_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
3249  enum sched_type schedule, kmp_uint64 lb,
3250  kmp_uint64 ub, kmp_int64 st,
3251  kmp_int64 chunk);
3252 
3253 extern int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid,
3254  kmp_int32 *p_last, kmp_int32 *p_lb,
3255  kmp_int32 *p_ub, kmp_int32 *p_st);
3256 extern int __kmpc_dispatch_next_4u(ident_t *loc, kmp_int32 gtid,
3257  kmp_int32 *p_last, kmp_uint32 *p_lb,
3258  kmp_uint32 *p_ub, kmp_int32 *p_st);
3259 extern int __kmpc_dispatch_next_8(ident_t *loc, kmp_int32 gtid,
3260  kmp_int32 *p_last, kmp_int64 *p_lb,
3261  kmp_int64 *p_ub, kmp_int64 *p_st);
3262 extern int __kmpc_dispatch_next_8u(ident_t *loc, kmp_int32 gtid,
3263  kmp_int32 *p_last, kmp_uint64 *p_lb,
3264  kmp_uint64 *p_ub, kmp_int64 *p_st);
3265 
3266 extern void __kmpc_dispatch_fini_4(ident_t *loc, kmp_int32 gtid);
3267 extern void __kmpc_dispatch_fini_8(ident_t *loc, kmp_int32 gtid);
3268 extern void __kmpc_dispatch_fini_4u(ident_t *loc, kmp_int32 gtid);
3269 extern void __kmpc_dispatch_fini_8u(ident_t *loc, kmp_int32 gtid);
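/* Illustrative sketch (editor's addition, not part of kmp.h): roughly how a
   compiler lowers `#pragma omp for schedule(dynamic, 4)` over a 32-bit index
   onto the dispatch entry points above; local names are illustrative. */
#if 0
static void example_dynamic_loop(ident_t *loc, kmp_int32 gtid) {
  kmp_int32 lb, ub, st, last;
  __kmpc_dispatch_init_4(loc, gtid, kmp_sch_dynamic_chunked,
                         /*lb=*/0, /*ub=*/99, /*st=*/1, /*chunk=*/4);
  while (__kmpc_dispatch_next_4(loc, gtid, &last, &lb, &ub, &st)) {
    for (kmp_int32 i = lb; i <= ub; i += st) {
      /* loop body for iteration i */
    }
  }
}
#endif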
3270 
3271 #ifdef KMP_GOMP_COMPAT
3272 
3273 extern void __kmp_aux_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
3274  enum sched_type schedule, kmp_int32 lb,
3275  kmp_int32 ub, kmp_int32 st,
3276  kmp_int32 chunk, int push_ws);
3277 extern void __kmp_aux_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
3278  enum sched_type schedule, kmp_uint32 lb,
3279  kmp_uint32 ub, kmp_int32 st,
3280  kmp_int32 chunk, int push_ws);
3281 extern void __kmp_aux_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
3282  enum sched_type schedule, kmp_int64 lb,
3283  kmp_int64 ub, kmp_int64 st,
3284  kmp_int64 chunk, int push_ws);
3285 extern void __kmp_aux_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
3286  enum sched_type schedule, kmp_uint64 lb,
3287  kmp_uint64 ub, kmp_int64 st,
3288  kmp_int64 chunk, int push_ws);
3289 extern void __kmp_aux_dispatch_fini_chunk_4(ident_t *loc, kmp_int32 gtid);
3290 extern void __kmp_aux_dispatch_fini_chunk_8(ident_t *loc, kmp_int32 gtid);
3291 extern void __kmp_aux_dispatch_fini_chunk_4u(ident_t *loc, kmp_int32 gtid);
3292 extern void __kmp_aux_dispatch_fini_chunk_8u(ident_t *loc, kmp_int32 gtid);
3293 
3294 #endif /* KMP_GOMP_COMPAT */
3295 
3296 extern kmp_uint32 __kmp_eq_4(kmp_uint32 value, kmp_uint32 checker);
3297 extern kmp_uint32 __kmp_neq_4(kmp_uint32 value, kmp_uint32 checker);
3298 extern kmp_uint32 __kmp_lt_4(kmp_uint32 value, kmp_uint32 checker);
3299 extern kmp_uint32 __kmp_ge_4(kmp_uint32 value, kmp_uint32 checker);
3300 extern kmp_uint32 __kmp_le_4(kmp_uint32 value, kmp_uint32 checker);
3301 extern kmp_uint32 __kmp_wait_yield_4(kmp_uint32 volatile *spinner,
3302  kmp_uint32 checker,
3303  kmp_uint32 (*pred)(kmp_uint32, kmp_uint32),
3304  void *obj);
3305 extern void __kmp_wait_yield_4_ptr(void *spinner, kmp_uint32 checker,
3306  kmp_uint32 (*pred)(void *, kmp_uint32),
3307  void *obj);
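/* Illustrative usage (editor's addition, not part of kmp.h): spin until a
   32-bit flag reaches an expected value, using one of the predicate helpers
   declared above. The wrapper name is hypothetical. */
#if 0
static void example_spin_wait(kmp_uint32 volatile *flag) {
  __kmp_wait_yield_4(flag, /*checker=*/1, __kmp_eq_4, /*obj=*/NULL);
}
#endif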
3308 
3309 class kmp_flag_32;
3310 class kmp_flag_64;
3311 class kmp_flag_oncore;
3312 extern void __kmp_wait_64(kmp_info_t *this_thr, kmp_flag_64 *flag,
3313  int final_spin
3314 #if USE_ITT_BUILD
3315  ,
3316  void *itt_sync_obj
3317 #endif
3318  );
3319 extern void __kmp_release_64(kmp_flag_64 *flag);
3320 
3321 extern void __kmp_infinite_loop(void);
3322 
3323 extern void __kmp_cleanup(void);
3324 
3325 #if KMP_HANDLE_SIGNALS
3326 extern int __kmp_handle_signals;
3327 extern void __kmp_install_signals(int parallel_init);
3328 extern void __kmp_remove_signals(void);
3329 #endif
3330 
3331 extern void __kmp_clear_system_time(void);
3332 extern void __kmp_read_system_time(double *delta);
3333 
3334 extern void __kmp_check_stack_overlap(kmp_info_t *thr);
3335 
3336 extern void __kmp_expand_host_name(char *buffer, size_t size);
3337 extern void __kmp_expand_file_name(char *result, size_t rlen, char *pattern);
3338 
3339 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
3340 extern void
3341 __kmp_initialize_system_tick(void); /* Initialize timer tick value */
3342 #endif
3343 
3344 extern void
3345 __kmp_runtime_initialize(void); /* machine specific initialization */
3346 extern void __kmp_runtime_destroy(void);
3347 
3348 #if KMP_AFFINITY_SUPPORTED
3349 extern char *__kmp_affinity_print_mask(char *buf, int buf_len,
3350  kmp_affin_mask_t *mask);
3351 extern void __kmp_affinity_initialize(void);
3352 extern void __kmp_affinity_uninitialize(void);
3353 extern void __kmp_affinity_set_init_mask(
3354  int gtid, int isa_root); /* set affinity according to KMP_AFFINITY */
3355 #if OMP_40_ENABLED
3356 extern void __kmp_affinity_set_place(int gtid);
3357 #endif
3358 extern void __kmp_affinity_determine_capable(const char *env_var);
3359 extern int __kmp_aux_set_affinity(void **mask);
3360 extern int __kmp_aux_get_affinity(void **mask);
3361 extern int __kmp_aux_get_affinity_max_proc();
3362 extern int __kmp_aux_set_affinity_mask_proc(int proc, void **mask);
3363 extern int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask);
3364 extern int __kmp_aux_get_affinity_mask_proc(int proc, void **mask);
3365 extern void __kmp_balanced_affinity(int tid, int team_size);
3366 #if KMP_OS_LINUX
3367 extern int kmp_set_thread_affinity_mask_initial(void);
3368 #endif
3369 #endif /* KMP_AFFINITY_SUPPORTED */
3370 
3371 extern void __kmp_cleanup_hierarchy();
3372 extern void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar);
3373 
3374 #if KMP_USE_FUTEX
3375 
3376 extern int __kmp_futex_determine_capable(void);
3377 
3378 #endif // KMP_USE_FUTEX
3379 
3380 extern void __kmp_gtid_set_specific(int gtid);
3381 extern int __kmp_gtid_get_specific(void);
3382 
3383 extern double __kmp_read_cpu_time(void);
3384 
3385 extern int __kmp_read_system_info(struct kmp_sys_info *info);
3386 
3387 #if KMP_USE_MONITOR
3388 extern void __kmp_create_monitor(kmp_info_t *th);
3389 #endif
3390 
3391 extern void *__kmp_launch_thread(kmp_info_t *thr);
3392 
3393 extern void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size);
3394 
3395 #if KMP_OS_WINDOWS
3396 extern int __kmp_still_running(kmp_info_t *th);
3397 extern int __kmp_is_thread_alive(kmp_info_t *th, DWORD *exit_val);
3398 extern void __kmp_free_handle(kmp_thread_t tHandle);
3399 #endif
3400 
3401 #if KMP_USE_MONITOR
3402 extern void __kmp_reap_monitor(kmp_info_t *th);
3403 #endif
3404 extern void __kmp_reap_worker(kmp_info_t *th);
3405 extern void __kmp_terminate_thread(int gtid);
3406 
3407 extern void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag);
3408 extern void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag);
3409 extern void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag);
3410 extern void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag);
3411 extern void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag);
3412 extern void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag);
3413 
3414 extern void __kmp_elapsed(double *);
3415 extern void __kmp_elapsed_tick(double *);
3416 
3417 extern void __kmp_enable(int old_state);
3418 extern void __kmp_disable(int *old_state);
3419 
3420 extern void __kmp_thread_sleep(int millis);
3421 
3422 extern void __kmp_common_initialize(void);
3423 extern void __kmp_common_destroy(void);
3424 extern void __kmp_common_destroy_gtid(int gtid);
3425 
3426 #if KMP_OS_UNIX
3427 extern void __kmp_register_atfork(void);
3428 #endif
3429 extern void __kmp_suspend_initialize(void);
3430 extern void __kmp_suspend_uninitialize_thread(kmp_info_t *th);
3431 
3432 extern kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
3433  int tid);
3434 #if OMP_40_ENABLED
3435 extern kmp_team_t *
3436 __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
3437 #if OMPT_SUPPORT
3438  ompt_data_t ompt_parallel_data,
3439 #endif
3440  kmp_proc_bind_t proc_bind, kmp_internal_control_t *new_icvs,
3441  int argc USE_NESTED_HOT_ARG(kmp_info_t *thr));
3442 #else
3443 extern kmp_team_t *
3444 __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
3445 #if OMPT_SUPPORT
3446  ompt_id_t ompt_parallel_id,
3447 #endif
3448  kmp_internal_control_t *new_icvs,
3449  int argc USE_NESTED_HOT_ARG(kmp_info_t *thr));
3450 #endif // OMP_40_ENABLED
3451 extern void __kmp_free_thread(kmp_info_t *);
3452 extern void __kmp_free_team(kmp_root_t *,
3453  kmp_team_t *USE_NESTED_HOT_ARG(kmp_info_t *));
3454 extern kmp_team_t *__kmp_reap_team(kmp_team_t *);
3455 
3456 /* ------------------------------------------------------------------------ */
3457 
3458 extern void __kmp_initialize_bget(kmp_info_t *th);
3459 extern void __kmp_finalize_bget(kmp_info_t *th);
3460 
3461 KMP_EXPORT void *kmpc_malloc(size_t size);
3462 KMP_EXPORT void *kmpc_aligned_malloc(size_t size, size_t alignment);
3463 KMP_EXPORT void *kmpc_calloc(size_t nelem, size_t elsize);
3464 KMP_EXPORT void *kmpc_realloc(void *ptr, size_t size);
3465 KMP_EXPORT void kmpc_free(void *ptr);
3466 
3467 /* declarations for internal use */
3468 
3469 extern int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
3470  size_t reduce_size, void *reduce_data,
3471  void (*reduce)(void *, void *));
3472 extern void __kmp_end_split_barrier(enum barrier_type bt, int gtid);
3473 
3478 enum fork_context_e {
3479  fork_context_gnu,
3481  fork_context_intel,
3482  fork_context_last
3483 };
3484 extern int __kmp_fork_call(ident_t *loc, int gtid,
3485  enum fork_context_e fork_context, kmp_int32 argc,
3486  microtask_t microtask, launch_t invoker,
3487 /* TODO: revert workaround for Intel(R) 64 tracker #96 */
3488 #if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64) && KMP_OS_LINUX
3489  va_list *ap
3490 #else
3491  va_list ap
3492 #endif
3493  );
3494 
3495 extern void __kmp_join_call(ident_t *loc, int gtid
3496 #if OMPT_SUPPORT
3497  ,
3498  enum fork_context_e fork_context
3499 #endif
3500 #if OMP_40_ENABLED
3501  ,
3502  int exit_teams = 0
3503 #endif
3504  );
3505 
3506 extern void __kmp_serialized_parallel(ident_t *id, kmp_int32 gtid);
3507 extern void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team);
3508 extern void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team);
3509 extern int __kmp_invoke_task_func(int gtid);
3510 extern void __kmp_run_before_invoked_task(int gtid, int tid,
3511  kmp_info_t *this_thr,
3512  kmp_team_t *team);
3513 extern void __kmp_run_after_invoked_task(int gtid, int tid,
3514  kmp_info_t *this_thr,
3515  kmp_team_t *team);
3516 
3517 // should never have been exported
3518 KMP_EXPORT int __kmpc_invoke_task_func(int gtid);
3519 #if OMP_40_ENABLED
3520 extern int __kmp_invoke_teams_master(int gtid);
3521 extern void __kmp_teams_master(int gtid);
3522 #endif
3523 extern void __kmp_save_internal_controls(kmp_info_t *thread);
3524 extern void __kmp_user_set_library(enum library_type arg);
3525 extern void __kmp_aux_set_library(enum library_type arg);
3526 extern void __kmp_aux_set_stacksize(size_t arg);
3527 extern void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid);
3528 extern void __kmp_aux_set_defaults(char const *str, int len);
3529 
3530 /* Functions called from __kmp_aux_env_initialize() in kmp_settings.cpp */
3531 void kmpc_set_blocktime(int arg);
3532 void ompc_set_nested(int flag);
3533 void ompc_set_dynamic(int flag);
3534 void ompc_set_num_threads(int arg);
3535 
3536 extern void __kmp_push_current_task_to_thread(kmp_info_t *this_thr,
3537  kmp_team_t *team, int tid);
3538 extern void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr);
3539 extern kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
3540  kmp_tasking_flags_t *flags,
3541  size_t sizeof_kmp_task_t,
3542  size_t sizeof_shareds,
3543  kmp_routine_entry_t task_entry);
3544 extern void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
3545  kmp_team_t *team, int tid,
3546  int set_curr_task);
3547 extern void __kmp_finish_implicit_task(kmp_info_t *this_thr);
3548 extern void __kmp_free_implicit_task(kmp_info_t *this_thr);
3549 int __kmp_execute_tasks_32(kmp_info_t *thread, kmp_int32 gtid,
3550  kmp_flag_32 *flag, int final_spin,
3551  int *thread_finished,
3552 #if USE_ITT_BUILD
3553  void *itt_sync_obj,
3554 #endif /* USE_ITT_BUILD */
3555  kmp_int32 is_constrained);
3556 int __kmp_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
3557  kmp_flag_64 *flag, int final_spin,
3558  int *thread_finished,
3559 #if USE_ITT_BUILD
3560  void *itt_sync_obj,
3561 #endif /* USE_ITT_BUILD */
3562  kmp_int32 is_constrained);
3563 int __kmp_execute_tasks_oncore(kmp_info_t *thread, kmp_int32 gtid,
3564  kmp_flag_oncore *flag, int final_spin,
3565  int *thread_finished,
3566 #if USE_ITT_BUILD
3567  void *itt_sync_obj,
3568 #endif /* USE_ITT_BUILD */
3569  kmp_int32 is_constrained);
3570 
3571 extern void __kmp_free_task_team(kmp_info_t *thread,
3572  kmp_task_team_t *task_team);
3573 extern void __kmp_reap_task_teams(void);
3574 extern void __kmp_wait_to_unref_task_teams(void);
3575 extern void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team,
3576  int always);
3577 extern void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team);
3578 extern void __kmp_task_team_wait(kmp_info_t *this_thr, kmp_team_t *team
3579 #if USE_ITT_BUILD
3580  ,
3581  void *itt_sync_obj
3582 #endif /* USE_ITT_BUILD */
3583  ,
3584  int wait = 1);
3585 extern void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread,
3586  int gtid);
3587 
3588 extern int __kmp_is_address_mapped(void *addr);
3589 extern kmp_uint64 __kmp_hardware_timestamp(void);
3590 
3591 #if KMP_OS_UNIX
3592 extern int __kmp_read_from_file(char const *path, char const *format, ...);
3593 #endif
3594 
3595 /* ------------------------------------------------------------------------ */
3596 //
3597 // Assembly routines that have no compiler intrinsic replacement
3598 //
3599 
3600 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
3601 
3602 extern void __kmp_query_cpuid(kmp_cpuinfo_t *p);
3603 
3604 #define __kmp_load_mxcsr(p) _mm_setcsr(*(p))
3605 static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
3606 
3607 extern void __kmp_load_x87_fpu_control_word(kmp_int16 *p);
3608 extern void __kmp_store_x87_fpu_control_word(kmp_int16 *p);
3609 extern void __kmp_clear_x87_fpu_status_word();
3610 #define KMP_X86_MXCSR_MASK 0xffffffc0 /* ignore status flags (6 lsb) */
3611 
3612 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
3613 
3614 extern int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int npr, int argc,
3615  void *argv[]
3616 #if OMPT_SUPPORT
3617  ,
3618  void **exit_frame_ptr
3619 #endif
3620  );
3621 
3622 /* ------------------------------------------------------------------------ */
3623 
3624 KMP_EXPORT void __kmpc_begin(ident_t *, kmp_int32 flags);
3625 KMP_EXPORT void __kmpc_end(ident_t *);
3626 
3627 KMP_EXPORT void __kmpc_threadprivate_register_vec(ident_t *, void *data,
3628  kmpc_ctor_vec ctor,
3629  kmpc_cctor_vec cctor,
3630  kmpc_dtor_vec dtor,
3631  size_t vector_length);
3632 KMP_EXPORT void __kmpc_threadprivate_register(ident_t *, void *data,
3633  kmpc_ctor ctor, kmpc_cctor cctor,
3634  kmpc_dtor dtor);
3635 KMP_EXPORT void *__kmpc_threadprivate(ident_t *, kmp_int32 global_tid,
3636  void *data, size_t size);
3637 
3638 KMP_EXPORT kmp_int32 __kmpc_global_thread_num(ident_t *);
3639 KMP_EXPORT kmp_int32 __kmpc_global_num_threads(ident_t *);
3640 KMP_EXPORT kmp_int32 __kmpc_bound_thread_num(ident_t *);
3641 KMP_EXPORT kmp_int32 __kmpc_bound_num_threads(ident_t *);
3642 
3643 KMP_EXPORT kmp_int32 __kmpc_ok_to_fork(ident_t *);
3644 KMP_EXPORT void __kmpc_fork_call(ident_t *, kmp_int32 nargs,
3645  kmpc_micro microtask, ...);
3646 
3647 KMP_EXPORT void __kmpc_serialized_parallel(ident_t *, kmp_int32 global_tid);
3648 KMP_EXPORT void __kmpc_end_serialized_parallel(ident_t *, kmp_int32 global_tid);
3649 
3650 KMP_EXPORT void __kmpc_flush(ident_t *);
3651 KMP_EXPORT void __kmpc_barrier(ident_t *, kmp_int32 global_tid);
3652 KMP_EXPORT kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
3653 KMP_EXPORT void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
3654 KMP_EXPORT void __kmpc_ordered(ident_t *, kmp_int32 global_tid);
3655 KMP_EXPORT void __kmpc_end_ordered(ident_t *, kmp_int32 global_tid);
3656 KMP_EXPORT void __kmpc_critical(ident_t *, kmp_int32 global_tid,
3657  kmp_critical_name *);
3658 KMP_EXPORT void __kmpc_end_critical(ident_t *, kmp_int32 global_tid,
3659  kmp_critical_name *);
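/* Illustrative sketch (editor's addition, not part of kmp.h): lowering of a
   named `#pragma omp critical` region onto the entry points above; `crit`
   stands in for the compiler-generated kmp_critical_name object. */
#if 0
static void example_critical(ident_t *loc, kmp_int32 gtid,
                             kmp_critical_name *crit) {
  __kmpc_critical(loc, gtid, crit);
  /* ... code inside the critical region ... */
  __kmpc_end_critical(loc, gtid, crit);
}
#endif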
3660 
3661 #if OMP_45_ENABLED
3662 KMP_EXPORT void __kmpc_critical_with_hint(ident_t *, kmp_int32 global_tid,
3663  kmp_critical_name *, uint32_t hint);
3664 #endif
3665 
3666 KMP_EXPORT kmp_int32 __kmpc_barrier_master(ident_t *, kmp_int32 global_tid);
3667 KMP_EXPORT void __kmpc_end_barrier_master(ident_t *, kmp_int32 global_tid);
3668 
3669 KMP_EXPORT kmp_int32 __kmpc_barrier_master_nowait(ident_t *,
3670  kmp_int32 global_tid);
3671 
3672 KMP_EXPORT kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
3673 KMP_EXPORT void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
3674 
3675 KMP_EXPORT void KMPC_FOR_STATIC_INIT(ident_t *loc, kmp_int32 global_tid,
3676  kmp_int32 schedtype, kmp_int32 *plastiter,
3677  kmp_int *plower, kmp_int *pupper,
3678  kmp_int *pstride, kmp_int incr,
3679  kmp_int chunk);
3680 
3681 KMP_EXPORT void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
3682 
3683 KMP_EXPORT void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
3684  size_t cpy_size, void *cpy_data,
3685  void (*cpy_func)(void *, void *),
3686  kmp_int32 didit);
3687 
3688 extern void KMPC_SET_NUM_THREADS(int arg);
3689 extern void KMPC_SET_DYNAMIC(int flag);
3690 extern void KMPC_SET_NESTED(int flag);
3691 
3692 /* Taskq interface routines */
3693 KMP_EXPORT kmpc_thunk_t *__kmpc_taskq(ident_t *loc, kmp_int32 global_tid,
3694  kmpc_task_t taskq_task,
3695  size_t sizeof_thunk,
3696  size_t sizeof_shareds, kmp_int32 flags,
3697  kmpc_shared_vars_t **shareds);
3698 KMP_EXPORT void __kmpc_end_taskq(ident_t *loc, kmp_int32 global_tid,
3699  kmpc_thunk_t *thunk);
3700 KMP_EXPORT kmp_int32 __kmpc_task(ident_t *loc, kmp_int32 global_tid,
3701  kmpc_thunk_t *thunk);
3702 KMP_EXPORT void __kmpc_taskq_task(ident_t *loc, kmp_int32 global_tid,
3703  kmpc_thunk_t *thunk, kmp_int32 status);
3704 KMP_EXPORT void __kmpc_end_taskq_task(ident_t *loc, kmp_int32 global_tid,
3705  kmpc_thunk_t *thunk);
3706 KMP_EXPORT kmpc_thunk_t *__kmpc_task_buffer(ident_t *loc, kmp_int32 global_tid,
3707  kmpc_thunk_t *taskq_thunk,
3708  kmpc_task_t task);
3709 
3710 /* OMP 3.0 tasking interface routines */
3711 KMP_EXPORT kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
3712  kmp_task_t *new_task);
3713 KMP_EXPORT kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
3714  kmp_int32 flags,
3715  size_t sizeof_kmp_task_t,
3716  size_t sizeof_shareds,
3717  kmp_routine_entry_t task_entry);
3718 KMP_EXPORT void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
3719  kmp_task_t *task);
3720 KMP_EXPORT void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
3721  kmp_task_t *task);
3722 KMP_EXPORT kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
3723  kmp_task_t *new_task);
3724 KMP_EXPORT kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid);
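/* Illustrative sketch (editor's addition, not part of kmp.h): the usual
   lowering of `#pragma omp task` followed by `taskwait`; task_entry stands in
   for the compiler-generated outlined routine, and flags=1 is assumed here to
   mark the task as tied. */
#if 0
static void example_task(ident_t *loc, kmp_int32 gtid,
                         kmp_routine_entry_t task_entry) {
  kmp_task_t *t =
      __kmpc_omp_task_alloc(loc, gtid, /*flags=*/1, sizeof(kmp_task_t),
                            /*sizeof_shareds=*/0, task_entry);
  __kmpc_omp_task(loc, gtid, t);  // queue (or immediately execute) the task
  __kmpc_omp_taskwait(loc, gtid); // wait for child tasks to complete
}
#endif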
3725 
3726 KMP_EXPORT kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid,
3727  int end_part);
3728 
3729 #if TASK_UNUSED
3730 void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task);
3731 void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
3732  kmp_task_t *task);
3733 #endif // TASK_UNUSED
3734 
3735 /* ------------------------------------------------------------------------ */
3736 
3737 #if OMP_40_ENABLED
3738 
3739 KMP_EXPORT void __kmpc_taskgroup(ident_t *loc, int gtid);
3740 KMP_EXPORT void __kmpc_end_taskgroup(ident_t *loc, int gtid);
3741 
3742 KMP_EXPORT kmp_int32 __kmpc_omp_task_with_deps(
3743  ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 ndeps,
3744  kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
3745  kmp_depend_info_t *noalias_dep_list);
3746 KMP_EXPORT void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid,
3747  kmp_int32 ndeps,
3748  kmp_depend_info_t *dep_list,
3749  kmp_int32 ndeps_noalias,
3750  kmp_depend_info_t *noalias_dep_list);
3751 extern void __kmp_release_deps(kmp_int32 gtid, kmp_taskdata_t *task);
3752 extern void __kmp_dephash_free_entries(kmp_info_t *thread, kmp_dephash_t *h);
3753 extern void __kmp_dephash_free(kmp_info_t *thread, kmp_dephash_t *h);
3754 
3755 extern kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
3756  bool serialize_immediate);
3757 
3758 KMP_EXPORT kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid,
3759  kmp_int32 cncl_kind);
3760 KMP_EXPORT kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid,
3761  kmp_int32 cncl_kind);
3762 KMP_EXPORT kmp_int32 __kmpc_cancel_barrier(ident_t *loc_ref, kmp_int32 gtid);
3763 KMP_EXPORT int __kmp_get_cancellation_status(int cancel_kind);
3764 
3765 #if OMP_45_ENABLED
3766 
3767 KMP_EXPORT void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask);
3768 KMP_EXPORT void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask);
3769 KMP_EXPORT void __kmpc_taskloop(ident_t *loc, kmp_int32 gtid, kmp_task_t *task,
3770  kmp_int32 if_val, kmp_uint64 *lb,
3771  kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup,
3772  kmp_int32 sched, kmp_uint64 grainsize,
3773  void *task_dup);
3774 #endif
3775 // TODO: change to OMP_50_ENABLED, need to change build tools for this to work
3776 #if OMP_45_ENABLED
3777 KMP_EXPORT void *__kmpc_task_reduction_init(int gtid, int num_data, void *data);
3778 KMP_EXPORT void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void *d);
3779 #endif
3780 
3781 #endif
3782 
3783 /* Lock interface routines (fast versions with gtid passed in) */
3784 KMP_EXPORT void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid,
3785  void **user_lock);
3786 KMP_EXPORT void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid,
3787  void **user_lock);
3788 KMP_EXPORT void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid,
3789  void **user_lock);
3790 KMP_EXPORT void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid,
3791  void **user_lock);
3792 KMP_EXPORT void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
3793 KMP_EXPORT void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid,
3794  void **user_lock);
3795 KMP_EXPORT void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid,
3796  void **user_lock);
3797 KMP_EXPORT void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid,
3798  void **user_lock);
3799 KMP_EXPORT int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
3800 KMP_EXPORT int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid,
3801  void **user_lock);
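/* Illustrative sketch (editor's addition, not part of kmp.h): the call
   sequence a compiler emits around a lock-protected region using the fast
   (gtid-passing) lock entry points above. */
#if 0
static void example_user_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) {
  __kmpc_init_lock(loc, gtid, user_lock);    // once, before first use
  __kmpc_set_lock(loc, gtid, user_lock);     // acquire
  /* ... protected region ... */
  __kmpc_unset_lock(loc, gtid, user_lock);   // release
  __kmpc_destroy_lock(loc, gtid, user_lock); // when no longer needed
}
#endif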
3802 
3803 #if OMP_45_ENABLED
3804 KMP_EXPORT void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid,
3805  void **user_lock, uintptr_t hint);
3806 KMP_EXPORT void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid,
3807  void **user_lock,
3808  uintptr_t hint);
3809 #endif
3810 
3811 /* Interface to fast scalable reduce methods routines */
3812 
3813 KMP_EXPORT kmp_int32 __kmpc_reduce_nowait(
3814  ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
3815  void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
3816  kmp_critical_name *lck);
3817 KMP_EXPORT void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
3818  kmp_critical_name *lck);
3819 KMP_EXPORT kmp_int32 __kmpc_reduce(
3820  ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
3821  void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
3822  kmp_critical_name *lck);
3823 KMP_EXPORT void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
3824  kmp_critical_name *lck);
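/* Illustrative sketch (editor's addition, not part of kmp.h): the call pattern
   typically emitted for a `reduction(+:sum)` clause with the nowait entry
   points above. The return-value handling (1 = combine directly, 2 = use
   atomics, 0 = nothing to do) is assumed from the runtime's documented
   contract; names are illustrative. */
#if 0
static void example_reduce(ident_t *loc, kmp_int32 gtid, kmp_critical_name *lck,
                           double *sum, double local_sum,
                           void (*add_func)(void *, void *)) {
  switch (__kmpc_reduce_nowait(loc, gtid, /*num_vars=*/1, sizeof(double),
                               &local_sum, add_func, lck)) {
  case 1: // this thread combines the partial result directly
    *sum += local_sum;
    __kmpc_end_reduce_nowait(loc, gtid, lck);
    break;
  case 2: // every thread updates the shared variable atomically
    /* atomic add of local_sum into *sum */
    break;
  default: // 0: nothing to do on this thread
    break;
  }
}
#endif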
3825 
3826 /* Internal fast reduction routines */
3827 
3828 extern PACKED_REDUCTION_METHOD_T __kmp_determine_reduction_method(
3829  ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
3830  void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
3831  kmp_critical_name *lck);
3832 
3833 // this function is for testing set/get/determine reduce method
3834 KMP_EXPORT kmp_int32 __kmp_get_reduce_method(void);
3835 
3836 KMP_EXPORT kmp_uint64 __kmpc_get_taskid();
3837 KMP_EXPORT kmp_uint64 __kmpc_get_parent_taskid();
3838 
3839 // C++ port
3840 // missing 'extern "C"' declarations
3841 
3842 KMP_EXPORT kmp_int32 __kmpc_in_parallel(ident_t *loc);
3843 KMP_EXPORT void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid);
3844 KMP_EXPORT void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
3845  kmp_int32 num_threads);
3846 
3847 #if OMP_40_ENABLED
3848 KMP_EXPORT void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
3849  int proc_bind);
3850 KMP_EXPORT void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
3851  kmp_int32 num_teams,
3852  kmp_int32 num_threads);
3853 KMP_EXPORT void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc,
3854  kmpc_micro microtask, ...);
3855 #endif
3856 #if OMP_45_ENABLED
3857 struct kmp_dim { // loop bounds info casted to kmp_int64
3858  kmp_int64 lo; // lower
3859  kmp_int64 up; // upper
3860  kmp_int64 st; // stride
3861 };
3862 KMP_EXPORT void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
3863  kmp_int32 num_dims,
3864  const struct kmp_dim *dims);
3865 KMP_EXPORT void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid,
3866  const kmp_int64 *vec);
3867 KMP_EXPORT void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid,
3868  const kmp_int64 *vec);
3869 KMP_EXPORT void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
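/* Illustrative sketch (editor's addition, not part of kmp.h): lowering of a
   one-dimensional `ordered(1)` doacross loop where iteration i depends on
   iteration i-1; names are illustrative. */
#if 0
static void example_doacross(ident_t *loc, kmp_int32 gtid, kmp_int64 n) {
  struct kmp_dim dim = {/*lo=*/0, /*up=*/n - 1, /*st=*/1};
  __kmpc_doacross_init(loc, gtid, /*num_dims=*/1, &dim);
  for (kmp_int64 i = 0; i < n; ++i) { // iterations handed out by the loop
    kmp_int64 dep = i - 1;
    if (i > 0)
      __kmpc_doacross_wait(loc, gtid, &dep); // wait for iteration i-1
    /* ... loop body ... */
    __kmpc_doacross_post(loc, gtid, &i);     // signal iteration i complete
  }
  __kmpc_doacross_fini(loc, gtid);
}
#endif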
3870 #endif
3871 
3872 KMP_EXPORT void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
3873  void *data, size_t size,
3874  void ***cache);
3875 
3876 // Symbols for mutual detection with Microsoft's OpenMP library: the linker
3877 // should resolve these from exactly one OpenMP runtime.
3877 extern int _You_must_link_with_exactly_one_OpenMP_library;
3878 extern int _You_must_link_with_Intel_OpenMP_library;
3879 #if KMP_OS_WINDOWS && (KMP_VERSION_MAJOR > 4)
3880 extern int _You_must_link_with_Microsoft_OpenMP_library;
3881 #endif
3882 
3883 // The routines below are not exported.
3884 // Consider making them 'static' in corresponding source files.
3885 void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
3886  void *data_addr, size_t pc_size);
3887 struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
3888  void *data_addr,
3889  size_t pc_size);
3890 void __kmp_threadprivate_resize_cache(int newCapacity);
3891 void __kmp_cleanup_threadprivate_caches();
3892 
3893 // ompc_, kmpc_ entries moved from omp.h.
3894 #if KMP_OS_WINDOWS
3895 #define KMPC_CONVENTION __cdecl
3896 #else
3897 #define KMPC_CONVENTION
3898 #endif
3899 
3900 #ifndef __OMP_H
3901 typedef enum omp_sched_t {
3902  omp_sched_static = 1,
3903  omp_sched_dynamic = 2,
3904  omp_sched_guided = 3,
3905  omp_sched_auto = 4
3906 } omp_sched_t;
3907 typedef void *kmp_affinity_mask_t;
3908 #endif
3909 
3910 KMP_EXPORT void KMPC_CONVENTION ompc_set_max_active_levels(int);
3911 KMP_EXPORT void KMPC_CONVENTION ompc_set_schedule(omp_sched_t, int);
3912 KMP_EXPORT int KMPC_CONVENTION ompc_get_ancestor_thread_num(int);
3913 KMP_EXPORT int KMPC_CONVENTION ompc_get_team_size(int);
3914 KMP_EXPORT int KMPC_CONVENTION
3915 kmpc_set_affinity_mask_proc(int, kmp_affinity_mask_t *);
3916 KMP_EXPORT int KMPC_CONVENTION
3917 kmpc_unset_affinity_mask_proc(int, kmp_affinity_mask_t *);
3918 KMP_EXPORT int KMPC_CONVENTION
3919 kmpc_get_affinity_mask_proc(int, kmp_affinity_mask_t *);
3920 
3921 KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize(int);
3922 KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize_s(size_t);
3923 KMP_EXPORT void KMPC_CONVENTION kmpc_set_library(int);
3924 KMP_EXPORT void KMPC_CONVENTION kmpc_set_defaults(char const *);
3925 KMP_EXPORT void KMPC_CONVENTION kmpc_set_disp_num_buffers(int);
3926 
3927 #if OMP_50_ENABLED
3928 enum kmp_target_offload_kind {
3929  tgt_disabled = 0,
3930  tgt_default = 1,
3931  tgt_mandatory = 2
3932 };
3933 typedef enum kmp_target_offload_kind kmp_target_offload_kind_t;
3934 // Set via OMP_TARGET_OFFLOAD if specified, defaults to tgt_default otherwise
3935 extern kmp_target_offload_kind_t __kmp_target_offload;
3936 extern int __kmpc_get_target_offload();
3937 #endif
3938 
3939 #ifdef __cplusplus
3940 }
3941 #endif
3942 
3943 #endif /* KMP_H */