#include <stddef.h>
#include <atomic>

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_itt.h"
#include "kmp_lock.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#include "tsan_annotations.h"

#if KMP_USE_FUTEX
#include <sys/syscall.h>
#include <unistd.h>
// All we need from <futex.h> are these two constants, which are part of the
// stable kernel ABI, so define them here instead of including the header.
#ifndef FUTEX_WAIT
#define FUTEX_WAIT 0
#endif
#ifndef FUTEX_WAKE
#define FUTEX_WAKE 1
#endif
#endif

void __kmp_validate_locks(void) {
  int i;
  kmp_uint32 x, y;

  /* Check to make sure unsigned arithmetic wraps properly */
  x = ~((kmp_uint32)0) - 2;
  y = x - 2;

  for (i = 0; i < 8; ++i, ++x, ++y) {
    kmp_uint32 z = (x - y);
    KMP_ASSERT(z == 2);
  }

  KMP_ASSERT(offsetof(kmp_base_queuing_lock, tail_id) % 8 == 0);
}
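/* Overview (added commentary, not part of the original source): the
   user-facing OpenMP lock API is expected to funnel into the per-kind
   routines defined below, roughly as

       omp_lock_t l;
       omp_init_lock(&l);    // -> __kmp_init_<kind>_lock
       omp_set_lock(&l);     // -> __kmp_acquire_<kind>_lock[_with_checks]
       omp_unset_lock(&l);   // -> __kmp_release_<kind>_lock[_with_checks]
       omp_destroy_lock(&l); // -> __kmp_destroy_<kind>_lock[_with_checks]

   where <kind> (tas, futex, ticket, queuing, adaptive, drdpa) is selected by
   the runtime's lock dispatch tables; the exact dispatch is outside this
   excerpt. The "_with_checks" variants are the versions installed when the
   runtime's consistency checking is enabled. */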
/* ------------------------------------------------------------------------ */
/* test and set locks */

static kmp_int32 __kmp_get_tas_lock_owner(kmp_tas_lock_t *lck) {
  return KMP_LOCK_STRIP(KMP_ATOMIC_LD_RLX(&lck->lk.poll)) - 1;
}

static inline bool __kmp_is_tas_lock_nestable(kmp_tas_lock_t *lck) {
  return lck->lk.depth_locked != -1;
}
__forceinline static int
__kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  KMP_MB();

#ifdef USE_LOCK_PROFILE
  kmp_uint32 curr = KMP_LOCK_STRIP(lck->lk.poll);
  if ((curr != 0) && (curr != gtid + 1))
    __kmp_printf("LOCK CONTENTION: %p\n", lck);
/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */

  kmp_int32 tas_free = KMP_LOCK_FREE(tas);
  kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas);

  // Fast path: the lock looks free and the acquiring CAS succeeds.
  if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
      __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
    KMP_FSYNC_ACQUIRED(lck);
    return KMP_LOCK_ACQUIRED_FIRST;
  }

  kmp_uint32 spins;
  KMP_FSYNC_PREPARE(lck);
  KMP_INIT_YIELD(spins);
  if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
    KMP_YIELD(TRUE);
  } else {
    KMP_YIELD_SPIN(spins);
  }

  kmp_backoff_t backoff = __kmp_spin_backoff_params;
  while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != tas_free ||
         !__kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
    __kmp_spin_backoff(&backoff);
    if (TCR_4(__kmp_nth) >
        (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
      KMP_YIELD(TRUE);
    } else {
      KMP_YIELD_SPIN(spins);
    }
  }
  KMP_FSYNC_ACQUIRED(lck);
  return KMP_LOCK_ACQUIRED_FIRST;
}
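/* Note on the algorithm (added commentary): the test-and-set lock spins on a
   single atomic poll word that holds KMP_LOCK_FREE(tas) when free and a value
   derived from gtid + 1 when held. Contended acquires back off between CAS
   attempts via __kmp_spin_backoff(), and yield the processor when the system
   looks oversubscribed (__kmp_nth above the available processor count), so a
   spinner does not starve the lock holder. */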
int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  int retval = __kmp_acquire_tas_lock_timed_template(lck, gtid);
  ANNOTATE_TAS_ACQUIRED(lck);
  return retval;
}

static int __kmp_acquire_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                              kmp_int32 gtid) {
  char const *const func = "omp_set_lock";
  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) == gtid)) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  }
  return __kmp_acquire_tas_lock(lck, gtid);
}
int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  kmp_int32 tas_free = KMP_LOCK_FREE(tas);
  kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas);
  if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
      __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
    KMP_FSYNC_ACQUIRED(lck);
    return TRUE;
  }
  return FALSE;
}

static int __kmp_test_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                           kmp_int32 gtid) {
  char const *const func = "omp_test_lock";
  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  return __kmp_test_tas_lock(lck, gtid);
}
int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KMP_FSYNC_RELEASING(lck);
  ANNOTATE_TAS_RELEASED(lck);
  KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(tas));
  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KMP_YIELD(TCR_4(__kmp_nth) >
            (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
  return KMP_LOCK_RELEASED;
}

static int __kmp_release_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                              kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";
  KMP_MB(); /* in case another processor initialized lock */
  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_tas_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) >= 0) &&
      (__kmp_get_tas_lock_owner(lck) != gtid)) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_tas_lock(lck, gtid);
}
void __kmp_init_tas_lock(kmp_tas_lock_t *lck) {
  lck->lk.poll = KMP_LOCK_FREE(tas);
}

void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck) { lck->lk.poll = 0; }

static void __kmp_destroy_tas_lock_with_checks(kmp_tas_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_tas_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_tas_lock(lck);
}
// nested test and set locks

int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_tas_lock_owner(lck) == gtid) {
    lck->lk.depth_locked += 1;
    return KMP_LOCK_ACQUIRED_NEXT;
  } else {
    __kmp_acquire_tas_lock_timed_template(lck, gtid);
    ANNOTATE_TAS_ACQUIRED(lck);
    lck->lk.depth_locked = 1;
    return KMP_LOCK_ACQUIRED_FIRST;
  }
}

static int __kmp_acquire_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                                     kmp_int32 gtid) {
  char const *const func = "omp_set_nest_lock";
  if (!__kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_acquire_nested_tas_lock(lck, gtid);
}
int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  int retval;

  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_tas_lock_owner(lck) == gtid) {
    retval = ++lck->lk.depth_locked;
  } else if (!__kmp_test_tas_lock(lck, gtid)) {
    retval = 0;
  } else {
    retval = lck->lk.depth_locked = 1;
  }
  return retval;
}

static int __kmp_test_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                                  kmp_int32 gtid) {
  char const *const func = "omp_test_nest_lock";
  if (!__kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_test_nested_tas_lock(lck, gtid);
}
int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  KMP_MB();
  if (--(lck->lk.depth_locked) == 0) {
    __kmp_release_tas_lock(lck, gtid);
    return KMP_LOCK_RELEASED;
  }
  return KMP_LOCK_STILL_HELD;
}

static int __kmp_release_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                                     kmp_int32 gtid) {
  char const *const func = "omp_unset_nest_lock";
  KMP_MB(); /* in case another processor initialized lock */
  if (!__kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_tas_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_tas_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_nested_tas_lock(lck, gtid);
}
void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck) {
  __kmp_init_tas_lock(lck);
  lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
}

void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck) {
  __kmp_destroy_tas_lock(lck);
  lck->lk.depth_locked = 0;
}

static void __kmp_destroy_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
  char const *const func = "omp_destroy_nest_lock";
  if (!__kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_tas_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_nested_tas_lock(lck);
}
#if KMP_USE_FUTEX

/* ------------------------------------------------------------------------ */
/* futex locks */

// Futex locks are test-and-set locks with a different method of handling
// contention: waiters sleep in the kernel via futex syscalls instead of
// spinning.

static kmp_int32 __kmp_get_futex_lock_owner(kmp_futex_lock_t *lck) {
  return KMP_LOCK_STRIP((TCR_4(lck->lk.poll) >> 1)) - 1;
}

static inline bool __kmp_is_futex_lock_nestable(kmp_futex_lock_t *lck) {
  return lck->lk.depth_locked != -1;
}
__forceinline static int
__kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t *lck,
                                        kmp_int32 gtid) {
  kmp_int32 gtid_code = (gtid + 1) << 1;

  KMP_MB();

#ifdef USE_LOCK_PROFILE
  kmp_uint32 curr = KMP_LOCK_STRIP(TCR_4(lck->lk.poll));
  if ((curr != 0) && (curr != gtid_code))
    __kmp_printf("LOCK CONTENTION: %p\n", lck);
/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */

  KMP_FSYNC_PREPARE(lck);
  KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d entering\n",
                  lck, lck->lk.poll, gtid));

  kmp_int32 poll_val;

  while ((poll_val = KMP_COMPARE_AND_STORE_RET32(
              &(lck->lk.poll), KMP_LOCK_FREE(futex),
              KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) {

    kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1;
    KA_TRACE(
        1000,
        ("__kmp_acquire_futex_lock: lck:%p, T#%d poll_val = 0x%x cond = 0x%x\n",
         lck, gtid, poll_val, cond));

    if (!cond) {
      // Try to set the lsb in the poll word to indicate to the owner thread
      // that it needs to wake this thread up.
      if (!KMP_COMPARE_AND_STORE_REL32(&(lck->lk.poll), poll_val,
                                       poll_val | KMP_LOCK_BUSY(1, futex))) {
        KA_TRACE(
            1000,
            ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d can't set bit 0\n",
             lck, lck->lk.poll, gtid));
        continue;
      }
      poll_val |= KMP_LOCK_BUSY(1, futex);

      KA_TRACE(1000,
               ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d bit 0 set\n", lck,
                lck->lk.poll, gtid));
    }

    KA_TRACE(
        1000,
        ("__kmp_acquire_futex_lock: lck:%p, T#%d before futex_wait(0x%x)\n",
         lck, gtid, poll_val));

    kmp_int32 rc;
    if ((rc = syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAIT, poll_val, NULL,
                      NULL, 0)) != 0) {
      KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d futex_wait(0x%x) "
                      "failed (rc=%d errno=%d)\n",
                      lck, gtid, poll_val, rc, errno));
      continue;
    }

    KA_TRACE(1000,
             ("__kmp_acquire_futex_lock: lck:%p, T#%d after futex_wait(0x%x)\n",
              lck, gtid, poll_val));

    // This thread has now done a successful futex wait call and was entered on
    // the OS futex queue.  It must perform a futex wake call when releasing
    // the lock, since it has no idea how many other threads are in the queue.
    gtid_code |= 1;
  }

  KMP_FSYNC_ACQUIRED(lck);
  KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
                  lck->lk.poll, gtid));
  return KMP_LOCK_ACQUIRED_FIRST;
}
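/* Note on the protocol (added commentary): the futex lock encodes the owner as
   (gtid + 1) << 1 in the poll word; the low bit serves as a "there may be
   sleepers" flag. A contender that fails the acquiring CAS sets that bit and
   then sleeps in FUTEX_WAIT on the poll word, and the releasing thread issues
   a FUTEX_WAKE only when it observes the bit set, so the syscall cost is paid
   only under contention. */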
int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  int retval = __kmp_acquire_futex_lock_timed_template(lck, gtid);
  ANNOTATE_FUTEX_ACQUIRED(lck);
  return retval;
}

static int __kmp_acquire_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                kmp_int32 gtid) {
  char const *const func = "omp_set_lock";
  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) == gtid)) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  }
  return __kmp_acquire_futex_lock(lck, gtid);
}
int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  if (KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(futex),
                                  KMP_LOCK_BUSY((gtid + 1) << 1, futex))) {
    KMP_FSYNC_ACQUIRED(lck);
    return TRUE;
  }
  return FALSE;
}

static int __kmp_test_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                             kmp_int32 gtid) {
  char const *const func = "omp_test_lock";
  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  return __kmp_test_futex_lock(lck, gtid);
}
int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  KMP_MB();

  KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d entering\n",
                  lck, lck->lk.poll, gtid));

  KMP_FSYNC_RELEASING(lck);
  ANNOTATE_FUTEX_RELEASED(lck);

  kmp_int32 poll_val = KMP_XCHG_FIXED32(&(lck->lk.poll), KMP_LOCK_FREE(futex));

  KA_TRACE(1000,
           ("__kmp_release_futex_lock: lck:%p, T#%d released poll_val = 0x%x\n",
            lck, gtid, poll_val));

  if (KMP_LOCK_STRIP(poll_val) & 1) {
    KA_TRACE(1000,
             ("__kmp_release_futex_lock: lck:%p, T#%d futex_wake 1 thread\n",
              lck, gtid));
    syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex),
            NULL, NULL, 0);
  }

  KMP_MB();

  KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
                  lck->lk.poll, gtid));

  KMP_YIELD(TCR_4(__kmp_nth) >
            (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
  return KMP_LOCK_RELEASED;
}
static int __kmp_release_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";
  KMP_MB(); /* in case another processor initialized lock */
  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_futex_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) >= 0) &&
      (__kmp_get_futex_lock_owner(lck) != gtid)) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_futex_lock(lck, gtid);
}
void __kmp_init_futex_lock(kmp_futex_lock_t *lck) {
  TCW_4(lck->lk.poll, KMP_LOCK_FREE(futex));
}

void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { lck->lk.poll = 0; }

static void __kmp_destroy_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_futex_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_futex_lock(lck);
}
// nested futex locks

int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_futex_lock_owner(lck) == gtid) {
    lck->lk.depth_locked += 1;
    return KMP_LOCK_ACQUIRED_NEXT;
  } else {
    __kmp_acquire_futex_lock_timed_template(lck, gtid);
    ANNOTATE_FUTEX_ACQUIRED(lck);
    lck->lk.depth_locked = 1;
    return KMP_LOCK_ACQUIRED_FIRST;
  }
}

static int __kmp_acquire_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                       kmp_int32 gtid) {
  char const *const func = "omp_set_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_acquire_nested_futex_lock(lck, gtid);
}
int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  int retval;

  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_futex_lock_owner(lck) == gtid) {
    retval = ++lck->lk.depth_locked;
  } else if (!__kmp_test_futex_lock(lck, gtid)) {
    retval = 0;
  } else {
    retval = lck->lk.depth_locked = 1;
  }
  return retval;
}

static int __kmp_test_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                    kmp_int32 gtid) {
  char const *const func = "omp_test_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_test_nested_futex_lock(lck, gtid);
}
int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  KMP_MB();
  if (--(lck->lk.depth_locked) == 0) {
    __kmp_release_futex_lock(lck, gtid);
    return KMP_LOCK_RELEASED;
  }
  return KMP_LOCK_STILL_HELD;
}

static int __kmp_release_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                       kmp_int32 gtid) {
  char const *const func = "omp_unset_nest_lock";
  KMP_MB(); /* in case another processor initialized lock */
  if (!__kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_futex_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_futex_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_nested_futex_lock(lck, gtid);
}
void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck) {
  __kmp_init_futex_lock(lck);
  lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
}

void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck) {
  __kmp_destroy_futex_lock(lck);
  lck->lk.depth_locked = 0;
}

static void __kmp_destroy_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  char const *const func = "omp_destroy_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_futex_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_nested_futex_lock(lck);
}
#endif // KMP_USE_FUTEX

/* ------------------------------------------------------------------------ */
/* ticket (bakery) locks */

static kmp_int32 __kmp_get_ticket_lock_owner(kmp_ticket_lock_t *lck) {
  return std::atomic_load_explicit(&lck->lk.owner_id,
                                   std::memory_order_relaxed) -
         1;
}

static inline bool __kmp_is_ticket_lock_nestable(kmp_ticket_lock_t *lck) {
  return std::atomic_load_explicit(&lck->lk.depth_locked,
                                   std::memory_order_relaxed) != -1;
}

static kmp_uint32 __kmp_bakery_check(void *now_serving, kmp_uint32 my_ticket) {
  return std::atomic_load_explicit((std::atomic<unsigned> *)now_serving,
                                   std::memory_order_acquire) == my_ticket;
}
__forceinline static int
__kmp_acquire_ticket_lock_timed_template(kmp_ticket_lock_t *lck,
                                         kmp_int32 gtid) {
  kmp_uint32 my_ticket = std::atomic_fetch_add_explicit(
      &lck->lk.next_ticket, 1U, std::memory_order_relaxed);

#ifdef USE_LOCK_PROFILE
  if (std::atomic_load_explicit(&lck->lk.now_serving,
                                std::memory_order_relaxed) != my_ticket)
    __kmp_printf("LOCK CONTENTION: %p\n", lck);
/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */

  if (std::atomic_load_explicit(&lck->lk.now_serving,
                                std::memory_order_acquire) == my_ticket) {
    return KMP_LOCK_ACQUIRED_FIRST;
  }
  KMP_WAIT_YIELD_PTR(&lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck);
  return KMP_LOCK_ACQUIRED_FIRST;
}
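/* Note on the algorithm (added commentary): this is a classic ticket/bakery
   lock. Each acquirer atomically takes a ticket from next_ticket and then
   waits until now_serving reaches that value; release simply increments
   now_serving. This grants the lock in FIFO order, at the cost of every
   waiter spinning on the same now_serving location. */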
int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  int retval = __kmp_acquire_ticket_lock_timed_template(lck, gtid);
  ANNOTATE_TICKET_ACQUIRED(lck);
  return retval;
}

static int __kmp_acquire_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                                 kmp_int32 gtid) {
  char const *const func = "omp_set_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) == gtid)) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  }

  __kmp_acquire_ticket_lock(lck, gtid);

  std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                             std::memory_order_relaxed);
  return KMP_LOCK_ACQUIRED_FIRST;
}
int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  kmp_uint32 my_ticket = std::atomic_load_explicit(&lck->lk.next_ticket,
                                                   std::memory_order_relaxed);

  if (std::atomic_load_explicit(&lck->lk.now_serving,
                                std::memory_order_relaxed) == my_ticket) {
    kmp_uint32 next_ticket = my_ticket + 1;
    if (std::atomic_compare_exchange_strong_explicit(
            &lck->lk.next_ticket, &my_ticket, next_ticket,
            std::memory_order_acquire, std::memory_order_acquire)) {
      return TRUE;
    }
  }
  return FALSE;
}
static int __kmp_test_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                              kmp_int32 gtid) {
  char const *const func = "omp_test_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }

  int retval = __kmp_test_ticket_lock(lck, gtid);

  if (retval) {
    std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                               std::memory_order_relaxed);
  }
  return retval;
}
int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  kmp_uint32 distance = std::atomic_load_explicit(&lck->lk.next_ticket,
                                                  std::memory_order_relaxed) -
                        std::atomic_load_explicit(&lck->lk.now_serving,
                                                  std::memory_order_relaxed);

  ANNOTATE_TICKET_RELEASED(lck);
  std::atomic_fetch_add_explicit(&lck->lk.now_serving, 1U,
                                 std::memory_order_release);

  KMP_YIELD(distance >
            (kmp_uint32)(__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
  return KMP_LOCK_RELEASED;
}
static int __kmp_release_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                                 kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_ticket_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) >= 0) &&
      (__kmp_get_ticket_lock_owner(lck) != gtid)) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
  return __kmp_release_ticket_lock(lck, gtid);
}
void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck) {
  lck->lk.location = NULL;
  lck->lk.self = lck;
  std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.now_serving, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(
      &lck->lk.owner_id, 0,
      std::memory_order_relaxed); // no thread owns the lock.
  std::atomic_store_explicit(
      &lck->lk.depth_locked, -1,
      std::memory_order_relaxed); // -1 => not a nested lock.
  std::atomic_store_explicit(&lck->lk.initialized, true,
                             std::memory_order_release);
}
void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck) {
  std::atomic_store_explicit(&lck->lk.initialized, false,
                             std::memory_order_release);
  lck->lk.self = NULL;
  lck->lk.location = NULL;
  std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.now_serving, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.depth_locked, -1,
                             std::memory_order_relaxed);
}
static void __kmp_destroy_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
  char const *const func = "omp_destroy_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_ticket_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_ticket_lock(lck);
}
// nested ticket locks

int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_ticket_lock_owner(lck) == gtid) {
    std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
                                   std::memory_order_relaxed);
    return KMP_LOCK_ACQUIRED_NEXT;
  } else {
    __kmp_acquire_ticket_lock_timed_template(lck, gtid);
    ANNOTATE_TICKET_ACQUIRED(lck);
    std::atomic_store_explicit(&lck->lk.depth_locked, 1,
                               std::memory_order_relaxed);
    std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                               std::memory_order_relaxed);
    return KMP_LOCK_ACQUIRED_FIRST;
  }
}

static int __kmp_acquire_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                                        kmp_int32 gtid) {
  char const *const func = "omp_set_nest_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_acquire_nested_ticket_lock(lck, gtid);
}
int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  int retval;

  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_ticket_lock_owner(lck) == gtid) {
    retval = std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
                                            std::memory_order_relaxed) +
             1;
  } else if (!__kmp_test_ticket_lock(lck, gtid)) {
    retval = 0;
  } else {
    std::atomic_store_explicit(&lck->lk.depth_locked, 1,
                               std::memory_order_relaxed);
    std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                               std::memory_order_relaxed);
    retval = 1;
  }
  return retval;
}

static int __kmp_test_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                                     kmp_int32 gtid) {
  char const *const func = "omp_test_nest_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_test_nested_ticket_lock(lck, gtid);
}
int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if ((std::atomic_fetch_add_explicit(&lck->lk.depth_locked, -1,
                                      std::memory_order_relaxed) -
       1) == 0) {
    std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
    __kmp_release_ticket_lock(lck, gtid);
    return KMP_LOCK_RELEASED;
  }
  return KMP_LOCK_STILL_HELD;
}

static int __kmp_release_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                                        kmp_int32 gtid) {
  char const *const func = "omp_unset_nest_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_ticket_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_ticket_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_nested_ticket_lock(lck, gtid);
}
void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
  std::atomic_store_explicit(&lck->lk.depth_locked, 0,
                             std::memory_order_relaxed);
  // >= 0 for nestable locks, -1 for simple locks
}

void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
  std::atomic_store_explicit(&lck->lk.depth_locked, 0,
                             std::memory_order_relaxed);
}

static void
__kmp_destroy_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
  char const *const func = "omp_destroy_nest_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_ticket_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_nested_ticket_lock(lck);
}
// access functions to fields which don't exist for all lock kinds.

static const ident_t *__kmp_get_ticket_lock_location(kmp_ticket_lock_t *lck) {
  return lck->lk.location;
}

static void __kmp_set_ticket_lock_location(kmp_ticket_lock_t *lck,
                                           const ident_t *loc) {
  lck->lk.location = loc;
}

static kmp_lock_flags_t __kmp_get_ticket_lock_flags(kmp_ticket_lock_t *lck) {
  return lck->lk.flags;
}

static void __kmp_set_ticket_lock_flags(kmp_ticket_lock_t *lck,
                                        kmp_lock_flags_t flags) {
  lck->lk.flags = flags;
}
/* ------------------------------------------------------------------------ */
/* queuing locks */

#ifdef DEBUG_QUEUING_LOCKS

/* Stuff for circular trace buffer */
#define TRACE_BUF_ELE 1024
static char traces[TRACE_BUF_ELE][128] = {0};
static int tc = 0;
#define TRACE_LOCK(X, Y)                                                       \
  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s\n", X, Y);
#define TRACE_LOCK_T(X, Y, Z)                                                  \
  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s%d\n", X, Y, Z);
#define TRACE_LOCK_HT(X, Y, Z, Q)                                              \
  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s %d,%d\n", X, Y,   \
               Z, Q);

static void __kmp_dump_queuing_lock(kmp_info_t *this_thr, kmp_int32 gtid,
                                    kmp_queuing_lock_t *lck, kmp_int32 head_id,
                                    kmp_int32 tail_id) {
  kmp_int32 t, i;

  __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: TRACE BEGINS HERE! \n");

  i = tc % TRACE_BUF_ELE;
  __kmp_printf_no_lock("%s\n", traces[i]);
  i = (i + 1) % TRACE_BUF_ELE;
  while (i != (tc % TRACE_BUF_ELE)) {
    __kmp_printf_no_lock("%s", traces[i]);
    i = (i + 1) % TRACE_BUF_ELE;
  }
  __kmp_printf_no_lock("\n");

  __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: gtid+1:%d, spin_here:%d, "
                       "next_wait:%d, head_id:%d, tail_id:%d\n",
                       gtid + 1, this_thr->th.th_spin_here,
                       this_thr->th.th_next_waiting, head_id, tail_id);

  __kmp_printf_no_lock("\t\thead: %d ", lck->lk.head_id);

  if (lck->lk.head_id >= 1) {
    t = __kmp_threads[lck->lk.head_id - 1]->th.th_next_waiting;
    while (t > 0) {
      __kmp_printf_no_lock("-> %d ", t);
      t = __kmp_threads[t - 1]->th.th_next_waiting;
    }
  }
  __kmp_printf_no_lock(";  tail: %d ", lck->lk.tail_id);
  __kmp_printf_no_lock("\n\n");
}

#endif /* DEBUG_QUEUING_LOCKS */
static kmp_int32 __kmp_get_queuing_lock_owner(kmp_queuing_lock_t *lck) {
  return TCR_4(lck->lk.owner_id) - 1;
}

static inline bool __kmp_is_queuing_lock_nestable(kmp_queuing_lock_t *lck) {
  return lck->lk.depth_locked != -1;
}
template <bool takeTime>
// The takeTime template parameter is currently unused.
__forceinline static int
__kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
                                          kmp_int32 gtid) {
  kmp_info_t *this_thr = __kmp_thread_from_gtid(gtid);
  volatile kmp_int32 *head_id_p = &lck->lk.head_id;
  volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
  volatile kmp_uint32 *spin_here_p;
  kmp_int32 need_mf = 1;

#if OMPT_SUPPORT && OMPT_OPTIONAL
  omp_state_t prev_state = omp_state_undefined;
#endif

  KA_TRACE(1000,
           ("__kmp_acquire_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));

  KMP_FSYNC_PREPARE(lck);
  KMP_DEBUG_ASSERT(this_thr != NULL);
  spin_here_p = &this_thr->th.th_spin_here;

#ifdef DEBUG_QUEUING_LOCKS
  TRACE_LOCK(gtid + 1, "acq ent");
  if (*spin_here_p)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
  if (this_thr->th.th_next_waiting != 0)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
#endif
  KMP_DEBUG_ASSERT(!*spin_here_p);
  KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);

  // head is 0 when the lock is free, -1 when it is held with an empty queue,
  // and gtid+1 of the queue head otherwise. spin_here must be set before this
  // thread can appear on the queue, so the releasing thread can clear it.
  *spin_here_p = TRUE;

  while (1) {
    kmp_int32 enqueued;
    kmp_int32 head;
    kmp_int32 tail;

    head = *head_id_p;

    switch (head) {

    case -1: { // lock is held, no other thread is waiting
#ifdef DEBUG_QUEUING_LOCKS
      tail = *tail_id_p;
      TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
#endif
      tail = 0; // make sure the next-link read below is not used accidentally
      need_mf = 0;
      /* try (-1,0)->(tid,tid) */
      enqueued = KMP_COMPARE_AND_STORE_ACQ64((volatile kmp_int64 *)tail_id_p,
                                             KMP_PACK_64(-1, 0),
                                             KMP_PACK_64(gtid + 1, gtid + 1));
#ifdef DEBUG_QUEUING_LOCKS
      if (enqueued)
        TRACE_LOCK(gtid + 1, "acq enq: (-1,0)->(tid,tid)");
#endif
    } break;

    default: { // lock is held, there is a queue of waiting threads
      tail = *tail_id_p;
      KMP_DEBUG_ASSERT(tail != gtid + 1);

#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
#endif

      if (tail == 0) {
        enqueued = FALSE;
      } else {
        need_mf = 0;
        /* try (h,t) or (h,h)->(h,tid) */
        enqueued = KMP_COMPARE_AND_STORE_ACQ32(tail_id_p, tail, gtid + 1);

#ifdef DEBUG_QUEUING_LOCKS
        if (enqueued)
          TRACE_LOCK(gtid + 1, "acq enq: (h,t)->(h,tid)");
#endif
      }
    } break;

    case 0: { // lock is free
      kmp_int32 grabbed_lock;

#ifdef DEBUG_QUEUING_LOCKS
      tail = *tail_id_p;
      TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
#endif
      /* try (0,0)->(-1,0) */
      grabbed_lock = KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1);

      if (grabbed_lock) {
        *spin_here_p = FALSE;

        KA_TRACE(
            1000,
            ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: no queuing\n",
             lck, gtid));
#ifdef DEBUG_QUEUING_LOCKS
        TRACE_LOCK_HT(gtid + 1, "acq exit: ", head, 0);
#endif

#if OMPT_SUPPORT && OMPT_OPTIONAL
        if (ompt_enabled.enabled && prev_state != omp_state_undefined) {
          /* change the state before clearing wait_id */
          this_thr->th.ompt_thread_info.state = prev_state;
          this_thr->th.ompt_thread_info.wait_id = 0;
        }
#endif

        KMP_FSYNC_ACQUIRED(lck);
        return KMP_LOCK_ACQUIRED_FIRST; /* lock holder cannot be on queue */
      }
      enqueued = FALSE;
    } break;
    }

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.enabled && prev_state == omp_state_undefined) {
      /* this thread will spin; set wait_id before entering wait state */
      prev_state = this_thr->th.ompt_thread_info.state;
      this_thr->th.ompt_thread_info.wait_id = (uint64_t)lck;
      this_thr->th.ompt_thread_info.state = omp_state_wait_lock;
    }
#endif

    if (enqueued) {
      if (tail > 0) {
        kmp_info_t *tail_thr = __kmp_thread_from_gtid(tail - 1);
        KMP_ASSERT(tail_thr != NULL);
        tail_thr->th.th_next_waiting = gtid + 1;
        /* corresponding wait for this write in release code */
      }
      KA_TRACE(1000,
               ("__kmp_acquire_queuing_lock: lck:%p, T#%d waiting for lock\n",
                lck, gtid));

      KMP_MB();
      KMP_WAIT_YIELD(spin_here_p, FALSE, KMP_EQ, lck);

#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "acq spin");

      if (this_thr->th.th_next_waiting != 0)
        __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
#endif
      KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
      KA_TRACE(1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: after "
                      "waiting on queue\n",
                      lck, gtid));

#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "acq exit 2");
#endif

#if OMPT_SUPPORT && OMPT_OPTIONAL
      /* change the state before clearing wait_id */
      this_thr->th.ompt_thread_info.state = prev_state;
      this_thr->th.ompt_thread_info.wait_id = 0;
#endif

      /* got lock, we were dequeued by the thread that released the lock */
      return KMP_LOCK_ACQUIRED_FIRST;
    }

    /* Yield if number of threads > number of logical processors */
    KMP_YIELD(TCR_4(__kmp_nth) >
              (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
#ifdef DEBUG_QUEUING_LOCKS
    TRACE_LOCK(gtid + 1, "acq retry");
#endif
  }
  KMP_ASSERT2(0, "should not get here");
  return KMP_LOCK_ACQUIRED_FIRST;
}
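/* Note on the algorithm (added commentary): the queuing lock keeps an explicit
   FIFO of waiting threads. head_id/tail_id hold gtid+1 values (0 = free,
   -1 = held with no waiters), each waiter links itself into the queue via its
   th_next_waiting field and then spins only on its own th_spin_here flag, and
   the releasing thread hands the lock to the queue head by clearing that flag.
   Each waiter therefore spins on a distinct location instead of a shared
   word. */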
int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  int retval = __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
  ANNOTATE_QUEUING_ACQUIRED(lck);
  return retval;
}

static int __kmp_acquire_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
                                                  kmp_int32 gtid) {
  char const *const func = "omp_set_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) == gtid) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  }

  __kmp_acquire_queuing_lock(lck, gtid);

  lck->lk.owner_id = gtid + 1;
  return KMP_LOCK_ACQUIRED_FIRST;
}
int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  volatile kmp_int32 *head_id_p = &lck->lk.head_id;
  kmp_int32 head;
#ifdef KMP_DEBUG
  kmp_info_t *this_thr;
#endif

  KA_TRACE(1000, ("__kmp_test_queuing_lock: T#%d entering\n", gtid));
  KMP_DEBUG_ASSERT(gtid >= 0);
#ifdef KMP_DEBUG
  this_thr = __kmp_thread_from_gtid(gtid);
  KMP_DEBUG_ASSERT(this_thr != NULL);
  KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
#endif

  head = *head_id_p;

  if (head == 0) { /* nobody on queue, nobody holding */
    /* try (0,0)->(-1,0) */
    if (KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1)) {
      KA_TRACE(1000,
               ("__kmp_test_queuing_lock: T#%d exiting: holding lock\n", gtid));
      KMP_FSYNC_ACQUIRED(lck);
      ANNOTATE_QUEUING_ACQUIRED(lck);
      return TRUE;
    }
  }

  KA_TRACE(1000,
           ("__kmp_test_queuing_lock: T#%d exiting: without lock\n", gtid));
  return FALSE;
}
static int __kmp_test_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
                                               kmp_int32 gtid) {
  char const *const func = "omp_test_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }

  int retval = __kmp_test_queuing_lock(lck, gtid);

  if (retval) {
    lck->lk.owner_id = gtid + 1;
  }
  return retval;
}
int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  kmp_info_t *this_thr;
  volatile kmp_int32 *head_id_p = &lck->lk.head_id;
  volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;

  KA_TRACE(1000,
           ("__kmp_release_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
  KMP_DEBUG_ASSERT(gtid >= 0);
  this_thr = __kmp_thread_from_gtid(gtid);
  KMP_DEBUG_ASSERT(this_thr != NULL);
#ifdef DEBUG_QUEUING_LOCKS
  TRACE_LOCK(gtid + 1, "rel ent");

  if (this_thr->th.th_spin_here)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
  if (this_thr->th.th_next_waiting != 0)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
#endif
  KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
  KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);

  KMP_FSYNC_RELEASING(lck);
  ANNOTATE_QUEUING_RELEASED(lck);

  while (1) {
    kmp_int32 dequeued;
    kmp_int32 head;
    kmp_int32 tail;

    head = *head_id_p;

#ifdef DEBUG_QUEUING_LOCKS
    tail = *tail_id_p;
    TRACE_LOCK_HT(gtid + 1, "rel read: ", head, tail);
    if (head == 0)
      __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
#endif
    KMP_DEBUG_ASSERT(head != 0); /* holding the lock, head must be -1 or
                                    a queue head */

    if (head == -1) { /* nobody on queue */
      /* try (-1,0)->(0,0) */
      if (KMP_COMPARE_AND_STORE_REL32(head_id_p, -1, 0)) {
        KA_TRACE(
            1000,
            ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: queue empty\n",
             lck, gtid));
#ifdef DEBUG_QUEUING_LOCKS
        TRACE_LOCK_HT(gtid + 1, "rel exit: ", 0, 0);
#endif
        return KMP_LOCK_RELEASED;
      }
      dequeued = FALSE;
    } else {
      KMP_MB();
      tail = *tail_id_p;
      if (head == tail) { /* only one thread on the queue */
#ifdef DEBUG_QUEUING_LOCKS
        if (head <= 0)
          __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
#endif
        KMP_DEBUG_ASSERT(head > 0);

        /* try (h,h)->(-1,0) */
        dequeued = KMP_COMPARE_AND_STORE_REL64(
            RCAST(volatile kmp_int64 *, tail_id_p), KMP_PACK_64(head, head),
            KMP_PACK_64(-1, 0));
#ifdef DEBUG_QUEUING_LOCKS
        TRACE_LOCK(gtid + 1, "rel deq: (h,h)->(-1,0)");
#endif

      } else {
        volatile kmp_int32 *waiting_id_p;
        kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
        KMP_DEBUG_ASSERT(head_thr != NULL);
        waiting_id_p = &head_thr->th.th_next_waiting;

#ifdef DEBUG_QUEUING_LOCKS
        if (head <= 0 || tail <= 0)
          __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
#endif
        KMP_DEBUG_ASSERT(head > 0 && tail > 0);

        /* try (h,t)->(h',t) or (t,t) */
        KMP_MB();
        /* make sure the enqueuing thread has time to update the next waiting
           thread field */
        *head_id_p = KMP_WAIT_YIELD((volatile kmp_uint32 *)waiting_id_p, 0,
                                    KMP_NEQ, NULL);
#ifdef DEBUG_QUEUING_LOCKS
        TRACE_LOCK(gtid + 1, "rel deq: (h,t)->(h',t)");
#endif
        dequeued = TRUE;
      }
    }

    if (dequeued) {
      kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
      KMP_DEBUG_ASSERT(head_thr != NULL);

#ifdef DEBUG_QUEUING_LOCKS
      if (head <= 0 || tail <= 0)
        __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
#endif
      KMP_DEBUG_ASSERT(head > 0 && tail > 0);

      /* The waiter is not released until the next statement, which prevents a
         race with the acquire code. */
      head_thr->th.th_next_waiting = 0;
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_T(gtid + 1, "rel nw=0 for t=", head);
#endif

      KMP_MB();
      /* reset spin value */
      head_thr->th.th_spin_here = FALSE;

      KA_TRACE(1000, ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: after "
                      "dequeuing\n",
                      lck, gtid));
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "rel exit 2");
#endif
      return KMP_LOCK_RELEASED;
    }

#ifdef DEBUG_QUEUING_LOCKS
    TRACE_LOCK(gtid + 1, "rel retry");
#endif
  } /* while */
  KMP_ASSERT2(0, "should not get here");
  return KMP_LOCK_RELEASED;
}
static int __kmp_release_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
                                                  kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";
  KMP_MB(); /* in case another processor initialized lock */
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  lck->lk.owner_id = 0;
  return __kmp_release_queuing_lock(lck, gtid);
}
void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck) {
  lck->lk.location = NULL;
  lck->lk.head_id = 0;
  lck->lk.tail_id = 0;
  lck->lk.next_ticket = 0;
  lck->lk.now_serving = 0;
  lck->lk.owner_id = 0; // no thread owns the lock.
  lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
  lck->lk.initialized = lck;

  KA_TRACE(1000, ("__kmp_init_queuing_lock: lock %p initialized\n", lck));
}

void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck) {
  lck->lk.initialized = NULL;
  lck->lk.location = NULL;
  lck->lk.head_id = 0;
  lck->lk.tail_id = 0;
  lck->lk.next_ticket = 0;
  lck->lk.now_serving = 0;
  lck->lk.owner_id = 0;
  lck->lk.depth_locked = -1;
}

static void __kmp_destroy_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_queuing_lock(lck);
}
// nested queuing locks

int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_queuing_lock_owner(lck) == gtid) {
    lck->lk.depth_locked += 1;
    return KMP_LOCK_ACQUIRED_NEXT;
  } else {
    __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
    ANNOTATE_QUEUING_ACQUIRED(lck);
    KMP_MB();
    lck->lk.depth_locked = 1;
    KMP_MB();
    lck->lk.owner_id = gtid + 1;
    return KMP_LOCK_ACQUIRED_FIRST;
  }
}

static int
__kmp_acquire_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
                                              kmp_int32 gtid) {
  char const *const func = "omp_set_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_acquire_nested_queuing_lock(lck, gtid);
}
int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  int retval;

  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_queuing_lock_owner(lck) == gtid) {
    retval = ++lck->lk.depth_locked;
  } else if (!__kmp_test_queuing_lock(lck, gtid)) {
    retval = 0;
  } else {
    KMP_MB();
    retval = lck->lk.depth_locked = 1;
    KMP_MB();
    lck->lk.owner_id = gtid + 1;
  }
  return retval;
}

static int __kmp_test_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
                                                      kmp_int32 gtid) {
  char const *const func = "omp_test_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_test_nested_queuing_lock(lck, gtid);
}
int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  KMP_MB();
  if (--(lck->lk.depth_locked) == 0) {
    KMP_MB();
    lck->lk.owner_id = 0;
    __kmp_release_queuing_lock(lck, gtid);
    return KMP_LOCK_RELEASED;
  }
  return KMP_LOCK_STILL_HELD;
}

static int
__kmp_release_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
                                              kmp_int32 gtid) {
  char const *const func = "omp_unset_nest_lock";
  KMP_MB(); /* in case another processor initialized lock */
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_nested_queuing_lock(lck, gtid);
}
void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck) {
  __kmp_init_queuing_lock(lck);
  lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
}

void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck) {
  __kmp_destroy_queuing_lock(lck);
  lck->lk.depth_locked = 0;
}

static void
__kmp_destroy_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
  char const *const func = "omp_destroy_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_nested_queuing_lock(lck);
}
// access functions to fields which don't exist for all lock kinds.

static const ident_t *__kmp_get_queuing_lock_location(kmp_queuing_lock_t *lck) {
  return lck->lk.location;
}

static void __kmp_set_queuing_lock_location(kmp_queuing_lock_t *lck,
                                            const ident_t *loc) {
  lck->lk.location = loc;
}

static kmp_lock_flags_t __kmp_get_queuing_lock_flags(kmp_queuing_lock_t *lck) {
  return lck->lk.flags;
}

static void __kmp_set_queuing_lock_flags(kmp_queuing_lock_t *lck,
                                         kmp_lock_flags_t flags) {
  lck->lk.flags = flags;
}
#if KMP_USE_ADAPTIVE_LOCKS

/* RTM adaptive locks */

#define GET_QLK_PTR(l) ((kmp_queuing_lock_t *)&(l)->lk.qlk)

// Newer compilers provide the RTM intrinsics directly.
#if KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300

#include <immintrin.h>
#define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)

#else

// Values from the status register after failed speculation.
#define _XBEGIN_STARTED (~0u)
#define _XABORT_EXPLICIT (1 << 0)
#define _XABORT_RETRY (1 << 1)
#define _XABORT_CONFLICT (1 << 2)
#define _XABORT_CAPACITY (1 << 3)
#define _XABORT_DEBUG (1 << 4)
#define _XABORT_NESTED (1 << 5)
#define _XABORT_CODE(x) ((unsigned char)(((x) >> 24) & 0xFF))

// Aborts for which it's worth trying again immediately
#define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)

#define STRINGIZE_INTERNAL(arg) #arg
#define STRINGIZE(arg) STRINGIZE_INTERNAL(arg)

// Access to RTM instructions
/* A version of XBegin which returns -1 on speculation, and the value of EAX on
   an abort. This is the same definition as the compiler intrinsic. */
static __inline int _xbegin() {
  int res = -1;

#if KMP_OS_WINDOWS
  _asm {
        _emit 0xC7
        _emit 0xF8
        _emit 2
        _emit 0
        _emit 0
        _emit 0
        jmp   L2
        mov   res, eax
    L2:
  }
#else
  /* %eax must be marked as clobbered, because the XSR is returned in
     %eax(%rax) on abort.  'res' must be both an input and an output so that
     the initial 'res = -1' is not dropped as dead on the non-abort path. */
  __asm__ volatile("1: .byte  0xC7; .byte 0xF8;\n"
                   "   .long  1f-1b-6\n"
                   "    jmp   2f\n"
                   "1:  movl  %%eax,%0\n"
                   "2:"
                   : "+r"(res)::"memory", "%eax");
#endif // KMP_OS_WINDOWS
  return res;
}

/* Transaction end */
static __inline void _xend() {
#if KMP_OS_WINDOWS
  __asm {
        _emit 0x0f
        _emit 0x01
        _emit 0xd5
  }
#else
  __asm__ volatile(".byte 0x0f; .byte 0x01; .byte 0xd5" ::: "memory");
#endif
}

/* This is a macro; the argument must be a single byte constant which can be
   evaluated by the inline assembler, since it is emitted as a byte into the
   assembly code. */
// clang-format off
#if KMP_OS_WINDOWS
#define _xabort(ARG) _asm _emit 0xc6 _asm _emit 0xf8 _asm _emit ARG
#else
#define _xabort(ARG)                                                           \
  __asm__ volatile(".byte 0xC6; .byte 0xF8; .byte " STRINGIZE(ARG):::"memory");
#endif
// clang-format on

#endif // KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300

// Statistics are collected for adaptive locks only when
// KMP_DEBUG_ADAPTIVE_LOCKS is defined.
#if KMP_DEBUG_ADAPTIVE_LOCKS

// The lock statistics accumulated for locks that have been destroyed.
static kmp_adaptive_lock_statistics_t destroyedStats;
// Lock info for all the locks that are still alive, kept in a circular
// doubly-linked list headed by liveLocks.
static kmp_adaptive_lock_info_t liveLocks;

// A lock so we can safely update the list of locks.
static kmp_bootstrap_lock_t chain_lock =
    KMP_BOOTSTRAP_LOCK_INITIALIZER(chain_lock);

// Initialize the list of stats.
void __kmp_init_speculative_stats() {
  kmp_adaptive_lock_info_t *lck = &liveLocks;

  memset(CCAST(kmp_adaptive_lock_statistics_t *, &(lck->stats)), 0,
         sizeof(lck->stats));
  lck->stats.next = lck;
  lck->stats.prev = lck;

  KMP_ASSERT(lck->stats.next->stats.prev == lck);
  KMP_ASSERT(lck->stats.prev->stats.next == lck);

  __kmp_init_bootstrap_lock(&chain_lock);
}
// Insert the lock into the circular list.
static void __kmp_remember_lock(kmp_adaptive_lock_info_t *lck) {
  __kmp_acquire_bootstrap_lock(&chain_lock);

  lck->stats.next = liveLocks.stats.next;
  lck->stats.prev = &liveLocks;

  liveLocks.stats.next = lck;
  lck->stats.next->stats.prev = lck;

  KMP_ASSERT(lck->stats.next->stats.prev == lck);
  KMP_ASSERT(lck->stats.prev->stats.next == lck);

  __kmp_release_bootstrap_lock(&chain_lock);
}

// Unlink the lock from the circular list.
static void __kmp_forget_lock(kmp_adaptive_lock_info_t *lck) {
  KMP_ASSERT(lck->stats.next->stats.prev == lck);
  KMP_ASSERT(lck->stats.prev->stats.next == lck);

  kmp_adaptive_lock_info_t *n = lck->stats.next;
  kmp_adaptive_lock_info_t *p = lck->stats.prev;

  n->stats.prev = p;
  p->stats.next = n;
}
static void __kmp_zero_speculative_stats(kmp_adaptive_lock_info_t *lck) {
  memset(CCAST(kmp_adaptive_lock_statistics_t *, &lck->stats), 0,
         sizeof(lck->stats));
  __kmp_remember_lock(lck);
}

static void __kmp_add_stats(kmp_adaptive_lock_statistics_t *t,
                            kmp_adaptive_lock_info_t *lck) {
  kmp_adaptive_lock_statistics_t volatile *s = &lck->stats;

  t->nonSpeculativeAcquireAttempts += lck->acquire_attempts;
  t->successfulSpeculations += s->successfulSpeculations;
  t->hardFailedSpeculations += s->hardFailedSpeculations;
  t->softFailedSpeculations += s->softFailedSpeculations;
  t->nonSpeculativeAcquires += s->nonSpeculativeAcquires;
  t->lemmingYields += s->lemmingYields;
}

static void __kmp_accumulate_speculative_stats(kmp_adaptive_lock_info_t *lck) {
  __kmp_acquire_bootstrap_lock(&chain_lock);

  __kmp_add_stats(&destroyedStats, lck);
  __kmp_forget_lock(lck);

  __kmp_release_bootstrap_lock(&chain_lock);
}
static float percent(kmp_uint32 count, kmp_uint32 total) {
  return (total == 0) ? 0.0 : (100.0 * count) / total;
}

static FILE *__kmp_open_stats_file() {
  if (strcmp(__kmp_speculative_statsfile, "-") == 0)
    return stdout;

  size_t buffLen = KMP_STRLEN(__kmp_speculative_statsfile) + 20;
  char buffer[buffLen];
  KMP_SNPRINTF(&buffer[0], buffLen, __kmp_speculative_statsfile,
               (kmp_int32)getpid());
  FILE *result = fopen(&buffer[0], "w");

  // Fall back to stdout if the file could not be opened.
  return result ? result : stdout;
}
void __kmp_print_speculative_stats() {
  kmp_adaptive_lock_statistics_t total = destroyedStats;
  kmp_adaptive_lock_info_t *lck;

  for (lck = liveLocks.stats.next; lck != &liveLocks; lck = lck->stats.next) {
    __kmp_add_stats(&total, lck);
  }
  kmp_adaptive_lock_statistics_t *t = &total;
  kmp_uint32 totalSections =
      t->nonSpeculativeAcquires + t->successfulSpeculations;
  kmp_uint32 totalSpeculations = t->successfulSpeculations +
                                 t->hardFailedSpeculations +
                                 t->softFailedSpeculations;
  if (totalSections <= 0)
    return;

  FILE *statsFile = __kmp_open_stats_file();

  fprintf(statsFile, "Speculative lock statistics (all approximate!)\n");
  fprintf(statsFile, " Lock parameters: \n"
                     "   max_soft_retries               : %10d\n"
                     "   max_badness                    : %10d\n",
          __kmp_adaptive_backoff_params.max_soft_retries,
          __kmp_adaptive_backoff_params.max_badness);
  fprintf(statsFile, " Non-speculative acquire attempts : %10d\n",
          t->nonSpeculativeAcquireAttempts);
  fprintf(statsFile, " Total critical sections          : %10d\n",
          totalSections);
  fprintf(statsFile, " Successful speculations          : %10d (%5.1f%%)\n",
          t->successfulSpeculations,
          percent(t->successfulSpeculations, totalSections));
  fprintf(statsFile, " Non-speculative acquires         : %10d (%5.1f%%)\n",
          t->nonSpeculativeAcquires,
          percent(t->nonSpeculativeAcquires, totalSections));
  fprintf(statsFile, " Lemming yields                   : %10d\n\n",
          t->lemmingYields);

  fprintf(statsFile, " Speculative acquire attempts     : %10d\n",
          totalSpeculations);
  fprintf(statsFile, "   Successes                      : %10d (%5.1f%%)\n",
          t->successfulSpeculations,
          percent(t->successfulSpeculations, totalSpeculations));
  fprintf(statsFile, "   Soft failures                  : %10d (%5.1f%%)\n",
          t->softFailedSpeculations,
          percent(t->softFailedSpeculations, totalSpeculations));
  fprintf(statsFile, "   Hard failures                  : %10d (%5.1f%%)\n",
          t->hardFailedSpeculations,
          percent(t->hardFailedSpeculations, totalSpeculations));

  if (statsFile != stdout)
    fclose(statsFile);
}
#define KMP_INC_STAT(lck, stat) (lck->lk.adaptive.stats.stat++)

#else // KMP_DEBUG_ADAPTIVE_LOCKS

#define KMP_INC_STAT(lck, stat)

#endif // KMP_DEBUG_ADAPTIVE_LOCKS

static inline bool __kmp_is_unlocked_queuing_lock(kmp_queuing_lock_t *lck) {
  // It is enough to check that the head_id is zero; we don't also need to
  // check the tail.
  bool res = lck->lk.head_id == 0;

// We need a fence here, since we must ensure that no memory operations
// from later in this thread float above that read.
#if KMP_COMPILER_ICC
  _mm_mfence();
#else
  __sync_synchronize();
#endif

  return res;
}
// Functions for manipulating the badness
static __inline void
__kmp_update_badness_after_success(kmp_adaptive_lock_t *lck) {
  // Reset the badness to zero so we eagerly try to speculate again.
  lck->lk.adaptive.badness = 0;
  KMP_INC_STAT(lck, successfulSpeculations);
}

// Create a bit mask with one more set bit.
static __inline void __kmp_step_badness(kmp_adaptive_lock_t *lck) {
  kmp_uint32 newBadness = (lck->lk.adaptive.badness << 1) | 1;
  if (newBadness > lck->lk.adaptive.max_badness) {
    return;
  } else {
    lck->lk.adaptive.badness = newBadness;
  }
}

// Check whether speculation should be attempted.
static __inline int __kmp_should_speculate(kmp_adaptive_lock_t *lck,
                                           kmp_int32 gtid) {
  kmp_uint32 badness = lck->lk.adaptive.badness;
  kmp_uint32 attempts = lck->lk.adaptive.acquire_attempts;
  int res = (attempts & badness) == 0;
  return res;
}
// Attempt to acquire only the speculative lock.
// Does not back off to the non-speculative lock.
static int __kmp_test_adaptive_lock_only(kmp_adaptive_lock_t *lck,
                                         kmp_int32 gtid) {
  int retries = lck->lk.adaptive.max_soft_retries;

  // We don't explicitly count the start of speculation, rather we record the
  // results (success, hard fail, soft fail). The sum of those is the total
  // number of times we started speculation, since all speculations must end
  // one of those ways.
  do {
    kmp_uint32 status = _xbegin();

    if (status == _XBEGIN_STARTED) {
      /* Speculation started successfully. Check that no-one acquired the lock
         for real between when we last looked and now. This also gets the lock
         cache line into our read-set, so we will abort if anyone later claims
         it for real. */
      if (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
        // Lock is now visibly acquired, so someone beat us to it. Abort the
        // transaction so we restart from _xbegin with the failure status.
        _xabort(0x01);
        KMP_ASSERT2(0, "should not get here");
      }
      return 1; // Lock has been acquired (speculatively)
    } else {
      // We have aborted, update the statistics.
      if (status & SOFT_ABORT_MASK) {
        KMP_INC_STAT(lck, softFailedSpeculations);
        // and loop round to retry.
      } else {
        KMP_INC_STAT(lck, hardFailedSpeculations);
        // Give up if we had a hard failure.
        break;
      }
    }
  } while (retries--); // Loop while we have retries, and didn't fail hard.

  // Either we had a hard failure or we didn't succeed softly after
  // the full set of attempts, so back off the badness.
  __kmp_step_badness(lck);
  return 0;
}
// Attempt to acquire the speculative lock, or back off to the non-speculative
// one if the speculative lock cannot be acquired.
// We can succeed speculatively, non-speculatively, or fail.
static int __kmp_test_adaptive_lock(kmp_adaptive_lock_t *lck, kmp_int32 gtid) {
  // First try to acquire the lock speculatively.
  if (__kmp_should_speculate(lck, gtid) &&
      __kmp_test_adaptive_lock_only(lck, gtid))
    return 1;

  // Speculative acquisition failed, so try to acquire it non-speculatively.
  // Count the non-speculative acquire attempt.
  lck->lk.adaptive.acquire_attempts++;

  // Use the base, non-speculative lock.
  if (__kmp_test_queuing_lock(GET_QLK_PTR(lck), gtid)) {
    KMP_INC_STAT(lck, nonSpeculativeAcquires);
    return 1; // Lock is acquired (non-speculatively).
  } else {
    return 0; // Failed to acquire the lock; it's already visibly locked.
  }
}

static int __kmp_test_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
                                                kmp_int32 gtid) {
  char const *const func = "omp_test_lock";
  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
    KMP_FATAL(LockIsUninitialized, func);
  }

  int retval = __kmp_test_adaptive_lock(lck, gtid);

  if (retval) {
    lck->lk.qlk.owner_id = gtid + 1;
  }
  return retval;
}
// Block until we can acquire a speculative, adaptive lock. We check whether we
// should be trying to speculate. If we should be, we check the real lock to
// see if it is free, and, if not, pause (without attempting to acquire it)
// until it is, then try the speculative acquire. This means we suffer from
// lemmings a little (we cannot acquire the lock speculatively until the queue
// of waiting threads has drained), but speculation never becomes permanently
// impossible. The lock is therefore not fair, which OpenMP does not require.
static void __kmp_acquire_adaptive_lock(kmp_adaptive_lock_t *lck,
                                        kmp_int32 gtid) {
  if (__kmp_should_speculate(lck, gtid)) {
    if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
      if (__kmp_test_adaptive_lock_only(lck, gtid))
        return;
      // We tried speculation and failed, so give up.
    } else {
      // We can't try speculation until the lock is free, so pause here
      // (without suspending on the queuing lock, to allow it to drain), then
      // try again.
      while (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
        KMP_INC_STAT(lck, lemmingYields);
        __kmp_yield(TRUE);
      }

      if (__kmp_test_adaptive_lock_only(lck, gtid))
        return;
    }
  }

  // Speculative acquisition failed, so acquire it non-speculatively.
  // Count the non-speculative acquire attempt.
  lck->lk.adaptive.acquire_attempts++;

  __kmp_acquire_queuing_lock_timed_template<FALSE>(GET_QLK_PTR(lck), gtid);
  // We have acquired the base lock, so count that.
  KMP_INC_STAT(lck, nonSpeculativeAcquires);
  ANNOTATE_QUEUING_ACQUIRED(lck);
}
static void __kmp_acquire_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
                                                    kmp_int32 gtid) {
  char const *const func = "omp_set_lock";
  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == gtid) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  }

  __kmp_acquire_adaptive_lock(lck, gtid);

  lck->lk.qlk.owner_id = gtid + 1;
}
static int __kmp_release_adaptive_lock(kmp_adaptive_lock_t *lck,
                                       kmp_int32 gtid) {
  if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(
          lck))) { // If the lock doesn't look claimed we must be speculating.
    // (Or the user's code is buggy and is releasing without locking.)
    _xend(); // Exit speculation.
    __kmp_update_badness_after_success(lck);
  } else { // Since the lock *is* visibly locked we're not speculating,
    // so use the underlying lock's release scheme.
    __kmp_release_queuing_lock(GET_QLK_PTR(lck), gtid);
  }
  return KMP_LOCK_RELEASED;
}
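/* Note on the algorithm (added commentary): the adaptive lock wraps the
   queuing lock with hardware transactional speculation (Intel RTM). A
   speculative acquire just starts a transaction and reads the underlying
   queuing lock's head word; a real acquisition by another thread aborts the
   transaction. The per-lock "badness" mask throttles speculation: it gains a
   set bit after a failed round of speculation and is reset on success, and
   __kmp_should_speculate() compares it against the count of non-speculative
   acquire attempts to decide whether to try RTM at all. */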
static int __kmp_release_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
                                                   kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";
  KMP_MB(); /* in case another processor initialized lock */
  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  lck->lk.qlk.owner_id = 0;
  __kmp_release_adaptive_lock(lck, gtid);
  return KMP_LOCK_RELEASED;
}
static void __kmp_init_adaptive_lock(kmp_adaptive_lock_t *lck) {
  __kmp_init_queuing_lock(GET_QLK_PTR(lck));
  lck->lk.adaptive.badness = 0;
  lck->lk.adaptive.acquire_attempts = 0;
  lck->lk.adaptive.max_soft_retries =
      __kmp_adaptive_backoff_params.max_soft_retries;
  lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness;
#if KMP_DEBUG_ADAPTIVE_LOCKS
  __kmp_zero_speculative_stats(&lck->lk.adaptive);
#endif
  KA_TRACE(1000, ("__kmp_init_adaptive_lock: lock %p initialized\n", lck));
}

static void __kmp_destroy_adaptive_lock(kmp_adaptive_lock_t *lck) {
#if KMP_DEBUG_ADAPTIVE_LOCKS
  __kmp_accumulate_speculative_stats(&lck->lk.adaptive);
#endif
  __kmp_destroy_queuing_lock(GET_QLK_PTR(lck));
  // Nothing needed for the speculative part.
}

static void __kmp_destroy_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_adaptive_lock(lck);
}
#endif // KMP_USE_ADAPTIVE_LOCKS

/* ------------------------------------------------------------------------ */
/* DRDPA ticket locks ("DRDPA" = Dynamically Reconfigurable Distributed
   Polling Area) */

static kmp_int32 __kmp_get_drdpa_lock_owner(kmp_drdpa_lock_t *lck) {
  return lck->lk.owner_id - 1;
}

static inline bool __kmp_is_drdpa_lock_nestable(kmp_drdpa_lock_t *lck) {
  return lck->lk.depth_locked != -1;
}
__forceinline static int
__kmp_acquire_drdpa_lock_timed_template(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  kmp_uint64 ticket = KMP_ATOMIC_INC(&lck->lk.next_ticket);
  kmp_uint64 mask = lck->lk.mask; // atomic load
  std::atomic<kmp_uint64> *polls = lck->lk.polls;

#ifdef USE_LOCK_PROFILE
  if (polls[ticket & mask] != ticket)
    __kmp_printf("LOCK CONTENTION: %p\n", lck);
/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */

  // Now spin-wait, but reload the polls pointer and mask, in case the polling
  // area has been reconfigured.  Unless it is reconfigured, the reloads stay
  // in L1 cache and are cheap.
  kmp_uint32 spins;

  KMP_FSYNC_PREPARE(lck);
  KMP_INIT_YIELD(spins);
  while (polls[ticket & mask] < ticket) { // atomic load
    // If we are oversubscribed, or have waited a bit (and
    // KMP_LIBRARY=turnaround), then yield.
    KMP_YIELD(TCR_4(__kmp_nth) >
              (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
    KMP_YIELD_SPIN(spins);

    // Re-read the mask and the poll pointer from the lock structure.  "mask"
    // must be read before "polls": if another thread reconfigures the polling
    // area and we pick up the new mask with the old polls pointer, we could
    // index past the end of the old polling area.
    mask = lck->lk.mask; // atomic load
    polls = lck->lk.polls; // atomic load
  }

  // Critical section starts here
  KMP_FSYNC_ACQUIRED(lck);
  KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld acquired lock %p\n",
                  ticket, lck));
  lck->lk.now_serving = ticket; // non-volatile store

  // Deallocate a garbage polling area if we know that we are the last thread
  // that could possibly access it.  The >= check is in case
  // __kmp_test_drdpa_lock() allocated the cleanup ticket.
  if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) {
    __kmp_free(lck->lk.old_polls);
    lck->lk.old_polls = NULL;
    lck->lk.cleanup_ticket = 0;
  }

  // Check to see if we should reconfigure the polling area.  If there is
  // still a garbage polling area to be deallocated from a previous
  // reconfiguration, let a later thread reconfigure it.
  if (lck->lk.old_polls == NULL) {
    bool reconfigure = false;
    std::atomic<kmp_uint64> *old_polls = polls;
    kmp_uint32 num_polls = TCR_4(lck->lk.num_polls);

    if (TCR_4(__kmp_nth) >
        (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
      // We are in oversubscription mode.  Contract the polling area down to a
      // single location, if that hasn't been done already.
      if (num_polls > 1) {
        reconfigure = true;
        num_polls = TCR_4(lck->lk.num_polls);
        mask = 0;
        num_polls = 1;
        polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *
                                                          sizeof(*polls));
        polls[0] = ticket;
      }
    } else {
      // We are in under/fully subscribed mode.  The polling area should be at
      // least as large as the number of threads waiting on the lock.
      kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1;
      if (num_waiting > num_polls) {
        kmp_uint32 old_num_polls = num_polls;
        reconfigure = true;
        do {
          mask = (mask << 1) | 1;
          num_polls *= 2;
        } while (num_polls <= num_waiting);

        // Allocate the new polling area, and copy the relevant portion of the
        // old polling area to the new area.  __kmp_allocate() zeroes the
        // memory it allocates, and most of the old area is just zero padding,
        // so we only copy the release counters.
        polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *
                                                          sizeof(*polls));
        kmp_uint32 i;
        for (i = 0; i < old_num_polls; i++) {
          polls[i].store(old_polls[i]);
        }
      }
    }

    if (reconfigure) {
      // Now write the updated fields back to the lock structure.  "polls"
      // must be written before "mask".
      KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld reconfiguring "
                      "lock %p to %d polls\n",
                      ticket, lck, num_polls));

      lck->lk.old_polls = old_polls;
      lck->lk.polls = polls; // atomic store

      KMP_MB();

      lck->lk.num_polls = num_polls;
      lck->lk.mask = mask; // atomic store

      KMP_MB();

      // Only after the new polling area and mask have been flushed to main
      // memory can we update the cleanup ticket.
      lck->lk.cleanup_ticket = lck->lk.next_ticket;
    }
  }
  return KMP_LOCK_ACQUIRED_FIRST;
}
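/* Note on the algorithm (added commentary): the DRDPA lock is a ticket lock
   in which each waiter spins on its own slot of a polling array, indexed by
   (ticket & mask), instead of on a single shared now_serving counter. The
   lock holder enlarges the array when many threads are waiting (so waiters
   spin on distinct locations) and shrinks it back to one slot under
   oversubscription; old polling areas are freed lazily, once every ticket
   that might still reference them has been served. */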
int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  int retval = __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
  ANNOTATE_DRDPA_ACQUIRED(lck);
  return retval;
}

static int __kmp_acquire_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
                                                kmp_int32 gtid) {
  char const *const func = "omp_set_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) == gtid)) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  }

  __kmp_acquire_drdpa_lock(lck, gtid);

  lck->lk.owner_id = gtid + 1;
  return KMP_LOCK_ACQUIRED_FIRST;
}
int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  // First get a ticket, then read the polls pointer and the mask.
  // The polls pointer must be read before the mask (see the acquire path).
  kmp_uint64 ticket = lck->lk.next_ticket; // atomic load
  std::atomic<kmp_uint64> *polls = lck->lk.polls;
  kmp_uint64 mask = lck->lk.mask; // atomic load
  if (polls[ticket & mask] == ticket) {
    kmp_uint64 next_ticket = ticket + 1;
    if (__kmp_atomic_compare_store_acq(&lck->lk.next_ticket, ticket,
                                       next_ticket)) {
      KMP_FSYNC_ACQUIRED(lck);
      KA_TRACE(1000, ("__kmp_test_drdpa_lock: ticket #%lld acquired lock %p\n",
                      ticket, lck));
      lck->lk.now_serving = ticket; // non-volatile store

      // Since no threads are waiting, there is no possibility that we would
      // want to reconfigure the polling area; leave any pending cleanup to a
      // later call to the acquire path, since this routine must not block.
      return TRUE;
    }
  }
  return FALSE;
}
static int __kmp_test_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
                                             kmp_int32 gtid) {
  char const *const func = "omp_test_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }

  int retval = __kmp_test_drdpa_lock(lck, gtid);

  if (retval) {
    lck->lk.owner_id = gtid + 1;
  }
  return retval;
}
int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  // Read the ticket value from the lock data struct, then the polls pointer
  // and the mask.  The polls pointer must be read before the mask.
  kmp_uint64 ticket = lck->lk.now_serving + 1; // non-atomic load
  std::atomic<kmp_uint64> *polls = lck->lk.polls; // atomic load
  kmp_uint64 mask = lck->lk.mask; // atomic load
  KA_TRACE(1000, ("__kmp_release_drdpa_lock: ticket #%lld released lock %p\n",
                  ticket - 1, lck));
  KMP_FSYNC_RELEASING(lck);
  ANNOTATE_DRDPA_RELEASED(lck);
  polls[ticket & mask] = ticket; // atomic store
  return KMP_LOCK_RELEASED;
}
static int __kmp_release_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
                                                kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";
  KMP_MB(); /* in case another processor initialized lock */
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_drdpa_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) >= 0) &&
      (__kmp_get_drdpa_lock_owner(lck) != gtid)) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  lck->lk.owner_id = 0;
  return __kmp_release_drdpa_lock(lck, gtid);
}
void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck) {
  lck->lk.location = NULL;
  lck->lk.mask = 0;
  lck->lk.num_polls = 1;
  lck->lk.polls = (std::atomic<kmp_uint64> *)__kmp_allocate(
      lck->lk.num_polls * sizeof(*(lck->lk.polls)));
  lck->lk.cleanup_ticket = 0;
  lck->lk.old_polls = NULL;
  lck->lk.next_ticket = 0;
  lck->lk.now_serving = 0;
  lck->lk.owner_id = 0; // no thread owns the lock.
  lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
  lck->lk.initialized = lck;

  KA_TRACE(1000, ("__kmp_init_drdpa_lock: lock %p initialized\n", lck));
}
void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck) {
  lck->lk.initialized = NULL;
  lck->lk.location = NULL;
  if (lck->lk.polls.load() != NULL) {
    __kmp_free(lck->lk.polls.load());
    lck->lk.polls = NULL;
  }
  if (lck->lk.old_polls != NULL) {
    __kmp_free(lck->lk.old_polls);
    lck->lk.old_polls = NULL;
  }
  lck->lk.mask = 0;
  lck->lk.num_polls = 0;
  lck->lk.cleanup_ticket = 0;
  lck->lk.next_ticket = 0;
  lck->lk.now_serving = 0;
  lck->lk.owner_id = 0;
  lck->lk.depth_locked = -1;
}
static void __kmp_destroy_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_drdpa_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_drdpa_lock(lck);
}
// nested drdpa ticket locks

int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
    lck->lk.depth_locked += 1;
    return KMP_LOCK_ACQUIRED_NEXT;
  } else {
    __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
    ANNOTATE_DRDPA_ACQUIRED(lck);
    KMP_MB();
    lck->lk.depth_locked = 1;
    KMP_MB();
    lck->lk.owner_id = gtid + 1;
    return KMP_LOCK_ACQUIRED_FIRST;
  }
}

static void __kmp_acquire_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
                                                        kmp_int32 gtid) {
  char const *const func = "omp_set_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  __kmp_acquire_nested_drdpa_lock(lck, gtid);
}

int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  int retval;

  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
    retval = ++lck->lk.depth_locked;
  } else if (!__kmp_test_drdpa_lock(lck, gtid)) {
    retval = 0;
  } else {
    KMP_MB();
    retval = lck->lk.depth_locked = 1;
    KMP_MB();
    lck->lk.owner_id = gtid + 1;
  }
  return retval;
}
static int __kmp_test_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
                                                    kmp_int32 gtid) {
  char const *const func = "omp_test_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_test_nested_drdpa_lock(lck, gtid);
}

int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  KMP_MB();
  if (--(lck->lk.depth_locked) == 0) {
    KMP_MB();
    lck->lk.owner_id = 0;
    __kmp_release_drdpa_lock(lck, gtid);
    return KMP_LOCK_RELEASED;
  }
  return KMP_LOCK_STILL_HELD;
}

static int __kmp_release_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
                                                       kmp_int32 gtid) {
  char const *const func = "omp_unset_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_drdpa_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_drdpa_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_nested_drdpa_lock(lck, gtid);
}
void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
  __kmp_init_drdpa_lock(lck);
  lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
}

void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
  __kmp_destroy_drdpa_lock(lck);
  lck->lk.depth_locked = 0;
}

static void __kmp_destroy_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
  char const *const func = "omp_destroy_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_drdpa_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_nested_drdpa_lock(lck);
}
// access functions to fields which don't exist for all lock kinds.

static const ident_t *__kmp_get_drdpa_lock_location(kmp_drdpa_lock_t *lck) {
  return lck->lk.location;
}

static void __kmp_set_drdpa_lock_location(kmp_drdpa_lock_t *lck,
                                          const ident_t *loc) {
  lck->lk.location = loc;
}

static kmp_lock_flags_t __kmp_get_drdpa_lock_flags(kmp_drdpa_lock_t *lck) {
  return lck->lk.flags;
}

static void __kmp_set_drdpa_lock_flags(kmp_drdpa_lock_t *lck,
                                       kmp_lock_flags_t flags) {
  lck->lk.flags = flags;
}
// Time stamp counter
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define __kmp_tsc() __kmp_hardware_timestamp()
// Runtime's default backoff parameters
kmp_backoff_t __kmp_spin_backoff_params = {1, 4096, 100};
#else
// Use nanoseconds for other platforms
extern kmp_uint64 __kmp_now_nsec();
kmp_backoff_t __kmp_spin_backoff_params = {1, 256, 100};
#define __kmp_tsc() __kmp_now_nsec()
#endif

// A useful predicate for dealing with timestamps that may wrap.
static inline bool before(kmp_uint64 a, kmp_uint64 b) {
  return ((kmp_int64)b - (kmp_int64)a) > 0;
}
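// Example: with 64-bit wraparound, a = 0xFFFFFFFFFFFFFFFF and b = 1 gives
// (kmp_int64)b - (kmp_int64)a == 2 > 0, so before(a, b) correctly reports that
// a precedes b even though a > b as an unsigned value.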
// Truncated binary exponential backoff function
void __kmp_spin_backoff(kmp_backoff_t *boff) {
  kmp_uint32 i;
  for (i = boff->step; i > 0; i--) {
    kmp_uint64 goal = __kmp_tsc() + boff->min_tick;
    do {
      KMP_CPU_PAUSE();
    } while (before(__kmp_tsc(), goal));
  }
  boff->step = (boff->step << 1 | 1) & (boff->max_backoff - 1);
}
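// The backoff is truncated binary exponential: step follows 1, 3, 7, 15, ...
// and is masked by (max_backoff - 1), which appears to assume max_backoff is a
// power of two (the defaults above, 4096 and 256, satisfy this).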
#if KMP_USE_DYNAMIC_LOCK

// Direct lock initializers. It simply writes a tag to the low 8 bits of the
// lock word.
static void __kmp_init_direct_lock(kmp_dyna_lock_t *lck,
                                   kmp_dyna_lockseq_t seq) {
  TCW_4(*lck, KMP_GET_D_TAG(seq));
  KA_TRACE(
      20,
      ("__kmp_init_direct_lock: initialized direct lock with type#%d\n", seq));
}
#if KMP_USE_TSX

// HLE lock functions - imported from the testbed runtime.
#define HLE_ACQUIRE ".byte 0xf2;"
#define HLE_RELEASE ".byte 0xf3;"

static inline kmp_uint32 swap4(kmp_uint32 volatile *p, kmp_uint32 v) {
  __asm__ volatile(HLE_ACQUIRE "xchg %1,%0" : "+r"(v), "+m"(*p) : : "memory");
  return v;
}

static void __kmp_destroy_hle_lock(kmp_dyna_lock_t *lck) { TCW_4(*lck, 0); }

static void __kmp_destroy_hle_lock_with_checks(kmp_dyna_lock_t *lck) {
  TCW_4(*lck, 0);
}

static void __kmp_acquire_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
  // Use gtid for KMP_LOCK_BUSY if necessary
  if (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle)) {
    int delay = 1;
    do {
      while (*(kmp_uint32 volatile *)lck != KMP_LOCK_FREE(hle)) {
        for (int i = delay; i != 0; --i)
          KMP_CPU_PAUSE();
        delay = ((delay << 1) | 1) & 7;
      }
    } while (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle));
  }
}
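// HLE_ACQUIRE/HLE_RELEASE emit the XACQUIRE (0xF2) and XRELEASE (0xF3)
// prefixes; processors without hardware lock elision treat them as ordinary
// REP prefixes, so this path degrades to a plain exchange-based spin lock.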
static void __kmp_acquire_hle_lock_with_checks(kmp_dyna_lock_t *lck,
                                               kmp_int32 gtid) {
  __kmp_acquire_hle_lock(lck, gtid);
}

static int __kmp_release_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
  __asm__ volatile(HLE_RELEASE "movl %1,%0"
                   : "=m"(*lck)
                   : "r"(KMP_LOCK_FREE(hle))
                   : "memory");
  return KMP_LOCK_RELEASED;
}

static int __kmp_release_hle_lock_with_checks(kmp_dyna_lock_t *lck,
                                              kmp_int32 gtid) {
  return __kmp_release_hle_lock(lck, gtid);
}

static int __kmp_test_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
  return swap4(lck, KMP_LOCK_BUSY(1, hle)) == KMP_LOCK_FREE(hle);
}

static int __kmp_test_hle_lock_with_checks(kmp_dyna_lock_t *lck,
                                           kmp_int32 gtid) {
  return __kmp_test_hle_lock(lck, gtid);
}
static void __kmp_init_rtm_lock(kmp_queuing_lock_t *lck) {
  __kmp_init_queuing_lock(lck);
}

static void __kmp_destroy_rtm_lock(kmp_queuing_lock_t *lck) {
  __kmp_destroy_queuing_lock(lck);
}

static void __kmp_destroy_rtm_lock_with_checks(kmp_queuing_lock_t *lck) {
  __kmp_destroy_queuing_lock_with_checks(lck);
}

static void __kmp_acquire_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  unsigned retries = 3, status;
  do {
    status = _xbegin();
    if (status == _XBEGIN_STARTED) {
      if (__kmp_is_unlocked_queuing_lock(lck))
        return;
      _xabort(0xff);
    }
    if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
      // Wait until the lock becomes free before retrying the transaction.
      while (!__kmp_is_unlocked_queuing_lock(lck))
        KMP_YIELD(TRUE);
    } else if (!(status & _XABORT_RETRY))
      break;
  } while (retries--);

  // Fall back to the regular queuing lock if the transaction keeps aborting.
  __kmp_acquire_queuing_lock(lck, gtid);
}

static void __kmp_acquire_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
                                               kmp_int32 gtid) {
  __kmp_acquire_rtm_lock(lck, gtid);
}

static int __kmp_release_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  if (__kmp_is_unlocked_queuing_lock(lck)) {
    // Releasing from speculation
    _xend();
  } else {
    // Releasing from a real lock
    __kmp_release_queuing_lock(lck, gtid);
  }
  return KMP_LOCK_RELEASED;
}

static int __kmp_release_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
                                              kmp_int32 gtid) {
  return __kmp_release_rtm_lock(lck, gtid);
}

static int __kmp_test_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  unsigned retries = 3, status;
  do {
    status = _xbegin();
    if (status == _XBEGIN_STARTED && __kmp_is_unlocked_queuing_lock(lck)) {
      return 1;
    }
    if (!(status & _XABORT_RETRY))
      break;
  } while (retries--);
  return (__kmp_is_unlocked_queuing_lock(lck)) ? 1 : 0;
}

static int __kmp_test_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
                                           kmp_int32 gtid) {
  return __kmp_test_rtm_lock(lck, gtid);
}
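// The RTM variants try a short transactional fast path (_xbegin/_xend, up to
// three retries) and fall back to the underlying queuing lock when the
// transaction keeps aborting or the lock is already held non-speculatively.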
#endif // KMP_USE_TSX

// Entry functions for indirect locks (first element of direct lock jump tables)
static void __kmp_init_indirect_lock(kmp_dyna_lock_t *l,
                                     kmp_dyna_lockseq_t tag);
static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock);
static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
                                               kmp_int32);
static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
                                                 kmp_int32);
static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
                                                kmp_int32);
// Jump tables for the direct lock functions.
// Only fill in the odd entries; that avoids the need to shift out the low bit.

// init functions
#define expand(l, op) 0, __kmp_init_direct_lock,
void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t) = {
    __kmp_init_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, init)};
#undef expand

// destroy functions
#define expand(l, op) 0, (void (*)(kmp_dyna_lock_t *))__kmp_##op##_##l##_lock,
static void (*direct_destroy[])(kmp_dyna_lock_t *) = {
    __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
#undef expand
#define expand(l, op)                                                          \
  0, (void (*)(kmp_dyna_lock_t *))__kmp_destroy_##l##_lock_with_checks,
static void (*direct_destroy_check[])(kmp_dyna_lock_t *) = {
    __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
#undef expand

// set/acquire functions
#define expand(l, op)                                                          \
  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
static int (*direct_set[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_set_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, acquire)};
#undef expand
#define expand(l, op)                                                          \
  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
static int (*direct_set_check[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_set_indirect_lock_with_checks, 0,
    KMP_FOREACH_D_LOCK(expand, acquire)};
#undef expand

// unset/release and test functions
#define expand(l, op)                                                          \
  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
static int (*direct_unset[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_unset_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, release)};
static int (*direct_test[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_test_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, test)};
#undef expand
#define expand(l, op)                                                          \
  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
static int (*direct_unset_check[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_unset_indirect_lock_with_checks, 0,
    KMP_FOREACH_D_LOCK(expand, release)};
static int (*direct_test_check[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_test_indirect_lock_with_checks, 0, KMP_FOREACH_D_LOCK(expand, test)};
#undef expand
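// Layout of the direct jump tables: slot 0 holds the indirect-lock entry
// function, and each expand() emits a 0 placeholder before the real handler,
// so real handlers land on the slots selected by the (odd) tag bits stored in
// the lock word itself.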
// Exposes only one set of jump tables (*lock or *lock_with_checks).
void (*(*__kmp_direct_destroy))(kmp_dyna_lock_t *) = 0;
int (*(*__kmp_direct_set))(kmp_dyna_lock_t *, kmp_int32) = 0;
int (*(*__kmp_direct_unset))(kmp_dyna_lock_t *, kmp_int32) = 0;
int (*(*__kmp_direct_test))(kmp_dyna_lock_t *, kmp_int32) = 0;
// Jump tables for the indirect lock functions.

// init functions
#define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
void (*__kmp_indirect_init[])(kmp_user_lock_p) = {
    KMP_FOREACH_I_LOCK(expand, init)};
#undef expand

// destroy functions
#define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
static void (*indirect_destroy[])(kmp_user_lock_p) = {
    KMP_FOREACH_I_LOCK(expand, destroy)};
#undef expand
#define expand(l, op)                                                          \
  (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock_with_checks,
static void (*indirect_destroy_check[])(kmp_user_lock_p) = {
    KMP_FOREACH_I_LOCK(expand, destroy)};
#undef expand

// set/acquire functions
#define expand(l, op)                                                          \
  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
static int (*indirect_set[])(kmp_user_lock_p,
                             kmp_int32) = {KMP_FOREACH_I_LOCK(expand, acquire)};
#undef expand
#define expand(l, op)                                                          \
  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
static int (*indirect_set_check[])(kmp_user_lock_p, kmp_int32) = {
    KMP_FOREACH_I_LOCK(expand, acquire)};
#undef expand

// unset/release and test functions
#define expand(l, op)                                                          \
  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
static int (*indirect_unset[])(kmp_user_lock_p, kmp_int32) = {
    KMP_FOREACH_I_LOCK(expand, release)};
static int (*indirect_test[])(kmp_user_lock_p,
                              kmp_int32) = {KMP_FOREACH_I_LOCK(expand, test)};
#undef expand
#define expand(l, op)                                                          \
  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
static int (*indirect_unset_check[])(kmp_user_lock_p, kmp_int32) = {
    KMP_FOREACH_I_LOCK(expand, release)};
static int (*indirect_test_check[])(kmp_user_lock_p, kmp_int32) = {
    KMP_FOREACH_I_LOCK(expand, test)};
#undef expand
// Exposes only one set of jump tables (*lock or *lock_with_checks).
void (*(*__kmp_indirect_destroy))(kmp_user_lock_p) = 0;
int (*(*__kmp_indirect_set))(kmp_user_lock_p, kmp_int32) = 0;
int (*(*__kmp_indirect_unset))(kmp_user_lock_p, kmp_int32) = 0;
int (*(*__kmp_indirect_test))(kmp_user_lock_p, kmp_int32) = 0;

// Lock index table.
kmp_indirect_lock_table_t __kmp_i_lock_table;

// Size of indirect locks.
static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = {0};

// Jump tables for lock accessor/modifier.
void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                     const ident_t *) = {0};
void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                  kmp_lock_flags_t) = {0};
const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p) = {0};
kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p) = {0};

// Use different lock pools for different lock types.
static kmp_indirect_lock_t *__kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = {0};
// User lock allocator for dynamically dispatched indirect locks. Every entry
// of the indirect lock table holds the address and type of the allocated
// indirect lock, and the table doubles in size when it is full. A destroyed
// indirect lock object is returned to the reusable pool of its lock type.
kmp_indirect_lock_t *__kmp_allocate_indirect_lock(void **user_lock,
                                                  kmp_int32 gtid,
                                                  kmp_indirect_locktag_t tag) {
  kmp_indirect_lock_t *lck;
  kmp_lock_index_t idx;

  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  if (__kmp_indirect_lock_pool[tag] != NULL) {
    // Reuse an allocated and destroyed lock object
    lck = __kmp_indirect_lock_pool[tag];
    if (OMP_LOCK_T_SIZE < sizeof(void *))
      idx = lck->lock->pool.index;
    __kmp_indirect_lock_pool[tag] = (kmp_indirect_lock_t *)lck->lock->pool.next;
    KA_TRACE(20, ("__kmp_allocate_indirect_lock: reusing an existing lock %p\n",
                  lck));
  } else {
    idx = __kmp_i_lock_table.next;
    // Check capacity and double the size if it is full
    if (idx == __kmp_i_lock_table.size) {
      // Double up the space for block pointers
      int row = __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK;
      kmp_indirect_lock_t **new_table = (kmp_indirect_lock_t **)__kmp_allocate(
          2 * row * sizeof(kmp_indirect_lock_t *));
      KMP_MEMCPY(new_table, __kmp_i_lock_table.table,
                 row * sizeof(kmp_indirect_lock_t *));
      kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table;
      __kmp_i_lock_table.table = new_table;
      __kmp_free(old_table);
      // Allocate new objects in the new blocks
      for (int i = row; i < 2 * row; ++i)
        *(__kmp_i_lock_table.table + i) = (kmp_indirect_lock_t *)__kmp_allocate(
            KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
      __kmp_i_lock_table.size = 2 * idx;
    }
    __kmp_i_lock_table.next++;
    lck = KMP_GET_I_LOCK(idx);
    // Allocate a new base lock object
    lck->lock = (kmp_user_lock_p)__kmp_allocate(__kmp_indirect_lock_size[tag]);
    KA_TRACE(20,
             ("__kmp_allocate_indirect_lock: allocated a new lock %p\n", lck));
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);

  lck->type = tag;

  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
    *((kmp_lock_index_t *)user_lock) = idx
                                       << 1; // indirect lock word must be even
  } else {
    *((kmp_indirect_lock_t **)user_lock) = lck;
  }

  return lck;
}
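// The user's lock variable stores either a table index (when omp_lock_t is too
// small to hold a pointer) or the kmp_indirect_lock_t pointer itself. The
// left shift keeps the stored index even, presumably so it can be told apart
// from a direct lock word, whose tag bits are odd.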
// User lock lookup for dynamically dispatched locks.
static __forceinline kmp_indirect_lock_t *
__kmp_lookup_indirect_lock(void **user_lock, const char *func) {
  if (__kmp_env_consistency_check) {
    kmp_indirect_lock_t *lck = NULL;
    if (user_lock == NULL) {
      KMP_FATAL(LockIsUninitialized, func);
    }
    if (OMP_LOCK_T_SIZE < sizeof(void *)) {
      kmp_lock_index_t idx = KMP_EXTRACT_I_INDEX(user_lock);
      if (idx >= __kmp_i_lock_table.size) {
        KMP_FATAL(LockIsUninitialized, func);
      }
      lck = KMP_GET_I_LOCK(idx);
    } else {
      lck = *((kmp_indirect_lock_t **)user_lock);
    }
    if (lck == NULL) {
      KMP_FATAL(LockIsUninitialized, func);
    }
    return lck;
  } else {
    if (OMP_LOCK_T_SIZE < sizeof(void *)) {
      return KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(user_lock));
    } else {
      return *((kmp_indirect_lock_t **)user_lock);
    }
  }
}
static void __kmp_init_indirect_lock(kmp_dyna_lock_t *lock,
                                     kmp_dyna_lockseq_t seq) {
#if KMP_USE_ADAPTIVE_LOCKS
  if (seq == lockseq_adaptive && !__kmp_cpuinfo.rtm) {
    KMP_WARNING(AdaptiveNotSupported, "kmp_lockseq_t", "adaptive");
    seq = lockseq_queuing;
  }
#endif
#if KMP_USE_TSX
  if (seq == lockseq_rtm && !__kmp_cpuinfo.rtm) {
    seq = lockseq_queuing;
  }
#endif
  kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
  kmp_indirect_lock_t *l =
      __kmp_allocate_indirect_lock((void **)lock, __kmp_entry_gtid(), tag);
  KMP_I_LOCK_FUNC(l, init)(l->lock);
  KA_TRACE(
      20,
      ("__kmp_init_indirect_lock: initialized indirect lock with type#%d\n",
       seq));
}
static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock) {
  kmp_uint32 gtid = __kmp_entry_gtid();
  kmp_indirect_lock_t *l =
      __kmp_lookup_indirect_lock((void **)lock, "omp_destroy_lock");
  KMP_I_LOCK_FUNC(l, destroy)(l->lock);
  kmp_indirect_locktag_t tag = l->type;

  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  // Use the base lock's space to keep the pool chain.
  l->lock->pool.next = (kmp_user_lock_p)__kmp_indirect_lock_pool[tag];
  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
    l->lock->pool.index = KMP_EXTRACT_I_INDEX(lock);
  }
  __kmp_indirect_lock_pool[tag] = l;

  __kmp_release_lock(&__kmp_global_lock, gtid);
}
static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
  kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
  return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
}

static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
  kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
  return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
}

static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
  kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
  return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
}

static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
                                               kmp_int32 gtid) {
  kmp_indirect_lock_t *l =
      __kmp_lookup_indirect_lock((void **)lock, "omp_set_lock");
  return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
}

static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
                                                 kmp_int32 gtid) {
  kmp_indirect_lock_t *l =
      __kmp_lookup_indirect_lock((void **)lock, "omp_unset_lock");
  return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
}

static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
                                                kmp_int32 gtid) {
  kmp_indirect_lock_t *l =
      __kmp_lookup_indirect_lock((void **)lock, "omp_test_lock");
  return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
}
kmp_dyna_lockseq_t __kmp_user_lock_seq = lockseq_queuing;

// This is used only in kmp_error.cpp when consistency checking is on.
kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck, kmp_uint32 seq) {
  switch (seq) {
  case lockseq_tas:
  case lockseq_nested_tas:
    return __kmp_get_tas_lock_owner((kmp_tas_lock_t *)lck);
#if KMP_USE_FUTEX
  case lockseq_futex:
  case lockseq_nested_futex:
    return __kmp_get_futex_lock_owner((kmp_futex_lock_t *)lck);
#endif
  case lockseq_ticket:
  case lockseq_nested_ticket:
    return __kmp_get_ticket_lock_owner((kmp_ticket_lock_t *)lck);
  case lockseq_queuing:
  case lockseq_nested_queuing:
#if KMP_USE_ADAPTIVE_LOCKS
  case lockseq_adaptive:
#endif
    return __kmp_get_queuing_lock_owner((kmp_queuing_lock_t *)lck);
  case lockseq_drdpa:
  case lockseq_nested_drdpa:
    return __kmp_get_drdpa_lock_owner((kmp_drdpa_lock_t *)lck);
  default:
    return 0;
  }
}
// Initializes data for dynamic user locks.
void __kmp_init_dynamic_user_locks() {
  // Initialize jump table for the lock functions
  if (__kmp_env_consistency_check) {
    __kmp_direct_set = direct_set_check;
    __kmp_direct_unset = direct_unset_check;
    __kmp_direct_test = direct_test_check;
    __kmp_direct_destroy = direct_destroy_check;
    __kmp_indirect_set = indirect_set_check;
    __kmp_indirect_unset = indirect_unset_check;
    __kmp_indirect_test = indirect_test_check;
    __kmp_indirect_destroy = indirect_destroy_check;
  } else {
    __kmp_direct_set = direct_set;
    __kmp_direct_unset = direct_unset;
    __kmp_direct_test = direct_test;
    __kmp_direct_destroy = direct_destroy;
    __kmp_indirect_set = indirect_set;
    __kmp_indirect_unset = indirect_unset;
    __kmp_indirect_test = indirect_test;
    __kmp_indirect_destroy = indirect_destroy;
  }
  // If the user locks have already been initialized, return. This allows
  // switching between consistency-check modes without reallocating the tables.
  if (__kmp_init_user_locks)
    return;

  // Initialize lock index table
  __kmp_i_lock_table.size = KMP_I_LOCK_CHUNK;
  __kmp_i_lock_table.table =
      (kmp_indirect_lock_t **)__kmp_allocate(sizeof(kmp_indirect_lock_t *));
  *(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)__kmp_allocate(
      KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
  __kmp_i_lock_table.next = 0;
  // Indirect lock size
  __kmp_indirect_lock_size[locktag_ticket] = sizeof(kmp_ticket_lock_t);
  __kmp_indirect_lock_size[locktag_queuing] = sizeof(kmp_queuing_lock_t);
#if KMP_USE_ADAPTIVE_LOCKS
  __kmp_indirect_lock_size[locktag_adaptive] = sizeof(kmp_adaptive_lock_t);
#endif
  __kmp_indirect_lock_size[locktag_drdpa] = sizeof(kmp_drdpa_lock_t);
#if KMP_USE_TSX
  __kmp_indirect_lock_size[locktag_rtm] = sizeof(kmp_queuing_lock_t);
#endif
  __kmp_indirect_lock_size[locktag_nested_tas] = sizeof(kmp_tas_lock_t);
#if KMP_USE_FUTEX
  __kmp_indirect_lock_size[locktag_nested_futex] = sizeof(kmp_futex_lock_t);
#endif
  __kmp_indirect_lock_size[locktag_nested_ticket] = sizeof(kmp_ticket_lock_t);
  __kmp_indirect_lock_size[locktag_nested_queuing] = sizeof(kmp_queuing_lock_t);
  __kmp_indirect_lock_size[locktag_nested_drdpa] = sizeof(kmp_drdpa_lock_t);
  // Initialize lock accessor/modifier jump tables.
#define fill_jumps(table, expand, sep)                                         \
  {                                                                            \
    table[locktag##sep##ticket] = expand(ticket);                              \
    table[locktag##sep##queuing] = expand(queuing);                            \
    table[locktag##sep##drdpa] = expand(drdpa);                                \
  }

#if KMP_USE_ADAPTIVE_LOCKS
#define fill_table(table, expand)                                              \
  {                                                                            \
    fill_jumps(table, expand, _);                                              \
    table[locktag_adaptive] = expand(queuing);                                 \
    fill_jumps(table, expand, _nested_);                                       \
  }
#else
#define fill_table(table, expand)                                              \
  {                                                                            \
    fill_jumps(table, expand, _);                                              \
    fill_jumps(table, expand, _nested_);                                       \
  }
#endif // KMP_USE_ADAPTIVE_LOCKS

#define expand(l)                                                              \
  (void (*)(kmp_user_lock_p, const ident_t *)) __kmp_set_##l##_lock_location
  fill_table(__kmp_indirect_set_location, expand);
#undef expand
#define expand(l)                                                              \
  (void (*)(kmp_user_lock_p, kmp_lock_flags_t)) __kmp_set_##l##_lock_flags
  fill_table(__kmp_indirect_set_flags, expand);
#undef expand
#define expand(l)                                                              \
  (const ident_t *(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_location
  fill_table(__kmp_indirect_get_location, expand);
#undef expand
#define expand(l)                                                              \
  (kmp_lock_flags_t(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_flags
  fill_table(__kmp_indirect_get_flags, expand);
#undef expand

  __kmp_init_user_locks = TRUE;
}
// Clean up the lock table.
void __kmp_cleanup_indirect_user_locks() {
  kmp_lock_index_t i;
  int k;

  // Clean up locks in the pools first (they were already destroyed before
  // going into the pools).
  for (k = 0; k < KMP_NUM_I_LOCKS; ++k) {
    kmp_indirect_lock_t *l = __kmp_indirect_lock_pool[k];
    while (l != NULL) {
      kmp_indirect_lock_t *ll = l;
      l = (kmp_indirect_lock_t *)l->lock->pool.next;
      KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: freeing %p from pool\n",
                    ll));
      __kmp_free(ll->lock);
      ll->lock = NULL;
    }
    __kmp_indirect_lock_pool[k] = NULL;
  }
  // Clean up the remaining undestroyed locks.
  for (i = 0; i < __kmp_i_lock_table.next; i++) {
    kmp_indirect_lock_t *l = KMP_GET_I_LOCK(i);
    if (l->lock != NULL) {
      // Locks not destroyed explicitly need to be destroyed here.
      KMP_I_LOCK_FUNC(l, destroy)(l->lock);
      KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: destroy/freeing %p "
                    "from table\n",
                    l));
      __kmp_free(l->lock);
    }
  }
  // Free the table
  for (i = 0; i < __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK; i++)
    __kmp_free(__kmp_i_lock_table.table[i]);
  __kmp_free(__kmp_i_lock_table.table);

  __kmp_init_user_locks = FALSE;
}
enum kmp_lock_kind __kmp_user_lock_kind = lk_default;
int __kmp_num_locks_in_block = 1;
#else // KMP_USE_DYNAMIC_LOCK

static void __kmp_init_tas_lock_with_checks(kmp_tas_lock_t *lck) {
  __kmp_init_tas_lock(lck);
}

static void __kmp_init_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
  __kmp_init_nested_tas_lock(lck);
}

#if KMP_USE_FUTEX
static void __kmp_init_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  __kmp_init_futex_lock(lck);
}

static void __kmp_init_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  __kmp_init_nested_futex_lock(lck);
}
#endif

static int __kmp_is_ticket_lock_initialized(kmp_ticket_lock_t *lck) {
  return lck == lck->lk.initialized;
}

static void __kmp_init_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static void __kmp_init_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
  __kmp_init_nested_ticket_lock(lck);
}

static int __kmp_is_queuing_lock_initialized(kmp_queuing_lock_t *lck) {
  return lck == lck->lk.initialized;
}

static void __kmp_init_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
  __kmp_init_queuing_lock(lck);
}

static void
__kmp_init_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
  __kmp_init_nested_queuing_lock(lck);
}

#if KMP_USE_ADAPTIVE_LOCKS
static void __kmp_init_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
  __kmp_init_adaptive_lock(lck);
}
#endif

static int __kmp_is_drdpa_lock_initialized(kmp_drdpa_lock_t *lck) {
  return lck == lck->lk.initialized;
}

static void __kmp_init_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
  __kmp_init_drdpa_lock(lck);
}

static void __kmp_init_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
  __kmp_init_nested_drdpa_lock(lck);
}
/* user locks
 * They are implemented as a table of function pointers which are set to the
 * lock functions of the appropriate kind, once that has been determined. */

enum kmp_lock_kind __kmp_user_lock_kind = lk_default;

size_t __kmp_base_user_lock_size = 0;
size_t __kmp_user_lock_size = 0;

kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck) = NULL;
int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
                                            kmp_int32 gtid) = NULL;

int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
                                         kmp_int32 gtid) = NULL;
int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
                                            kmp_int32 gtid) = NULL;
void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) = NULL;

int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                kmp_int32 gtid) = NULL;
int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) = NULL;
void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;

int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck) = NULL;
const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
                                      const ident_t *loc) = NULL;
kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
                                   kmp_lock_flags_t flags) = NULL;
void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind) {
  switch (user_lock_kind) {
  case lk_default:
  default:
    KMP_ASSERT(0);

  case lk_tas: {
    __kmp_base_user_lock_size = sizeof(kmp_base_tas_lock_t);
    __kmp_user_lock_size = sizeof(kmp_tas_lock_t);

    __kmp_get_user_lock_owner_ =
        (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_tas_lock_owner);

    if (__kmp_env_consistency_check) {
      KMP_BIND_USER_LOCK_WITH_CHECKS(tas);
      KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(tas);
    } else {
      KMP_BIND_USER_LOCK(tas);
      KMP_BIND_NESTED_USER_LOCK(tas);
    }

    __kmp_destroy_user_lock_ =
        (void (*)(kmp_user_lock_p))(&__kmp_destroy_tas_lock);

    __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;

    __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;

    __kmp_set_user_lock_location_ =
        (void (*)(kmp_user_lock_p, const ident_t *))NULL;

    __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;

    __kmp_set_user_lock_flags_ =
        (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
  } break;
#if KMP_USE_FUTEX

  case lk_futex: {
    __kmp_base_user_lock_size = sizeof(kmp_base_futex_lock_t);
    __kmp_user_lock_size = sizeof(kmp_futex_lock_t);

    __kmp_get_user_lock_owner_ =
        (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_futex_lock_owner);

    if (__kmp_env_consistency_check) {
      KMP_BIND_USER_LOCK_WITH_CHECKS(futex);
      KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(futex);
    } else {
      KMP_BIND_USER_LOCK(futex);
      KMP_BIND_NESTED_USER_LOCK(futex);
    }

    __kmp_destroy_user_lock_ =
        (void (*)(kmp_user_lock_p))(&__kmp_destroy_futex_lock);

    __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;

    __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;

    __kmp_set_user_lock_location_ =
        (void (*)(kmp_user_lock_p, const ident_t *))NULL;

    __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;

    __kmp_set_user_lock_flags_ =
        (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
  } break;
#endif // KMP_USE_FUTEX

  case lk_ticket: {
    __kmp_base_user_lock_size = sizeof(kmp_base_ticket_lock_t);
    __kmp_user_lock_size = sizeof(kmp_ticket_lock_t);

    __kmp_get_user_lock_owner_ =
        (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_owner);

    if (__kmp_env_consistency_check) {
      KMP_BIND_USER_LOCK_WITH_CHECKS(ticket);
      KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(ticket);
    } else {
      KMP_BIND_USER_LOCK(ticket);
      KMP_BIND_NESTED_USER_LOCK(ticket);
    }

    __kmp_destroy_user_lock_ =
        (void (*)(kmp_user_lock_p))(&__kmp_destroy_ticket_lock);

    __kmp_is_user_lock_initialized_ =
        (int (*)(kmp_user_lock_p))(&__kmp_is_ticket_lock_initialized);

    __kmp_get_user_lock_location_ =
        (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_location);

    __kmp_set_user_lock_location_ = (void (*)(
        kmp_user_lock_p, const ident_t *))(&__kmp_set_ticket_lock_location);

    __kmp_get_user_lock_flags_ =
        (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_flags);

    __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
        &__kmp_set_ticket_lock_flags);
  } break;
  case lk_queuing: {
    __kmp_base_user_lock_size = sizeof(kmp_base_queuing_lock_t);
    __kmp_user_lock_size = sizeof(kmp_queuing_lock_t);

    __kmp_get_user_lock_owner_ =
        (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);

    if (__kmp_env_consistency_check) {
      KMP_BIND_USER_LOCK_WITH_CHECKS(queuing);
      KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(queuing);
    } else {
      KMP_BIND_USER_LOCK(queuing);
      KMP_BIND_NESTED_USER_LOCK(queuing);
    }

    __kmp_destroy_user_lock_ =
        (void (*)(kmp_user_lock_p))(&__kmp_destroy_queuing_lock);

    __kmp_is_user_lock_initialized_ =
        (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);

    __kmp_get_user_lock_location_ =
        (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);

    __kmp_set_user_lock_location_ = (void (*)(
        kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);

    __kmp_get_user_lock_flags_ =
        (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);

    __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
        &__kmp_set_queuing_lock_flags);
  } break;
#if KMP_USE_ADAPTIVE_LOCKS
  case lk_adaptive: {
    __kmp_base_user_lock_size = sizeof(kmp_base_adaptive_lock_t);
    __kmp_user_lock_size = sizeof(kmp_adaptive_lock_t);

    __kmp_get_user_lock_owner_ =
        (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);

    if (__kmp_env_consistency_check) {
      KMP_BIND_USER_LOCK_WITH_CHECKS(adaptive);
    } else {
      KMP_BIND_USER_LOCK(adaptive);
    }

    __kmp_destroy_user_lock_ =
        (void (*)(kmp_user_lock_p))(&__kmp_destroy_adaptive_lock);

    __kmp_is_user_lock_initialized_ =
        (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);

    __kmp_get_user_lock_location_ =
        (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);

    __kmp_set_user_lock_location_ = (void (*)(
        kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);

    __kmp_get_user_lock_flags_ =
        (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);

    __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
        &__kmp_set_queuing_lock_flags);
  } break;
#endif // KMP_USE_ADAPTIVE_LOCKS

  case lk_drdpa: {
    __kmp_base_user_lock_size = sizeof(kmp_base_drdpa_lock_t);
    __kmp_user_lock_size = sizeof(kmp_drdpa_lock_t);

    __kmp_get_user_lock_owner_ =
        (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_owner);

    if (__kmp_env_consistency_check) {
      KMP_BIND_USER_LOCK_WITH_CHECKS(drdpa);
      KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(drdpa);
    } else {
      KMP_BIND_USER_LOCK(drdpa);
      KMP_BIND_NESTED_USER_LOCK(drdpa);
    }

    __kmp_destroy_user_lock_ =
        (void (*)(kmp_user_lock_p))(&__kmp_destroy_drdpa_lock);

    __kmp_is_user_lock_initialized_ =
        (int (*)(kmp_user_lock_p))(&__kmp_is_drdpa_lock_initialized);

    __kmp_get_user_lock_location_ =
        (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_location);

    __kmp_set_user_lock_location_ = (void (*)(
        kmp_user_lock_p, const ident_t *))(&__kmp_set_drdpa_lock_location);

    __kmp_get_user_lock_flags_ =
        (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_flags);

    __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
        &__kmp_set_drdpa_lock_flags);
  } break;
  }
}
// ----------------------------------------------------------------------------
// User lock table and lock allocation

kmp_lock_table_t __kmp_user_lock_table = {1, 0, NULL};
kmp_user_lock_p __kmp_lock_pool = NULL;

// Lock block-allocation support.
kmp_block_of_locks *__kmp_lock_blocks = NULL;
int __kmp_num_locks_in_block = 1;
static kmp_lock_index_t __kmp_lock_table_insert(kmp_user_lock_p lck) {
  // Assume that kmp_global_lock is held upon entry/exit.
  kmp_lock_index_t index;
  if (__kmp_user_lock_table.used >= __kmp_user_lock_table.allocated) {
    kmp_lock_index_t size;
    kmp_user_lock_p *table;
    // Reallocate lock table.
    if (__kmp_user_lock_table.allocated == 0) {
      size = 1024;
    } else {
      size = __kmp_user_lock_table.allocated * 2;
    }
    table = (kmp_user_lock_p *)__kmp_allocate(sizeof(kmp_user_lock_p) * size);
    KMP_MEMCPY(table + 1, __kmp_user_lock_table.table + 1,
               sizeof(kmp_user_lock_p) * (__kmp_user_lock_table.used - 1));
    table[0] = (kmp_user_lock_p)__kmp_user_lock_table.table;
    __kmp_user_lock_table.table = table;
    __kmp_user_lock_table.allocated = size;
  }
  KMP_DEBUG_ASSERT(__kmp_user_lock_table.used <
                   __kmp_user_lock_table.allocated);
  index = __kmp_user_lock_table.used;
  __kmp_user_lock_table.table[index] = lck;
  ++__kmp_user_lock_table.used;
  return index;
}
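// Old (smaller) tables are not freed here because other threads may still be
// reading them; element [0] of each new table points at its predecessor, and
// the whole chain is released in __kmp_cleanup_user_locks().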
static kmp_user_lock_p __kmp_lock_block_allocate() {
  // Assume that kmp_global_lock is held upon entry/exit.
  static int last_index = 0;
  if ((last_index >= __kmp_num_locks_in_block) || (__kmp_lock_blocks == NULL)) {
    // Restart the index.
    last_index = 0;
    // Need to allocate a new block.
    KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
    size_t space_for_locks = __kmp_user_lock_size * __kmp_num_locks_in_block;
    char *buffer =
        (char *)__kmp_allocate(space_for_locks + sizeof(kmp_block_of_locks));
    // Set up the new block.
    kmp_block_of_locks *new_block =
        (kmp_block_of_locks *)(&buffer[space_for_locks]);
    new_block->next_block = __kmp_lock_blocks;
    new_block->locks = (void *)buffer;
    KMP_MB(); // make sure block is initialized before publishing it
    __kmp_lock_blocks = new_block;
  }
  kmp_user_lock_p ret = (kmp_user_lock_p)(&(
      ((char *)(__kmp_lock_blocks->locks))[last_index * __kmp_user_lock_size]));
  last_index++;
  return ret;
}
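// Block allocation carves __kmp_num_locks_in_block lock-sized slots out of one
// allocation and places the kmp_block_of_locks header after the slots. Callers
// are expected to hold __kmp_global_lock, since last_index is unsynchronized
// static state.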
// Get memory for a lock. It may be freshly allocated memory or reused memory
// from the lock pool.
kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock, kmp_int32 gtid,
                                         kmp_lock_flags_t flags) {
  kmp_user_lock_p lck;
  kmp_lock_index_t index;
  KMP_DEBUG_ASSERT(user_lock);

  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  if (__kmp_lock_pool == NULL) {
    // Lock pool is empty. Allocate new memory.
    ANNOTATE_IGNORE_WRITES_BEGIN();
    if (__kmp_num_locks_in_block <= 1) {
      lck = (kmp_user_lock_p)__kmp_allocate(__kmp_user_lock_size);
    } else {
      lck = __kmp_lock_block_allocate();
    }
    ANNOTATE_IGNORE_WRITES_END();

    // Insert lock in the table so that it can be freed in __kmp_cleanup,
    // and debugger has info on all allocated locks.
    index = __kmp_lock_table_insert(lck);
  } else {
    // Pick up lock from pool.
    lck = __kmp_lock_pool;
    index = __kmp_lock_pool->pool.index;
    __kmp_lock_pool = __kmp_lock_pool->pool.next;
  }

  // We could potentially differentiate between nested and regular locks
  // here, and do the lock table lookup for regular locks only.
  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
    *((kmp_lock_index_t *)user_lock) = index;
  } else {
    *((kmp_user_lock_p *)user_lock) = lck;
  }

  // mark the lock if it is a critical section lock.
  __kmp_set_user_lock_flags(lck, flags);

  __kmp_release_lock(&__kmp_global_lock, gtid);

  return lck;
}
// Put lock's memory to pool for reusing.
void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
                          kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(user_lock != NULL);
  KMP_DEBUG_ASSERT(lck != NULL);

  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  lck->pool.next = __kmp_lock_pool;
  __kmp_lock_pool = lck;
  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
    kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
    KMP_DEBUG_ASSERT(0 < index && index <= __kmp_user_lock_table.used);
    lck->pool.index = index;
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);
}
kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock, char const *func) {
  kmp_user_lock_p lck = NULL;

  if (__kmp_env_consistency_check) {
    if (user_lock == NULL) {
      KMP_FATAL(LockIsUninitialized, func);
    }
  }

  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
    kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
    if (__kmp_env_consistency_check) {
      if (!(0 < index && index < __kmp_user_lock_table.used)) {
        KMP_FATAL(LockIsUninitialized, func);
      }
    }
    KMP_DEBUG_ASSERT(0 < index && index < __kmp_user_lock_table.used);
    KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
    lck = __kmp_user_lock_table.table[index];
  } else {
    lck = *((kmp_user_lock_p *)user_lock);
  }

  if (__kmp_env_consistency_check) {
    if (lck == NULL) {
      KMP_FATAL(LockIsUninitialized, func);
    }
  }

  return lck;
}
void __kmp_cleanup_user_locks(void) {
  // Reset lock pool. Locks in the pool are freed below while iterating through
  // the lock table, which includes all locks, dead or alive.
  __kmp_lock_pool = NULL;

#define IS_CRITICAL(lck)                                                       \
  ((__kmp_get_user_lock_flags_ != NULL) &&                                     \
   ((*__kmp_get_user_lock_flags_)(lck)&kmp_lf_critical_section))

  // Loop through lock table, free all locks.
  // Do not free item [0]; it is reserved for the lock tables list.
  while (__kmp_user_lock_table.used > 1) {
    const ident_t *loc;

    // Reduce __kmp_user_lock_table.used before freeing the lock, so that the
    // state of locks stays consistent.
    kmp_user_lock_p lck =
        __kmp_user_lock_table.table[--__kmp_user_lock_table.used];

    if ((__kmp_is_user_lock_initialized_ != NULL) &&
        (*__kmp_is_user_lock_initialized_)(lck)) {
      // Warn about locks the user forgot to destroy, except critical-section
      // locks, which the runtime owns.
      if (__kmp_env_consistency_check && (!IS_CRITICAL(lck)) &&
          ((loc = __kmp_get_user_lock_location(lck)) != NULL) &&
          (loc->psource != NULL)) {
        kmp_str_loc_t str_loc = __kmp_str_loc_init(loc->psource, 0);
        KMP_WARNING(CnsLockNotDestroyed, str_loc.file, str_loc.line);
        __kmp_str_loc_free(&str_loc);
      }

      if (IS_CRITICAL(lck)) {
        KA_TRACE(
            20,
            ("__kmp_cleanup_user_locks: free critical section lock %p (%p)\n",
             lck, *(void **)lck));
      } else {
        KA_TRACE(20, ("__kmp_cleanup_user_locks: free lock %p (%p)\n", lck,
                      *(void **)lck));
      }

      // Cleanup internal lock dynamic resources (for drdpa locks particularly).
      __kmp_destroy_user_lock(lck);
    }

    // Free the lock if block allocation of locks is not used.
    if (__kmp_lock_blocks == NULL) {
      __kmp_free(lck);
    }
  }
  // Deallocate the lock tables; element [0] of each table points at the
  // previous (smaller) table.
  kmp_user_lock_p *table_ptr = __kmp_user_lock_table.table;
  __kmp_user_lock_table.table = NULL;
  __kmp_user_lock_table.allocated = 0;

  while (table_ptr != NULL) {
    kmp_user_lock_p *next = (kmp_user_lock_p *)(table_ptr[0]);
    __kmp_free(table_ptr);
    table_ptr = next;
  }

  // Free buffers allocated for blocks of locks.
  kmp_block_of_locks_t *block_ptr = __kmp_lock_blocks;
  __kmp_lock_blocks = NULL;

  while (block_ptr != NULL) {
    kmp_block_of_locks_t *next = block_ptr->next_block;
    __kmp_free(block_ptr->locks);
    // The block header itself lives at the end of the locks buffer just freed.
    block_ptr = next;
  }

  TCW_4(__kmp_init_user_locks, FALSE);
}
#endif // KMP_USE_DYNAMIC_LOCK