#ifndef KMP_AFFINITY_H
#define KMP_AFFINITY_H

#include "kmp.h"
#include "kmp_os.h"

#if KMP_AFFINITY_SUPPORTED

class KMPHwlocAffinity : public KMPAffinity {
public:
  class Mask : public KMPAffinity::Mask {
    hwloc_cpuset_t mask;

  public:
    Mask() {
      mask = hwloc_bitmap_alloc();
      this->zero();
    }
    ~Mask() { hwloc_bitmap_free(mask); }
    void set(int i) override { hwloc_bitmap_set(mask, i); }
    bool is_set(int i) const override { return hwloc_bitmap_isset(mask, i); }
    void clear(int i) override { hwloc_bitmap_clr(mask, i); }
    void zero() override { hwloc_bitmap_zero(mask); }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      hwloc_bitmap_copy(mask, convert->mask);
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_and(mask, mask, convert->mask);
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_or(mask, mask, convert->mask);
    }
    void bitwise_not() override { hwloc_bitmap_not(mask, mask); }
    int begin() const override { return hwloc_bitmap_first(mask); }
    int end() const override { return -1; }
    int next(int previous) const override {
      return hwloc_bitmap_next(mask, previous);
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
      int retval =
          hwloc_get_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
      int retval =
          hwloc_set_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
    int set_process_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set process affinity operation when not capable");
      const hwloc_topology_support *support =
          hwloc_topology_get_support(__kmp_hwloc_topology);
      if (support->cpubind->set_proc_cpubind) {
        int retval;
        retval = hwloc_set_cpubind(__kmp_hwloc_topology, mask,
                                   HWLOC_CPUBIND_PROCESS);
        if (retval >= 0)
          return 0;
        int error = errno;
        if (abort_on_error)
          __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
        return error;
      }
      return 0;
    }
    int get_proc_group() const override {
      int group = -1;
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        // On Windows, an unsigned long is always 32 bits, so each processor
        // group occupies two of hwloc's ulong chunks.
        unsigned long first_32_bits = hwloc_bitmap_to_ith_ulong(mask, i * 2);
        unsigned long second_32_bits =
            hwloc_bitmap_to_ith_ulong(mask, i * 2 + 1);
        if (first_32_bits == 0 && second_32_bits == 0) {
          continue;
        }
        if (group >= 0)
          return -1;
        group = i;
      }
#endif /* KMP_OS_WINDOWS */
      return group;
    }
  };
  void determine_capable(const char *var) override {
    const hwloc_topology_support *topology_support;
    if (__kmp_hwloc_topology == NULL) {
      if (hwloc_topology_init(&__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_init()");
      }
      if (hwloc_topology_load(__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_load()");
      }
    }
    topology_support = hwloc_topology_get_support(__kmp_hwloc_topology);
    // Affinity is usable only if the system can set/get this thread's
    // binding, processing units are discoverable, and no hwloc call failed.
    if (topology_support && topology_support->cpubind->set_thisthread_cpubind &&
        topology_support->cpubind->get_thisthread_cpubind &&
        topology_support->discovery->pu && !__kmp_hwloc_error) {
      // Enables affinity according to the KMP_AFFINITY_CAPABLE() macro.
      KMP_AFFINITY_ENABLE(TRUE);
    } else {
      // Indicate that hwloc didn't work and disable affinity.
      __kmp_hwloc_error = TRUE;
      KMP_AFFINITY_DISABLE();
    }
  }
  void bind_thread(int which) override {
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                "Illegal set affinity operation when not capable");
    KMPAffinity::Mask *mask;
    KMP_CPU_ALLOC_ON_STACK(mask);
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(which, mask);
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE_FROM_STACK(mask);
  }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    delete[] hwloc_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    return &(hwloc_array[index]);
  }
  api_type get_api_type() const override { return HWLOC; }
};
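// Each platform-specific class above/below implements the same KMPAffinity
// interface: masks are created only through the virtual allocate_mask*()
// factories and manipulated only through KMPAffinity::Mask virtuals, so the
// rest of the runtime never touches hwloc, Linux, or Windows details
// directly. A minimal usage sketch, assuming the usual KMP_CPU_* convenience
// macros that wrap these virtuals (names defined outside this header):
//
//   KMPAffinity::Mask *mask;
//   KMP_CPU_ALLOC(mask);        // -> allocate_mask() on the active backend
//   KMP_CPU_ZERO(mask);
//   KMP_CPU_SET(tid, mask);     // -> mask->set(tid)
//   __kmp_set_system_affinity(mask, /*abort_on_error=*/TRUE);
//   KMP_CPU_FREE(mask);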
#if KMP_OS_LINUX || KMP_OS_FREEBSD
/* On older Linux installations these syscall numbers may be missing from
   <sys/syscall.h>, so they are pinned per architecture; once assigned they
   never change. */
#if KMP_OS_LINUX
#include <sys/syscall.h>
#if KMP_ARCH_X86 || KMP_ARCH_ARM
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 241
#elif __NR_sched_setaffinity != 241
#error Wrong code for setaffinity system call.
#endif
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 242
#elif __NR_sched_getaffinity != 242
#error Wrong code for getaffinity system call.
#endif
#elif KMP_ARCH_AARCH64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif
#elif KMP_ARCH_RISCV64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif
#elif KMP_ARCH_X86_64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 203
#elif __NR_sched_setaffinity != 203
#error Wrong code for setaffinity system call.
#endif
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 204
#elif __NR_sched_getaffinity != 204
#error Wrong code for getaffinity system call.
#endif
#elif KMP_ARCH_PPC64 /* guard name inferred from the 222/223 syscall numbers */
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 222
#elif __NR_sched_setaffinity != 222
#error Wrong code for setaffinity system call.
#endif
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 223
#elif __NR_sched_getaffinity != 223
#error Wrong code for getaffinity system call.
#endif
#elif KMP_ARCH_MIPS /* guard name inferred from the 4239/4240 (o32) numbers */
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 4239
#elif __NR_sched_setaffinity != 4239
#error Wrong code for setaffinity system call.
#endif
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 4240
#elif __NR_sched_getaffinity != 4240
#error Wrong code for getaffinity system call.
#endif
#elif KMP_ARCH_MIPS64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 5195
#elif __NR_sched_setaffinity != 5195
#error Wrong code for setaffinity system call.
#endif
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 5196
#elif __NR_sched_getaffinity != 5196
#error Wrong code for getaffinity system call.
#endif
#elif KMP_ARCH_LOONGARCH64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif
#else
#error Unknown or unsupported architecture
#endif /* KMP_ARCH_* */
#elif KMP_OS_FREEBSD
#include <pthread.h>
#include <pthread_np.h>
#endif

class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef unsigned long mask_t;
    typedef decltype(__kmp_affin_mask_size) mask_size_type;
    static const unsigned int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    static const mask_t ONE = 1;
    mask_size_type get_num_mask_types() const {
      return __kmp_affin_mask_size / sizeof(mask_t);
    }

  public:
    mask_t *mask;
    Mask() { mask = (mask_t *)__kmp_allocate(__kmp_affin_mask_size); }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
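    // Indexing note: logical CPU i lives in word i / BITS_PER_MASK_T, at bit
    // i % BITS_PER_MASK_T of that word. E.g., with a 64-bit mask_t, CPU 70
    // maps to bit 6 of mask[1] (70 / 64 == 1, 70 % 64 == 6).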
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= (ONE << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & (ONE << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~(ONE << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = (mask_t)0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override {
      int e;
      __kmp_type_convert(get_num_mask_types() * BITS_PER_MASK_T, &e);
      return e;
    }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
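    // begin()/next()/end() support simple forward iteration over set bits,
    // e.g. for (int i = m->begin(); i != m->end(); i = m->next(i)) { ... }
    // The same loop works with the hwloc mask, where both next() and end()
    // use -1 as the terminating value.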
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
#if KMP_OS_LINUX
      long retval =
          syscall(__NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_getaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
#if KMP_OS_LINUX
      long retval =
          syscall(__NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_setaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override {
    KMPNativeAffinity::Mask *retval = new Mask();
    return retval;
  }
  void deallocate_mask(KMPAffinity::Mask *m) override {
    KMPNativeAffinity::Mask *native_mask =
        static_cast<KMPNativeAffinity::Mask *>(m);
    delete native_mask;
  }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *linux_array = static_cast<Mask *>(array);
    delete[] linux_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *linux_array = static_cast<Mask *>(array);
    return &(linux_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD */
#if KMP_OS_WINDOWS
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef ULONG_PTR mask_t;
    static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    mask_t *mask;

  public:
    Mask() {
      mask = (mask_t *)__kmp_allocate(sizeof(mask_t) * __kmp_num_proc_groups);
    }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = 0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override { return __kmp_num_proc_groups * BITS_PER_MASK_T; }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int set_process_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups <= 1) {
        if (!SetProcessAffinityMask(GetCurrentProcess(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
    int set_system_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups > 1) {
        // Check for a valid mask.
        GROUP_AFFINITY ga;
        int group = get_proc_group();
        if (group < 0) {
          if (abort_on_error) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
          }
          return -1;
        }
        // Transform the bit vector into a GROUP_AFFINITY struct
        // and make the system call to set affinity.
        ga.Group = group;
        ga.Mask = mask[group];
        ga.Reserved[0] = ga.Reserved[1] = ga.Reserved[2] = 0;

        KMP_DEBUG_ASSERT(__kmp_SetThreadGroupAffinity != NULL);
        if (__kmp_SetThreadGroupAffinity(GetCurrentThread(), &ga, NULL) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      } else {
        if (!SetThreadAffinityMask(GetCurrentThread(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
    int get_system_affinity(bool abort_on_error) override {
      if (__kmp_num_proc_groups > 1) {
        this->zero();
        GROUP_AFFINITY ga;
        KMP_DEBUG_ASSERT(__kmp_GetThreadGroupAffinity != NULL);
        if (__kmp_GetThreadGroupAffinity(GetCurrentThread(), &ga) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetThreadGroupAffinity()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        if ((ga.Group < 0) || (ga.Group > __kmp_num_proc_groups) ||
            (ga.Mask == 0)) {
          return -1;
        }
        mask[ga.Group] = ga.Mask;
      } else {
        // Read the thread mask via set-and-restore, since there is no
        // direct "get thread affinity" call.
        mask_t newMask, sysMask, retval;
        if (!GetProcessAffinityMask(GetCurrentProcess(), &newMask, &sysMask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetProcessAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        retval = SetThreadAffinityMask(GetCurrentThread(), newMask);
        if (!retval) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        newMask = SetThreadAffinityMask(GetCurrentThread(), retval);
        if (!newMask) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
        }
        *mask = retval;
      }
      return 0;
    }
    int get_proc_group() const override {
      int group = -1;
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        if (mask[i] == 0)
          continue;
        if (group >= 0) // bits set in more than one group
          return -1;
        group = i;
      }
      return group;
    }
  };
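  // Windows exposes machines with more than 64 logical processors as several
  // "processor groups" of up to 64 CPUs each, so the mask is stored as one
  // mask_t word per group and bound via GROUP_AFFINITY. For example, on a
  // 128-CPU box with two groups, logical CPU 70 is bit 6 of mask[1], and
  // get_proc_group() reports which single group the mask touches (-1 if it
  // spans several, since SetThreadGroupAffinity takes exactly one group).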
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *windows_array = static_cast<Mask *>(array);
    delete[] windows_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *windows_array = static_cast<Mask *>(array);
    return &(windows_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_WINDOWS */
#endif /* KMP_AFFINITY_SUPPORTED */
class kmp_hw_thread_t {
public:
  static const int UNKNOWN_ID = -1;
  static int compare_ids(const void *a, const void *b);
  static int compare_compact(const void *a, const void *b);
  int ids[KMP_HW_LAST];
  int sub_ids[KMP_HW_LAST];
  void clear() {
    for (int i = 0; i < (int)KMP_HW_LAST; ++i)
      ids[i] = UNKNOWN_ID;
  }
};
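// Each kmp_hw_thread_t describes one OS logical processor: ids[] holds its
// id at every detected topology layer (socket, core, thread, ...), indexed
// by level, and sub_ids[] holds the corresponding ids renumbered within the
// parent layer; both arrays are sized for the maximum number of layers
// (KMP_HW_LAST). Roughly, the second hardware thread of core 3 on socket 1
// would carry ids = {1, 3, 1} over a {socket, core, thread} topology.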
class kmp_topology_t {
  struct flags_t {
    int uniform : 1;
    int reserved : 31;
  };

  int depth;
  // Per-level arrays (size = depth): layer type, items per item of the layer
  // above, and absolute item count.
  kmp_hw_t *types;
  int *ratio;
  int *count;

  int num_hw_threads;
  kmp_hw_thread_t *hw_threads;

  // Maps any kmp_hw_t to the equivalent type stored in types[]
  // (KMP_HW_UNKNOWN if there is no equivalent layer).
  kmp_hw_t equivalent[KMP_HW_LAST];

  flags_t flags;

  void _gather_enumeration_information();
  void _remove_radix1_layers();
  void _discover_uniformity();
  void _set_last_level_cache();

public:
  kmp_topology_t() = delete;
  kmp_topology_t(const kmp_topology_t &t) = delete;
  kmp_topology_t(kmp_topology_t &&t) = delete;
  kmp_topology_t &operator=(const kmp_topology_t &t) = delete;
  kmp_topology_t &operator=(kmp_topology_t &&t) = delete;

  static kmp_topology_t *allocate(int nproc, int ndepth, const kmp_hw_t *types);
  static void deallocate(kmp_topology_t *);
  kmp_hw_thread_t &at(int index) {
    KMP_DEBUG_ASSERT(index >= 0 && index < num_hw_threads);
    return hw_threads[index];
  }
  const kmp_hw_thread_t &at(int index) const {
    KMP_DEBUG_ASSERT(index >= 0 && index < num_hw_threads);
    return hw_threads[index];
  }
  int get_num_hw_threads() const { return num_hw_threads; }
  void sort_ids() {
    qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
          kmp_hw_thread_t::compare_ids);
  }
  bool check_ids() const;
  void canonicalize(int pkgs, int cores_per_pkg, int thr_per_core, int cores);
  bool filter_hw_subset();
  bool is_close(int hwt1, int hwt2, int level) const;
  bool is_uniform() const { return flags.uniform; }
  kmp_hw_t get_equivalent_type(kmp_hw_t type) const { return equivalent[type]; }
  // Set type1 to be equivalent to type2.
  void set_equivalent_type(kmp_hw_t type1, kmp_hw_t type2) {
    KMP_DEBUG_ASSERT_VALID_HW_TYPE(type1);
    KMP_DEBUG_ASSERT_VALID_HW_TYPE(type2);
    kmp_hw_t real_type2 = equivalent[type2];
    if (real_type2 == KMP_HW_UNKNOWN)
      real_type2 = type2;
    equivalent[type1] = real_type2;
    // Any type previously made equivalent to type1 must be redirected as well.
    KMP_FOREACH_HW_TYPE(type) {
      if (equivalent[type] == type1) {
        equivalent[type] = real_type2;
      }
    }
  }
  // Calculate the number of level1 items per level2 item
  // (e.g., the number of threads per core).
  int calculate_ratio(int level1, int level2) const {
    KMP_DEBUG_ASSERT(level1 >= 0 && level1 < depth);
    KMP_DEBUG_ASSERT(level2 >= 0 && level2 < depth);
    int r = 1;
    for (int level = level1; level > level2; --level)
      r *= ratio[level];
    return r;
  }
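  // Worked example (illustrative values): with types = {socket, core, thread},
  // ratio[1] = 8 (cores per socket) and ratio[2] = 2 (threads per core),
  // calculate_ratio(2, 0) returns ratio[2] * ratio[1] = 2 * 8 = 16, i.e. 16
  // hardware threads per socket.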
  int get_ratio(int level) const {
    KMP_DEBUG_ASSERT(level >= 0 && level < depth);
    return ratio[level];
  }
  int get_depth() const { return depth; }
  kmp_hw_t get_type(int level) const {
    KMP_DEBUG_ASSERT(level >= 0 && level < depth);
    return types[level];
  }
  int get_level(kmp_hw_t type) const {
    KMP_DEBUG_ASSERT_VALID_HW_TYPE(type);
    int eq_type = equivalent[type];
    if (eq_type == KMP_HW_UNKNOWN)
      return -1;
    for (int i = 0; i < depth; ++i)
      if (types[i] == eq_type)
        return i;
    return -1;
  }
  int get_count(int level) const {
    KMP_DEBUG_ASSERT(level >= 0 && level < depth);
    return count[level];
  }
#if KMP_AFFINITY_SUPPORTED
  void sort_compact() {
    qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
          kmp_hw_thread_t::compare_compact);
  }
#endif
  void print(const char *env_var = "KMP_AFFINITY") const;
};
class kmp_hw_subset_t {
public:
  struct item_t {
    int num;
    kmp_hw_t type;
    int offset;
  };

private:
  int depth;
  int capacity;
  item_t *items;
  kmp_uint64 set;
  bool absolute;
  // The set must be able to handle up to KMP_HW_LAST number of layers.
  KMP_BUILD_ASSERT(sizeof(set) * 8 >= KMP_HW_LAST);

public:
  // Force use of allocate()/deallocate()
  kmp_hw_subset_t() = delete;
  kmp_hw_subset_t(const kmp_hw_subset_t &t) = delete;
  kmp_hw_subset_t(kmp_hw_subset_t &&t) = delete;
  kmp_hw_subset_t &operator=(const kmp_hw_subset_t &t) = delete;
  kmp_hw_subset_t &operator=(kmp_hw_subset_t &&t) = delete;

  static kmp_hw_subset_t *allocate() {
    int initial_capacity = 5;
    kmp_hw_subset_t *retval =
        (kmp_hw_subset_t *)__kmp_allocate(sizeof(kmp_hw_subset_t));
    retval->depth = 0;
    retval->capacity = initial_capacity;
    retval->set = 0ull;
    retval->absolute = false;
    retval->items = (item_t *)__kmp_allocate(sizeof(item_t) * initial_capacity);
    return retval;
  }
  static void deallocate(kmp_hw_subset_t *subset) {
    __kmp_free(subset->items);
    __kmp_free(subset);
  }
  void set_absolute() { absolute = true; }
  bool is_absolute() const { return absolute; }
  void push_back(int num, kmp_hw_t type, int offset) {
    if (depth == capacity - 1) {
      capacity *= 2;
      item_t *new_items = (item_t *)__kmp_allocate(sizeof(item_t) * capacity);
      for (int i = 0; i < depth; ++i)
        new_items[i] = items[i];
      __kmp_free(items);
      items = new_items;
    }
    items[depth].num = num;
    items[depth].type = type;
    items[depth].offset = offset;
    depth++;
    set |= (1ull << type);
  }
  int get_depth() const { return depth; }
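  // Example: a KMP_HW_SUBSET value such as "2s,4c,2t" would typically be
  // parsed into three push_back() calls, push_back(2, KMP_HW_SOCKET, 0),
  // push_back(4, KMP_HW_CORE, 0) and push_back(2, KMP_HW_THREAD, 0), after
  // which specified(KMP_HW_CORE) is true and get_depth() returns 3 (the
  // parser lives elsewhere in the runtime; this call sequence is a sketch).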
  const item_t &at(int index) const {
    KMP_DEBUG_ASSERT(index >= 0 && index < depth);
    return items[index];
  }
  item_t &at(int index) {
    KMP_DEBUG_ASSERT(index >= 0 && index < depth);
    return items[index];
  }
  void remove(int index) {
    KMP_DEBUG_ASSERT(index >= 0 && index < depth);
    set &= ~(1ull << items[index].type);
    for (int j = index + 1; j < depth; ++j) {
      items[j - 1] = items[j];
    }
    depth--;
  }
  bool specified(kmp_hw_t type) const { return ((set & (1ull << type)) > 0); }
  void dump() const {
    printf("**********************\n");
    printf("*** kmp_hw_subset: ***\n");
    printf("* depth: %d\n", depth);
    printf("* items:\n");
    for (int i = 0; i < depth; ++i) {
      printf("num: %d, type: %s, offset: %d\n", items[i].num,
             __kmp_hw_get_keyword(items[i].type), items[i].offset);
    }
    printf("* set: 0x%llx\n", set);
    printf("* absolute: %d\n", absolute);
    printf("**********************\n");
  }
};
extern kmp_topology_t *__kmp_topology;
extern kmp_hw_subset_t *__kmp_hw_subset;
class hierarchy_info {
public:
  // Good default values for the number of leaves and the branching factor,
  // given the rank number of a node in the hierarchy.
  static const kmp_uint32 maxLeaves = 4;
  static const kmp_uint32 minBranch = 4;
  // Number of levels in the hierarchy. When the machine is oversubscribed,
  // levels are added, doubling the thread capacity of the hierarchy each time.
  kmp_uint32 maxLevels;
  // Depth of the machine configuration hierarchy: the number of levels along
  // the longest path from the root to any leaf.
  kmp_uint32 depth;
  kmp_uint32 base_num_threads;
  enum init_status { initialized = 0, not_initialized = 1, initializing = 2 };
  volatile kmp_int8 uninitialized; // initialization state (see init_status)
  volatile kmp_int8 resizing; // 0 = not resizing, 1 = resizing

  // numPerLevel[i] is the fan-out at level i (level 0 = leaves); unused levels
  // are set to 1. skipPerLevel[i] is the cumulative product, i.e. the number
  // of leaves underneath one node at level i.
  kmp_uint32 *numPerLevel;
  kmp_uint32 *skipPerLevel;

  void deriveLevels() {
    int hier_depth = __kmp_topology->get_depth();
    for (int i = hier_depth - 1, level = 0; i >= 0; --i, ++level) {
      numPerLevel[level] = __kmp_topology->get_ratio(i);
    }
  }

  hierarchy_info()
      : maxLevels(7), depth(1), uninitialized(not_initialized), resizing(0) {}
  void fini() {
    if (!uninitialized && numPerLevel) {
      __kmp_free(numPerLevel);
      numPerLevel = NULL;
      uninitialized = not_initialized;
    }
  }
  void init(int num_addrs) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(
        &uninitialized, not_initialized, initializing);
    if (bool_result == 0) { // Wait for another thread's initialization
      while (TCR_1(uninitialized) != initialized)
        KMP_CPU_PAUSE();
      return;
    }
    KMP_DEBUG_ASSERT(bool_result == 1);

    numPerLevel =
        (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
    skipPerLevel = &(numPerLevel[maxLevels]);
    for (kmp_uint32 i = 0; i < maxLevels; ++i) {
      // Init numPerLevel[*] to one item per level.
      numPerLevel[i] = 1;
      skipPerLevel[i] = 1;
    }

    // Use topology information if available, otherwise guess a two-level tree.
    if (__kmp_topology && __kmp_topology->get_depth() > 0) {
      deriveLevels();
    } else {
      numPerLevel[0] = maxLeaves;
      numPerLevel[1] = num_addrs / maxLeaves;
      if (num_addrs % maxLeaves)
        numPerLevel[1]++;
    }

    base_num_threads = num_addrs;
    for (int i = maxLevels - 1; i >= 0;
         --i) // count non-empty levels to get depth
      if (numPerLevel[i] != 1 || depth > 1) // only count one top-level '1'
        depth++;

    kmp_uint32 branch = minBranch;
    if (numPerLevel[0] == 1)
      branch = num_addrs / maxLeaves;
    if (branch < minBranch)
      branch = minBranch;
    for (kmp_uint32 d = 0; d < depth - 1; ++d) { // optimize hierarchy width
      while (numPerLevel[d] > branch ||
             (d == 0 && numPerLevel[d] > maxLeaves)) { // max 4 on level 0!
        if (numPerLevel[d] & 1)
          numPerLevel[d]++;
        numPerLevel[d] = numPerLevel[d] >> 1;
        if (numPerLevel[d + 1] == 1)
          depth++;
        numPerLevel[d + 1] = numPerLevel[d + 1] << 1;
      }
      if (numPerLevel[0] == 1) {
        branch = branch >> 1;
        if (branch < 4)
          branch = minBranch;
      }
    }

    for (kmp_uint32 i = 1; i < depth; ++i)
      skipPerLevel[i] = numPerLevel[i - 1] * skipPerLevel[i - 1];
    // Fill in hierarchy in the case of oversubscription.
    for (kmp_uint32 i = depth; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    uninitialized = initialized; // One writer
  }
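  // Example of the resulting tables: for 4 packages x 4 cores x 2 threads
  // (32 hardware threads), deriveLevels() yields numPerLevel =
  // {2, 4, 4, 1, 1, 1, 1} and the loops above produce skipPerLevel =
  // {1, 2, 8, 32, 64, 128, 256}; entries past the machine depth keep doubling
  // so the tree can absorb oversubscription.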
  // Resize the hierarchy if nproc changes to something larger than before.
  void resize(kmp_uint32 nproc) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    while (bool_result == 0) { // someone else is trying to resize
      KMP_CPU_PAUSE();
      if (nproc <= base_num_threads) // happy with other thread's resize
        return;
      else // try to resize
        bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    }
    KMP_DEBUG_ASSERT(bool_result != 0);
    if (nproc <= base_num_threads)
      return; // happy with other thread's resize

    // Calculate new maxLevels.
    kmp_uint32 old_sz = skipPerLevel[depth - 1];
    kmp_uint32 incs = 0, old_maxLevels = maxLevels;
    // First see if the old maxLevels is enough to contain the new size.
    for (kmp_uint32 i = depth; i < maxLevels && nproc > old_sz; ++i) {
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];
      numPerLevel[i - 1] *= 2;
      old_sz *= 2;
      depth++;
    }
    if (nproc > old_sz) { // Not enough space, need to expand hierarchy
      while (nproc > old_sz) {
        old_sz *= 2;
        incs++;
        depth++;
      }
      maxLevels += incs;

      // Resize the arrays.
      kmp_uint32 *old_numPerLevel = numPerLevel;
      kmp_uint32 *old_skipPerLevel = skipPerLevel;
      numPerLevel = skipPerLevel = NULL;
      numPerLevel =
          (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
      skipPerLevel = &(numPerLevel[maxLevels]);

      // Copy over the old elements.
      for (kmp_uint32 i = 0; i < old_maxLevels; ++i) {
        numPerLevel[i] = old_numPerLevel[i];
        skipPerLevel[i] = old_skipPerLevel[i];
      }

      // Initialize the new elements to one item per level.
      for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i) {
        numPerLevel[i] = 1;
        skipPerLevel[i] = 1;
      }

      __kmp_free(old_numPerLevel);
    }

    // Fill in the oversubscription levels of the hierarchy.
    for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    base_num_threads = nproc;
    resizing = 0; // One writer
  }
};

#endif // KMP_AFFINITY_H