#ifndef KMP_AFFINITY_H
#define KMP_AFFINITY_H

#include "kmp.h"
#include "kmp_os.h"

#if KMP_AFFINITY_SUPPORTED
#if KMP_USE_HWLOC
class KMPHwlocAffinity : public KMPAffinity {
public:
  class Mask : public KMPAffinity::Mask {
    hwloc_cpuset_t mask;

  public:
    Mask() {
      mask = hwloc_bitmap_alloc();
      this->zero();
    }
    ~Mask() { hwloc_bitmap_free(mask); }
    void set(int i) override { hwloc_bitmap_set(mask, i); }
    bool is_set(int i) const override { return hwloc_bitmap_isset(mask, i); }
    void clear(int i) override { hwloc_bitmap_clr(mask, i); }
    void zero() override { hwloc_bitmap_zero(mask); }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      hwloc_bitmap_copy(mask, convert->mask);
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_and(mask, mask, convert->mask);
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_or(mask, mask, convert->mask);
    }
    void bitwise_not() override { hwloc_bitmap_not(mask, mask); }
    int begin() const override { return hwloc_bitmap_first(mask); }
    int end() const override { return -1; }
    int next(int previous) const override {
      return hwloc_bitmap_next(mask, previous);
    }
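    // Illustrative sketch (not part of the original header): every Mask
    // implementation is walked with the same idiom,
    //
    //   for (int i = m->begin(); i != m->end(); i = m->next(i)) {
    //     // bit i is set in the affinity mask pointed to by m
    //   }
    //
    // For this hwloc mask, begin() and next() return -1 once the bitmap is
    // exhausted, which is why end() is the constant -1. The Linux and
    // Windows masks below use the total bit count as the sentinel instead.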
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
      int retval =
          hwloc_get_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
      int retval =
          hwloc_set_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
    int get_proc_group() const override {
      int group = -1;
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        // On Windows, the long type is always 32 bits, so one 64-bit group
        // mask spans two consecutive ulongs of the hwloc bitmap.
        unsigned long first_32_bits = hwloc_bitmap_to_ith_ulong(mask, i * 2);
        unsigned long second_32_bits =
            hwloc_bitmap_to_ith_ulong(mask, i * 2 + 1);
        if (first_32_bits == 0 && second_32_bits == 0) {
          continue;
        }
        if (group >= 0) {
          return -1; // bits set in more than one processor group
        }
        group = i;
      }
#endif /* KMP_OS_WINDOWS */
      return group;
    }
  };
  void determine_capable(const char *var) override {
    const hwloc_topology_support *topology_support;
    if (__kmp_hwloc_topology == NULL) {
      if (hwloc_topology_init(&__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_init()");
      }
      if (hwloc_topology_load(__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_load()");
      }
    }
    topology_support = hwloc_topology_get_support(__kmp_hwloc_topology);
    // Affinity can only be enabled if the system supports getting and
    // setting this thread's binding, topology discovery reaches the
    // processing-unit level, and no hwloc call failed along the way.
    if (topology_support && topology_support->cpubind->set_thisthread_cpubind &&
        topology_support->cpubind->get_thisthread_cpubind &&
        topology_support->discovery->pu && !__kmp_hwloc_error) {
      // enables affinity according to KMP_AFFINITY_CAPABLE() macro
      KMP_AFFINITY_ENABLE(TRUE);
    } else {
      // indicate that hwloc didn't work and disable affinity
      __kmp_hwloc_error = TRUE;
      KMP_AFFINITY_DISABLE();
    }
  }
  void bind_thread(int which) override {
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                "Illegal set affinity operation when not capable");
    KMPAffinity::Mask *mask;
    KMP_CPU_ALLOC_ON_STACK(mask);
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(which, mask);
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE_FROM_STACK(mask);
  }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    delete[] hwloc_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    return &(hwloc_array[index]);
  }
  api_type get_api_type() const override { return HWLOC; }
};
#endif /* KMP_USE_HWLOC */
#if KMP_OS_LINUX
/* On some of the older OS's that we build on, these constants aren't present
   in <asm/unistd.h> #included from <sys/syscall.h>. They must be the same on
   all systems of the same arch where they are defined, and they cannot
   change. Stone-age compatibility. */
#include <sys/syscall.h>
#if KMP_ARCH_X86 || KMP_ARCH_ARM
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 241
#elif __NR_sched_setaffinity != 241
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 242
#elif __NR_sched_getaffinity != 242
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_AARCH64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_X86_64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 203
#elif __NR_sched_setaffinity != 203
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 204
#elif __NR_sched_getaffinity != 204
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_PPC64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 222
#elif __NR_sched_setaffinity != 222
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 223
#elif __NR_sched_getaffinity != 223
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 4239
#elif __NR_sched_setaffinity != 4239
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 4240
#elif __NR_sched_getaffinity != 4240
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 5195
#elif __NR_sched_setaffinity != 5195
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 5196
#elif __NR_sched_getaffinity != 5196
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_LOONGARCH64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#else
#error Unknown or unsupported architecture
#endif /* KMP_ARCH_* */

class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef unsigned char mask_t; // the bitmask is stored as an array of bytes
    static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;

  public:
    mask_t *mask;
    Mask() { mask = (mask_t *)__kmp_allocate(__kmp_affin_mask_size); }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
    }
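    // Worked example (illustrative): with mask_t = unsigned char,
    // BITS_PER_MASK_T is 8, so logical CPU 13 lives in mask[13 / 8] =
    // mask[1] at bit position 13 % 8 = 5; set(13) ORs in (mask_t)1 << 5.
    // The (mask_t) cast matters for the wider Windows mask_t below, where
    // 1 << 63 would overflow a plain int.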
    void zero() override {
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] = 0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    // end() is a fixed sentinel: the total number of bits in the mask.
    int end() const override { return __kmp_affin_mask_size * BITS_PER_MASK_T; }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
      int retval =
          syscall(__NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
      int retval =
          syscall(__NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
  };
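  // Editorial note: the runtime invokes sched_{get,set}affinity through
  // syscall(2) with the numbers hard-coded above rather than through the
  // glibc wrappers, so it still builds against very old C libraries that
  // lack those wrappers. A hedged sketch of the equivalent wrapper call,
  // assuming a glibc new enough to provide it:
  //
  //   cpu_set_t cs;
  //   CPU_ZERO(&cs);
  //   sched_getaffinity(0, sizeof(cs), &cs); // pid 0 = calling thread
  //
  // The raw syscall form used here passes the mask size in bytes
  // (__kmp_affin_mask_size) and a plain byte buffer instead of a cpu_set_t.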
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override {
    KMPNativeAffinity::Mask *retval = new Mask();
    return retval;
  }
  void deallocate_mask(KMPAffinity::Mask *m) override {
    KMPNativeAffinity::Mask *native_mask =
        static_cast<KMPNativeAffinity::Mask *>(m);
    delete native_mask;
  }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *linux_array = static_cast<Mask *>(array);
    delete[] linux_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *linux_array = static_cast<Mask *>(array);
    return &(linux_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_LINUX */
#if KMP_OS_WINDOWS
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef ULONG_PTR mask_t; // one processor group worth of mask bits
    static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    mask_t *mask;

  public:
    Mask() {
      mask = (mask_t *)__kmp_allocate(sizeof(mask_t) * __kmp_num_proc_groups);
    }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = 0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override { return __kmp_num_proc_groups * BITS_PER_MASK_T; }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
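    // Worked example (illustrative): with mask_t = ULONG_PTR (64 bits on a
    // 64-bit Windows build), one mask_t covers one processor group, so
    // logical CPU 70 maps to mask[70 / 64] = mask[1] (group 1), bit
    // 70 % 64 = 6. begin()/next() scan these group chunks through the same
    // flat bit indexing.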
    int set_system_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups > 1) {
        // Check for a valid mask.
        GROUP_AFFINITY ga;
        int group = get_proc_group();
        if (group < 0) {
          if (abort_on_error) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
          }
          return -1;
        }
        // Transform the bit vector into a GROUP_AFFINITY struct
        // and make the system call to set affinity.
        ga.Group = group;
        ga.Mask = mask[group];
        ga.Reserved[0] = ga.Reserved[1] = ga.Reserved[2] = 0;

        KMP_DEBUG_ASSERT(__kmp_SetThreadGroupAffinity != NULL);
        if (__kmp_SetThreadGroupAffinity(GetCurrentThread(), &ga, NULL) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      } else {
        if (!SetThreadAffinityMask(GetCurrentThread(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
    int get_system_affinity(bool abort_on_error) override {
      if (__kmp_num_proc_groups > 1) {
        this->zero();
        GROUP_AFFINITY ga;
        KMP_DEBUG_ASSERT(__kmp_GetThreadGroupAffinity != NULL);
        if (__kmp_GetThreadGroupAffinity(GetCurrentThread(), &ga) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetThreadGroupAffinity()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        // ga.Group must be a valid index into mask[].
        if ((ga.Group < 0) || (ga.Group >= __kmp_num_proc_groups) ||
            (ga.Mask == 0)) {
          return -1;
        }
        mask[ga.Group] = ga.Mask;
      } else {
        mask_t newMask, sysMask, retval;
        if (!GetProcessAffinityMask(GetCurrentProcess(), &newMask, &sysMask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetProcessAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        retval = SetThreadAffinityMask(GetCurrentThread(), newMask);
        if (!retval) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        newMask = SetThreadAffinityMask(GetCurrentThread(), retval);
        if (!newMask) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
        }
        *mask = retval;
      }
      return 0;
    }
    int get_proc_group() const override {
      int group = -1;
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        if (mask[i] == 0)
          continue;
        if (group >= 0)
          return -1; // bits set in more than one processor group
        group = i;
      }
      return group;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *windows_array = static_cast<Mask *>(array);
    delete[] windows_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *windows_array = static_cast<Mask *>(array);
    return &(windows_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_WINDOWS */
#endif /* KMP_AFFINITY_SUPPORTED */
class Address {
public:
  static const unsigned maxDepth = 32;
  unsigned labels[maxDepth];
  unsigned childNums[maxDepth];
  unsigned depth;
  unsigned leader;
  Address(unsigned _depth) : depth(_depth), leader(FALSE) {}
  Address &operator=(const Address &b) {
    depth = b.depth;
    for (unsigned i = 0; i < depth; i++) {
      labels[i] = b.labels[i];
      childNums[i] = b.childNums[i];
    }
    leader = FALSE;
    return *this;
  }
  bool operator==(const Address &b) const {
    if (depth != b.depth)
      return false;
    for (unsigned i = 0; i < depth; i++)
      if (labels[i] != b.labels[i])
        return false;
    return true;
  }
  // Two addresses are "close" at a given level if they agree on all labels
  // above that level, i.e. they share an ancestor in the topology tree.
  bool isClose(const Address &b, int level) const {
    if (depth != b.depth)
      return false;
    if ((unsigned)level >= depth)
      return true;
    for (unsigned i = 0; i < (depth - level); i++)
      if (labels[i] != b.labels[i])
        return false;
    return true;
  }
  bool operator!=(const Address &b) const { return !operator==(b); }
  void print() const {
    unsigned i;
    printf("Depth: %u --- ", depth);
    for (i = 0; i < depth; i++) {
      printf("%u ", labels[i]);
    }
  }
};

class AddrUnsPair {
public:
  Address first;
  unsigned second;
  AddrUnsPair(Address _first, unsigned _second)
      : first(_first), second(_second) {}
  AddrUnsPair &operator=(const AddrUnsPair &b) {
    first = b.first;
    second = b.second;
    return *this;
  }
  void print() const {
    printf("first = ");
    first.print();
    printf(" --- second = %u", second);
  }
  bool operator==(const AddrUnsPair &b) const {
    if (first != b.first)
      return false;
    if (second != b.second)
      return false;
    return true;
  }
  bool operator!=(const AddrUnsPair &b) const { return !operator==(b); }
};
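// Illustrative sketch (not part of the original header): an Address encodes
// a thread's position in the machine topology as labels from the outermost
// level inward, e.g. {package, core, thread}. Two depth-3 addresses
// {0, 2, 1} and {0, 2, 0} satisfy isClose(b, 1): ignoring the last level,
// both share the prefix {0, 2}, i.e. the same core on the same package.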
static int __kmp_affinity_cmp_Address_labels(const void *a, const void *b) {
  const Address *aa = &(((const AddrUnsPair *)a)->first);
  const Address *bb = &(((const AddrUnsPair *)b)->first);
  unsigned depth = aa->depth;
  unsigned i;
  KMP_DEBUG_ASSERT(depth == bb->depth);
  for (i = 0; i < depth; i++) {
    if (aa->labels[i] < bb->labels[i])
      return -1;
    if (aa->labels[i] > bb->labels[i])
      return 1;
  }
  return 0;
}
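// Illustrative usage (mirrors the call in hierarchy_info::init below):
//
//   AddrUnsPair *adr2os = ...; // topology address -> OS proc id table
//   qsort(adr2os, num_addrs, sizeof(*adr2os),
//         __kmp_affinity_cmp_Address_labels);
//
// Sorting lexicographically by labels keeps topologically close threads
// (same package, then same core) adjacent in the table.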
/* A structure for holding machine-specific hierarchy info to be computed
   once at init. It maps threads to the actual machine hierarchy, or to our
   best guess at it, for the purpose of performing an efficient barrier. */
class hierarchy_info {
public:
  /* Good default values for number of leaves and branching factor, given no
     affinity information. Behaves nearly like a radix-4 tree. */
  static const kmp_uint32 maxLeaves = 4;
  static const kmp_uint32 minBranch = 4;
  /** Number of levels in the hierarchy. When the machine is oversubscribed,
      levels are added; each added level doubles the thread capacity. */
  kmp_uint32 maxLevels;
  /** Depth of the machine configuration hierarchy: the number of levels
      along the longest path from root to any leaf. */
  kmp_uint32 depth;
  kmp_uint32 base_num_threads;
  enum init_status { initialized = 0, not_initialized = 1, initializing = 2 };
  volatile kmp_int8 uninitialized; // holds an init_status value
  volatile kmp_int8 resizing; // 0 = not resizing, 1 = resizing

  /** Level 0 corresponds to leaves. numPerLevel[i] is the number of children
      the parent of a node at level i has at level i+1. */
  kmp_uint32 *numPerLevel;
  kmp_uint32 *skipPerLevel;
  void deriveLevels(AddrUnsPair *adr2os, int num_addrs) {
    int hier_depth = adr2os[0].first.depth;
    int level = 0;
    for (int i = hier_depth - 1; i >= 0; --i) {
      int max = -1;
      for (int j = 0; j < num_addrs; ++j) {
        int next = adr2os[j].first.childNums[i];
        if (next > max)
          max = next;
      }
      numPerLevel[level] = max + 1;
      ++level;
    }
  }

  hierarchy_info()
      : maxLevels(7), depth(1), uninitialized(not_initialized), resizing(0) {}
  void fini() {
    if (!uninitialized && numPerLevel) {
      __kmp_free(numPerLevel);
      numPerLevel = NULL;
      uninitialized = not_initialized;
    }
  }
  void init(AddrUnsPair *adr2os, int num_addrs) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(
        &uninitialized, not_initialized, initializing);
    if (bool_result == 0) { // Wait for initialization
      while (TCR_1(uninitialized) != initialized)
        KMP_CPU_PAUSE();
      return;
    }
    KMP_DEBUG_ASSERT(bool_result == 1);

    /* Explicitly initialize the data fields here to avoid dirty values when
       a static library is re-initialized multiple times (e.g. when a
       non-OpenMP thread repeatedly launches/joins a thread using OpenMP). */
    depth = 1;
    resizing = 0;
    maxLevels = 7;
    numPerLevel =
        (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
    skipPerLevel = &(numPerLevel[maxLevels]);
    for (kmp_uint32 i = 0; i < maxLevels;
         ++i) { // init numPerLevel[*] to 1 item per level
      numPerLevel[i] = 1;
      skipPerLevel[i] = 1;
    }

    // Sort table by physical ID
    if (adr2os) {
      qsort(adr2os, num_addrs, sizeof(*adr2os),
            __kmp_affinity_cmp_Address_labels);
      deriveLevels(adr2os, num_addrs);
    } else {
      numPerLevel[0] = maxLeaves;
      numPerLevel[1] = num_addrs / maxLeaves;
      if (num_addrs % maxLeaves)
        numPerLevel[1]++;
    }

    base_num_threads = num_addrs;
    for (int i = maxLevels - 1; i >= 0;
         --i) // count non-empty levels to get depth
      if (numPerLevel[i] != 1 || depth > 1) // only count one top-level '1'
        depth++;

    kmp_uint32 branch = minBranch;
    if (numPerLevel[0] == 1)
      branch = num_addrs / maxLeaves;
    if (branch < minBranch)
      branch = minBranch;
    for (kmp_uint32 d = 0; d < depth - 1; ++d) { // optimize hierarchy width
      while (numPerLevel[d] > branch ||
             (d == 0 && numPerLevel[d] > maxLeaves)) { // max 4 on level 0!
        if (numPerLevel[d] & 1)
          numPerLevel[d]++;
        numPerLevel[d] = numPerLevel[d] >> 1;
        if (numPerLevel[d + 1] == 1)
          depth++;
        numPerLevel[d + 1] = numPerLevel[d + 1] << 1;
      }
      if (numPerLevel[0] == 1) {
        branch = branch >> 1;
        if (branch < 4)
          branch = minBranch;
      }
    }

    for (kmp_uint32 i = 1; i < depth; ++i)
      skipPerLevel[i] = numPerLevel[i - 1] * skipPerLevel[i - 1];
    // Fill in hierarchy in the case of oversubscription
    for (kmp_uint32 i = depth; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    uninitialized = initialized; // One writer
  }
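  // Worked example (illustrative): init(NULL, 16) takes the no-topology
  // branch, so numPerLevel = {4, 4, 1, ...} (16 leaves in groups of
  // maxLeaves = 4), and the skipPerLevel scan then yields
  // skipPerLevel = {1, 4, 16, 32, 64, 128, 256}: entries below depth are
  // cumulative products of numPerLevel, and the oversubscription entries
  // above simply double, each one doubling the thread capacity.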
  // Resize the hierarchy if nproc changes to something larger than before
  void resize(kmp_uint32 nproc) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    while (bool_result == 0) { // someone else is trying to resize
      KMP_CPU_PAUSE();
      if (nproc <= base_num_threads) // happy with other thread's resize
        return;
      else // try to resize
        bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    }
    KMP_DEBUG_ASSERT(bool_result != 0);
    if (nproc <= base_num_threads)
      return; // happy with other thread's resize

    // Calculate new maxLevels
    kmp_uint32 old_sz = skipPerLevel[depth - 1];
    kmp_uint32 incs = 0, old_maxLevels = maxLevels;
    // First see if old maxLevels is enough to contain new size
    for (kmp_uint32 i = depth; i < maxLevels && nproc > old_sz; ++i) {
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];
      numPerLevel[i - 1] *= 2;
      old_sz *= 2;
      depth++;
    }
    if (nproc > old_sz) { // Not enough space, need to expand hierarchy
      while (nproc > old_sz) {
        old_sz *= 2;
        incs++;
        depth++;
      }
      maxLevels += incs;

      // Resize arrays
      kmp_uint32 *old_numPerLevel = numPerLevel;
      kmp_uint32 *old_skipPerLevel = skipPerLevel;
      numPerLevel = skipPerLevel = NULL;
      numPerLevel =
          (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
      skipPerLevel = &(numPerLevel[maxLevels]);

      // Copy old elements from old arrays
      for (kmp_uint32 i = 0; i < old_maxLevels; ++i) {
        numPerLevel[i] = old_numPerLevel[i];
        skipPerLevel[i] = old_skipPerLevel[i];
      }

      // Init new elements in arrays to 1
      for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i) {
        numPerLevel[i] = 1;
        skipPerLevel[i] = 1;
      }

      // Free old arrays
      __kmp_free(old_numPerLevel);
    }

    // Fill in oversubscription levels of hierarchy
    for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    base_num_threads = nproc;
    resizing = 0; // One writer
  }
};
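// Worked example (illustrative): after init(NULL, 16), resize(40) finds
// skipPerLevel[depth - 1] = 16 < 40 and consumes two of the pre-doubled
// oversubscription levels (capacity 32, then 64 >= 40) without
// reallocating; only a request beyond the last precomputed level
// (capacity 256 here) would grow the arrays themselves.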
#endif // KMP_AFFINITY_H