diff --git a/doc/main/tbb_userguide/Guiding_Task_Scheduler_Execution.rst b/doc/main/tbb_userguide/Guiding_Task_Scheduler_Execution.rst
index 7204d41cb0..075945463c 100644
--- a/doc/main/tbb_userguide/Guiding_Task_Scheduler_Execution.rst
+++ b/doc/main/tbb_userguide/Guiding_Task_Scheduler_Execution.rst
@@ -40,7 +40,9 @@ The execution on systems with non-uniform memory access (NUMA https://en.wikiped
 may cause a performance penalty if threads from one NUMA node access the memory allocated on a
 different NUMA node. To reduce this overhead, the work may be divided among several ``task_arena``
 instances, whose execution preference is set to different NUMA nodes. To set execution preference,
-assign a NUMA node identifier to the ``task_arena::constraints::numa_id`` field.
+assign a NUMA node identifier to the ``task_arena::constraints::numa_id`` field or use
+the ``tbb::create_numa_task_arenas`` function to create a set of task arenas,
+one per NUMA node on the system.
 
 .. literalinclude:: ./examples/guiding_task_scheduler_execution.cpp
    :language: c++
diff --git a/doc/main/tbb_userguide/examples/guiding_task_scheduler_execution.cpp b/doc/main/tbb_userguide/examples/guiding_task_scheduler_execution.cpp
index fb211f209a..466467762a 100644
--- a/doc/main/tbb_userguide/examples/guiding_task_scheduler_execution.cpp
+++ b/doc/main/tbb_userguide/examples/guiding_task_scheduler_execution.cpp
@@ -22,26 +22,21 @@
 void set_numa_node_example() {
     /*begin_set_numa_node_example*/
-    std::vector<tbb::numa_node_id> numa_nodes = tbb::info::numa_nodes();
-    std::vector<tbb::task_arena> arenas(numa_nodes.size());
-    std::vector<tbb::task_group> task_groups(numa_nodes.size());
-
-    // Since the library creates one less worker thread than the number of total cores,
-    // all but one of the arenas are created without reserving a slot for an external thread,
-    // allowing them to be fully populated.
-    for(unsigned j = 1; j < numa_nodes.size(); j++) {
-        arenas[j].initialize(tbb::task_arena::constraints(numa_nodes[j]), /*reserved_slots*/ 0);
-        arenas[j].enqueue([](){/*some parallel work*/}, task_groups[j]);
+    std::vector<tbb::task_arena> numa_arenas = tbb::create_numa_task_arenas();
+    std::vector<tbb::task_group> task_groups(numa_arenas.size());
+
+    // Enqueue work to all but the first arena
+    for(unsigned j = 1; j < numa_arenas.size(); j++) {
+        numa_arenas[j].enqueue([](){/*some parallel work*/}, task_groups[j]);
     }
 
-    // The main thread executes in the arena with the reserved slot.
-    arenas[0].initialize(tbb::task_arena::constraints(numa_nodes[0]));
-    arenas[0].execute([&task_groups](){
-        task_groups[0].run([](){/*some parallel work*/});
+    // The main thread directly executes the work in the remaining arena
+    numa_arenas[0].execute([&task_groups](){
+        task_groups[0].run_and_wait([](){/*some parallel work*/});
     });
 
-    for(unsigned j = 0; j < numa_nodes.size(); j++) {
-        arenas[j].wait_for(task_groups[j]);
+    for(unsigned j = 1; j < numa_arenas.size(); j++) {
+        numa_arenas[j].wait_for(task_groups[j]);
     }
     /*end_set_numa_node_example*/
 }
 