#include <rtems/score/schedulerstrongapa.h>
#include <rtems/score/schedulersmpimpl.h>
#include <rtems/score/assert.h>

static void _Scheduler_strong_APA_Do_update(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   new_priority
)
{
  Scheduler_SMP_Node *smp_node;

  (void) context;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
}
static bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context )
{
  Scheduler_strong_APA_Context *self = _Scheduler_strong_APA_Get_self( context );
  const Chain_Node             *tail;
  Chain_Node                   *next;

  tail = _Chain_Immutable_tail( &self->Ready );
  next = _Chain_First( &self->Ready );

  while ( next != tail ) {
    Scheduler_strong_APA_Node *node =
      (Scheduler_strong_APA_Node *) STRONG_SCHEDULER_NODE_OF_CHAIN( next );

    /* A single READY node on the Ready chain is enough. */
    if (
      _Scheduler_SMP_Node_state( &node->Base.Base ) ==
        SCHEDULER_SMP_NODE_READY
    ) {
      return true;
    }

    next = _Chain_Next( next );
  }

  return false;
}
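/*
 * For orientation: STRONG_SCHEDULER_NODE_OF_CHAIN above maps the embedded
 * Ready_node chain node back to its enclosing Scheduler_strong_APA_Node.
 * Its body is not shown in this listing, so here is a minimal sketch of the
 * container-of idiom such a macro would follow (illustrative name and
 * definition, not the literal RTEMS macro):
 */
#include <stddef.h>

#define NODE_OF_CHAIN_SKETCH( chain_node ) \
  ( (Scheduler_strong_APA_Node *) \
    ( (char *) ( chain_node ) \
      - offsetof( Scheduler_strong_APA_Node, Ready_node ) ) )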
static void _Scheduler_strong_APA_Set_scheduled(
  Scheduler_strong_APA_Context *self,
  Scheduler_Node               *executing,
  const Per_CPU_Control        *cpu
)
{
  self->CPU[ _Per_CPU_Get_index( cpu ) ].executing = executing;
}

static Scheduler_Node *_Scheduler_strong_APA_Get_scheduled(
  const Scheduler_strong_APA_Context *self,
  const Per_CPU_Control              *cpu
)
{
  return self->CPU[ _Per_CPU_Get_index( cpu ) ].executing;
}
static void _Scheduler_strong_APA_Allocate_processor(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_base,
  Scheduler_Node    *victim_base,
  Per_CPU_Control   *victim_cpu
)
{
  Scheduler_strong_APA_Node    *scheduled;
  Scheduler_strong_APA_Context *self;

  (void) victim_base;

  scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base );
  self = _Scheduler_strong_APA_Get_self( context );

  /* Record which node runs on the victim CPU, then hand the CPU over. */
  _Scheduler_strong_APA_Set_scheduled( self, scheduled_base, victim_cpu );

  _Scheduler_SMP_Allocate_processor_exact(
    context,
    &( scheduled->Base.Base ),
    NULL,
    victim_cpu
  );
}
static Scheduler_Node *_Scheduler_strong_APA_Find_highest_ready(
  Scheduler_strong_APA_Context *self,
  uint32_t                      front,
  uint32_t                      rear
)
{
  Scheduler_Node            *highest_ready;
  Scheduler_strong_APA_Node *node;
  Chain_Node                *next;
  const Chain_Node          *tail;
  Priority_Control           min_priority_num;
  Priority_Control           curr_priority;
  Per_CPU_Control           *assigned_cpu;
  Scheduler_SMP_Node_state   curr_state;
  Per_CPU_Control           *curr_CPU;
  Scheduler_strong_APA_CPU  *CPU;

  CPU = self->CPU;

  /* Nothing has been compared yet, so everything beats this value. */
  min_priority_num = UINT64_MAX;

  while ( front <= rear ) {
    curr_CPU = CPU[ front++ ].cpu;

    tail = _Chain_Immutable_tail( &self->Ready );
    next = _Chain_First( &self->Ready );

    while ( next != tail ) {
      node = (Scheduler_strong_APA_Node *) STRONG_SCHEDULER_NODE_OF_CHAIN( next );

      /* Consider only nodes with curr_CPU in their affinity set. */
      if (
        _Processor_mask_Is_set( &node->Affinity, _Per_CPU_Get_index( curr_CPU ) )
      ) {
        curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );

        if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
          assigned_cpu = _Thread_Get_CPU( node->Base.Base.user );

          /* Moving this node away would free its CPU: enqueue that CPU. */
          if ( CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited == false ) {
            CPU[ ++rear ].cpu = assigned_cpu;
            CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited = true;
            node->cpu_to_preempt = curr_CPU;
          }
        } else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
          curr_priority = _Scheduler_Node_get_priority( &node->Base.Base );
          curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );

          if (
            min_priority_num == UINT64_MAX ||
            curr_priority < min_priority_num
          ) {
            min_priority_num = curr_priority;
            highest_ready = &node->Base.Base;
            node->cpu_to_preempt = curr_CPU;
          }
        }
      }

      next = _Chain_Next( next );
    }
  }

  return highest_ready;
}
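/*
 * The two loops above implement a breadth-first search over processors: the
 * CPU[] array doubles as the BFS queue (front/rear indices) and as the
 * visited set.  Reduced to its essentials, and with illustrative names that
 * are not part of the RTEMS API, the queue discipline looks like this:
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_MAX_CPUS 32

typedef struct {
  uint32_t items[ SKETCH_MAX_CPUS ];   /* queued CPU indices            */
  uint32_t head;                       /* next position to dequeue      */
  uint32_t tail;                       /* next position to enqueue      */
  bool     visited[ SKETCH_MAX_CPUS ]; /* each CPU enqueued at most once */
} Sketch_cpu_queue;

/* Enqueue a CPU unless the search has already reached it. */
static void sketch_enqueue( Sketch_cpu_queue *q, uint32_t cpu_index )
{
  if ( !q->visited[ cpu_index ] ) {
    q->visited[ cpu_index ] = true;
    q->items[ q->tail++ ] = cpu_index;
  }
}

/* Dequeue the next CPU; returns false once the search is complete. */
static bool sketch_dequeue( Sketch_cpu_queue *q, uint32_t *cpu_index )
{
  if ( q->head == q->tail ) {
    return false;
  }

  *cpu_index = q->items[ q->head++ ];
  return true;
}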
static void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *ready_to_scheduled
)
{
  Priority_Control insert_priority;

  insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
  _Scheduler_SMP_Insert_scheduled(
    context,
    ready_to_scheduled,
    insert_priority
  );
}
static Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_strong_APA_Context *self;
  Per_CPU_Control              *filter_cpu;
  Scheduler_strong_APA_Node    *node;
  Scheduler_Node               *highest_ready;
  Scheduler_Node               *curr_node;
  Scheduler_Node               *next_node;
  Scheduler_strong_APA_CPU     *CPU;
  uint32_t                      front;
  uint32_t                      rear;
  uint32_t                      cpu_max;
  uint32_t                      cpu_index;

  self = _Scheduler_strong_APA_Get_self( context );
  CPU = self->CPU;

  /* Front and rear of the BFS queue of processors. */
  front = 0;
  rear = -1;

  filter_cpu = _Thread_Get_CPU( filter->user );
  cpu_max = _SMP_Get_processor_maximum();

  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
    CPU[ cpu_index ].visited = false;
  }

  /* Start the search at the CPU of the filtered (victim) node. */
  CPU[ ++rear ].cpu = filter_cpu;
  CPU[ _Per_CPU_Get_index( filter_cpu ) ].visited = true;

  highest_ready = _Scheduler_strong_APA_Find_highest_ready( self, front, rear );

  if ( highest_ready != filter ) {
    /*
     * The highest ready node is not directly reachable from the victim CPU,
     * so backtrack along the recorded path, shifting each task to the CPU
     * stored in its cpu_to_preempt field.
     */
    node = _Scheduler_strong_APA_Node_downcast( highest_ready );

    while ( node->cpu_to_preempt != filter_cpu ) {
      curr_node = &node->Base.Base;
      next_node = _Scheduler_strong_APA_Get_scheduled(
        self,
        node->cpu_to_preempt
      );

      (void) _Scheduler_SMP_Preempt(
        context,
        curr_node,
        next_node,
        _Scheduler_strong_APA_Allocate_processor
      );

      if ( curr_node == highest_ready ) {
        _Scheduler_strong_APA_Move_from_ready_to_scheduled( context, curr_node );
      }

      node = _Scheduler_strong_APA_Node_downcast( next_node );
    }

    /* Return the last node on the path so the caller can allocate it. */
    curr_node = &node->Base.Base;
    highest_ready = curr_node;
  }

  return highest_ready;
}
static Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base
)
{
  uint32_t                      cpu_max;
  uint32_t                      cpu_index;
  Scheduler_Node               *curr_node;
  Scheduler_Node               *lowest_scheduled = NULL;
  Priority_Control              max_priority_num;
  Priority_Control              curr_priority;
  Scheduler_strong_APA_Node    *filter_strong_node;
  Scheduler_strong_APA_Context *self;

  self = _Scheduler_strong_APA_Get_self( context );
  max_priority_num = 0; /* Lowest priority seen so far (numerically largest) */
  filter_strong_node = _Scheduler_strong_APA_Node_downcast( filter_base );

  /* lowest_scheduled would stay NULL if the affinity set were empty. */
  _Assert( !_Processor_mask_Is_zero( &filter_strong_node->Affinity ) );
  cpu_max = _SMP_Get_processor_maximum();

  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
    /* Consider only CPUs in the affinity set of filter_strong_node. */
    if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index ) ) {
      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

      if ( _Per_CPU_Is_processor_online( cpu ) ) {
        curr_node = _Scheduler_strong_APA_Get_scheduled( self, cpu );
        curr_priority = _Scheduler_Node_get_priority( curr_node );
        curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );

        if ( curr_priority > max_priority_num ) {
          lowest_scheduled = curr_node;
          max_priority_num = curr_priority;
        }
      }
    }
  }

  _Assert( lowest_scheduled != NULL );
  return lowest_scheduled;
}
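/*
 * Note on the comparison above: in RTEMS a numerically larger
 * Priority_Control value means a less important thread, so the "lowest
 * scheduled" node is the one whose purified priority is maximal.  A rough
 * sketch of the purify/append convention assumed throughout this file (the
 * real macros live in the superscore scheduler headers; the flag value here
 * is illustrative):
 */
#include <stdint.h>

#define SKETCH_PRIORITY_APPEND_FLAG 1u

/* Strip the ordering bit so nodes of equal level compare equal. */
#define SKETCH_PRIORITY_PURIFY( p ) \
  ( ( p ) & ~( (uint64_t) SKETCH_PRIORITY_APPEND_FLAG ) )

/* Mark a priority so that insertion goes after peers of the same level. */
#define SKETCH_PRIORITY_APPEND( p ) \
  ( ( p ) | (uint64_t) SKETCH_PRIORITY_APPEND_FLAG )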
static void _Scheduler_strong_APA_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
)
{
  Scheduler_strong_APA_Context *self;
  Scheduler_strong_APA_Node    *node;

  self = _Scheduler_strong_APA_Get_self( context );
  node = _Scheduler_strong_APA_Node_downcast( node_to_extract );

  _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base );
  /* Not removed from the Ready chain: the node may go to the READY state. */
}
static void _Scheduler_strong_APA_Extract_from_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
)
{
  Scheduler_strong_APA_Context *self;
  Scheduler_strong_APA_Node    *node;

  self = _Scheduler_strong_APA_Get_self( context );
  node = _Scheduler_strong_APA_Node_downcast( node_to_extract );

  if ( !_Chain_Is_node_off_chain( &node->Ready_node ) ) {
    _Chain_Extract_unprotected( &node->Ready_node );
    _Chain_Set_off_chain( &node->Ready_node );
  }
}
static void _Scheduler_strong_APA_Insert_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  Priority_Control   insert_priority
)
{
  Scheduler_strong_APA_Context *self;
  Scheduler_strong_APA_Node    *node;

  self = _Scheduler_strong_APA_Get_self( context );
  node = _Scheduler_strong_APA_Node_downcast( node_base );

  if ( _Chain_Is_node_off_chain( &node->Ready_node ) ) {
    _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
  } else {
    _Chain_Extract_unprotected( &node->Ready_node );
    _Chain_Set_off_chain( &node->Ready_node );
    _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
  }
}
static void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_to_ready
)
{
  Priority_Control insert_priority;

  if ( !_Chain_Is_node_off_chain( &scheduled_to_ready->Node.Chain ) ) {
    _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
  }

  insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );

  _Scheduler_strong_APA_Insert_ready(
    context,
    scheduled_to_ready,
    insert_priority
  );
}
static Scheduler_Node *_Scheduler_strong_APA_Get_lowest_reachable(
  Scheduler_strong_APA_Context *self,
  uint32_t                      front,
  uint32_t                      rear,
  Per_CPU_Control             **cpu_to_preempt
)
{
  Scheduler_Node            *lowest_reachable;
  Priority_Control           max_priority_num;
  uint32_t                   cpu_max;
  uint32_t                   cpu_index;
  Thread_Control            *curr_thread;
  Per_CPU_Control           *curr_CPU;
  Priority_Control           curr_priority;
  Scheduler_Node            *curr_node;
  Scheduler_strong_APA_Node *curr_strong_node;
  Scheduler_strong_APA_CPU  *CPU;

  CPU = self->CPU;
  max_priority_num = 0; /* Lowest priority seen so far (numerically largest) */
  cpu_max = _SMP_Get_processor_maximum();

  while ( front <= rear ) {
    curr_CPU = CPU[ front ].cpu;
    front = front + 1;

    curr_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
    curr_thread = curr_node->user;

    curr_priority = _Scheduler_Node_get_priority( curr_node );
    curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );

    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );

    if ( curr_priority > max_priority_num ) {
      lowest_reachable = curr_node;
      max_priority_num = curr_priority;
      *cpu_to_preempt = curr_CPU;
    }

    if ( !curr_thread->is_idle ) {
      for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
        if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) {
          Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

          if (
            _Per_CPU_Is_processor_online( cpu ) &&
            CPU[ cpu_index ].visited == false
          ) {
            rear = rear + 1;
            CPU[ rear ].cpu = cpu;
            CPU[ cpu_index ].visited = true;
            CPU[ cpu_index ].preempting_node = curr_node;
          }
        }
      }
    }
  }

  return lowest_reachable;
}
static bool _Scheduler_strong_APA_Do_enqueue(
  Scheduler_Context *context,
  Scheduler_Node    *lowest_reachable,
  Scheduler_Node    *node,
  Priority_Control   insert_priority,
  Per_CPU_Control   *cpu_to_preempt
)
{
  bool                          needs_help;
  Priority_Control              node_priority;
  Priority_Control              lowest_priority;
  Scheduler_strong_APA_CPU     *CPU;
  Scheduler_Node               *curr_node;
  Scheduler_strong_APA_Node    *curr_strong_node;
  Per_CPU_Control              *curr_CPU;
  Scheduler_strong_APA_Context *self;
  Scheduler_Node               *next_node;

  self = _Scheduler_strong_APA_Get_self( context );
  CPU = self->CPU;

  node_priority = _Scheduler_Node_get_priority( node );
  node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );

  if ( lowest_reachable == NULL ) {
    /* The affinity set of the newly arrived node is empty. */
    lowest_priority = UINT64_MAX;
  } else {
    lowest_priority = _Scheduler_Node_get_priority( lowest_reachable );
    lowest_priority = SCHEDULER_PRIORITY_PURIFY( lowest_priority );
  }

  if ( lowest_priority > node_priority ) {
    /*
     * Backtrack along the recorded path from the preempted CPU to the new
     * node, storing in each node the CPU it must preempt.
     */
    curr_node = CPU[ _Per_CPU_Get_index( cpu_to_preempt ) ].preempting_node;
    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
    curr_strong_node->cpu_to_preempt = cpu_to_preempt;

    while ( curr_node != node ) {
      curr_CPU = _Thread_Get_CPU( curr_node->user );
      curr_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].preempting_node;
      curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
      curr_strong_node->cpu_to_preempt = curr_CPU;
    }

    /* The new node takes the scheduled slot of its immediate victim. */
    curr_CPU = curr_strong_node->cpu_to_preempt;
    next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );

    node_priority = _Scheduler_Node_get_priority( curr_node );
    node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );

    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      curr_node,
      node_priority,
      next_node,
      _Scheduler_SMP_Insert_scheduled,
      _Scheduler_strong_APA_Move_from_scheduled_to_ready,
      _Scheduler_strong_APA_Allocate_processor
    );

    /* Shift every task on the path one hop toward the freed CPU. */
    curr_node = next_node;
    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );

    while ( curr_node != lowest_reachable ) {
      curr_CPU = curr_strong_node->cpu_to_preempt;
      next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );

      _Scheduler_SMP_Preempt(
        context,
        curr_node,
        next_node,
        _Scheduler_strong_APA_Allocate_processor
      );

      curr_node = next_node;
      curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
    }

    _Scheduler_strong_APA_Move_from_scheduled_to_ready( context, lowest_reachable );

    needs_help = false;
  } else {
    needs_help = true;
  }

  /* The node is now either scheduled or ready: add it to the Ready chain. */
  _Scheduler_strong_APA_Insert_ready( context, node, insert_priority );

  return needs_help;
}
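/*
 * The two while loops above unwind the BFS: every CPU on the path recorded,
 * in preempting_node, the node whose migration would free it.  Walking that
 * record from cpu_to_preempt back to the newly enqueued node yields a chain
 * of one-hop migrations, each of which respects the affinity masks checked
 * during the search.  Schematically (illustrative types, not the RTEMS API;
 * the migration itself is elided):
 */
typedef struct Sketch_hop {
  struct Sketch_hop *freed_by; /* CPU freed when this CPU's node moves  */
  int                node;     /* node that ends up running on this CPU */
} Sketch_hop;

/* Traversal skeleton: apply the displacement chain one hop at a time. */
static void sketch_apply_chain( Sketch_hop *preempted_cpu )
{
  Sketch_hop *hop;

  for ( hop = preempted_cpu; hop != NULL; hop = hop->freed_by ) {
    /* migrate hop->node onto the CPU represented by hop */
  }
}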
static bool _Scheduler_strong_APA_Enqueue(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  Scheduler_strong_APA_Context *self;
  Scheduler_strong_APA_CPU     *CPU;
  uint32_t                      cpu_max;
  uint32_t                      cpu_index;
  Per_CPU_Control              *cpu_to_preempt;
  Scheduler_Node               *lowest_reachable;
  Scheduler_strong_APA_Node    *strong_node;

  /* Front and rear of the BFS queue of processors. */
  uint32_t front = 0;
  uint32_t rear = -1;

  self = _Scheduler_strong_APA_Get_self( context );
  strong_node = _Scheduler_strong_APA_Node_downcast( node );
  cpu_max = _SMP_Get_processor_maximum();
  CPU = self->CPU;

  /* Seed the queue with every online CPU in the node's affinity set. */
  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
    CPU[ cpu_index ].visited = false;

    if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index ) ) {
      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

      if ( _Per_CPU_Is_processor_online( cpu ) ) {
        rear = rear + 1;
        CPU[ rear ].cpu = cpu;
        CPU[ cpu_index ].visited = true;
        CPU[ cpu_index ].preempting_node = node;
      }
    }
  }

  lowest_reachable = _Scheduler_strong_APA_Get_lowest_reachable(
    self,
    front,
    rear,
    &cpu_to_preempt
  );

  return _Scheduler_strong_APA_Do_enqueue(
    context,
    lowest_reachable,
    node,
    insert_priority,
    cpu_to_preempt
  );
}
static bool _Scheduler_strong_APA_Enqueue_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  return _Scheduler_SMP_Enqueue_scheduled(
    context,
    node,
    insert_priority,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Get_highest_ready,
    _Scheduler_strong_APA_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    _Scheduler_strong_APA_Allocate_processor
  );
}
static bool _Scheduler_strong_APA_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control    *the_thread,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_strong_APA_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_strong_APA_Move_from_scheduled_to_ready,
    _Scheduler_strong_APA_Get_lowest_scheduled,
    _Scheduler_strong_APA_Allocate_processor
  );
}
static void _Scheduler_strong_APA_Do_set_affinity(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  void              *arg
)
{
  Scheduler_strong_APA_Node *node;

  node = _Scheduler_strong_APA_Node_downcast( node_base );
  node->Affinity = *( (const Processor_mask *) arg );

  (void) context;
}
void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler )
{
  Scheduler_strong_APA_Context *self =
    _Scheduler_strong_APA_Get_context( scheduler );

  _Scheduler_SMP_Initialize( &self->Base );
  _Chain_Initialize_empty( &self->Ready );
}
void _Scheduler_strong_APA_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Yield(
    context,
    thread,
    node,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Enqueue,
    _Scheduler_strong_APA_Enqueue_scheduled
  );
}
void _Scheduler_strong_APA_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  /* The extract-from-ready callback also removes the node from Ready. */
  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_strong_APA_Extract_from_scheduled,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Get_highest_ready,
    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    _Scheduler_strong_APA_Allocate_processor
  );
}
void _Scheduler_strong_APA_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_strong_APA_Do_update,
    _Scheduler_strong_APA_Enqueue
  );
}
void _Scheduler_strong_APA_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Do_update,
    _Scheduler_strong_APA_Enqueue,
    _Scheduler_strong_APA_Enqueue_scheduled,
    _Scheduler_strong_APA_Do_ask_for_help
  );
}
bool _Scheduler_strong_APA_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_strong_APA_Do_ask_for_help( context, the_thread, node );
}
void _Scheduler_strong_APA_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_strong_APA_Extract_from_ready
  );
}
void _Scheduler_strong_APA_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Thread_Scheduler_state   next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Get_highest_ready,
    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    _Scheduler_strong_APA_Allocate_processor
  );
}
static void _Scheduler_strong_APA_Register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle_base,
  Per_CPU_Control   *cpu
)
{
  Scheduler_strong_APA_Context *self = _Scheduler_strong_APA_Get_self( context );

  _Scheduler_strong_APA_Set_scheduled( self, idle_base, cpu );
}
void _Scheduler_strong_APA_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_strong_APA_Has_ready,
    _Scheduler_strong_APA_Enqueue_scheduled,
    _Scheduler_strong_APA_Register_idle
  );
}
void _Scheduler_strong_APA_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Do_start_idle(
    context,
    idle,
    cpu,
    _Scheduler_strong_APA_Register_idle
  );
}
Thread_Control *_Scheduler_strong_APA_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Enqueue
  );
}
void _Scheduler_strong_APA_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_SMP_Node        *smp_node;
  Scheduler_strong_APA_Node *strong_node;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  strong_node = _Scheduler_strong_APA_Node_downcast( node );

  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );

  /* A new node may initially run on any online processor. */
  _Processor_mask_Assign(
    &strong_node->Affinity,
    _SMP_Get_online_processors()
  );
}
bool _Scheduler_strong_APA_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  const Processor_mask    *affinity
)
{
  Scheduler_Context         *context;
  Scheduler_strong_APA_Node *node;
  Processor_mask             local_affinity;

  context = _Scheduler_Get_context( scheduler );
  _Processor_mask_And( &local_affinity, &context->Processors, affinity );

  if ( _Processor_mask_Is_zero( &local_affinity ) ) {
    return false;
  }

  node = _Scheduler_strong_APA_Node_downcast( node_base );

  if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) ) {
    return true; /* Nothing to do. */
  }

  _Processor_mask_Assign( &node->Affinity, &local_affinity );

  _Scheduler_SMP_Set_affinity(
    context,
    thread,
    node_base,
    &local_affinity,
    _Scheduler_strong_APA_Do_set_affinity,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Get_highest_ready,
    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    _Scheduler_strong_APA_Enqueue,
    _Scheduler_strong_APA_Allocate_processor
  );

  return true;
}
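/*
 * Usage sketch: how an application-level affinity change reaches
 * _Scheduler_strong_APA_Set_affinity().  rtems_task_set_affinity() is the
 * Classic API entry point; the task id and CPU number below are
 * illustrative values.
 */
#include <rtems.h>

static void sketch_pin_self_to_cpu_1( void )
{
  cpu_set_t cpuset;

  CPU_ZERO( &cpuset );
  CPU_SET( 1, &cpuset );

  /* Restrict the calling task to processor 1; the scheduler rejects the
   * request if the resulting affinity set would be empty. */
  (void) rtems_task_set_affinity( RTEMS_SELF, sizeof( cpuset ), &cpuset );
}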
Cross-reference index (declarations referenced in this listing):

Types and members:

  schedulerstrongapa.h: Scheduler context and node definition for the Strong APA scheduler.

  Scheduler_strong_APA_Node: Scheduler node specialization for Strong APA schedulers.
    Scheduler_SMP_Node Base: SMP scheduler node.
    Chain_Node Ready_node: Chain node for Scheduler_strong_APA_Context::Ready.
    Per_CPU_Control *cpu_to_preempt: CPU that this node would preempt in the backtracking part of _Scheduler_strong_APA_Get_highest_ready ...
    Processor_mask Affinity: The associated affinity set of this node.

  Scheduler_strong_APA_CPU: CPU related variables and a CPU_Control to implement BFS.
    Per_CPU_Control *cpu: CPU in a queue.
    Scheduler_Node *preempting_node: The node that would preempt this CPU.
    bool visited: Whether or not this CPU has been added to the queue (visited in BFS).

  STRONG_SCHEDULER_NODE_OF_CHAIN(node): maps a Ready_node chain node back to its enclosing Scheduler_strong_APA_Node.

File-local helpers:

  static Scheduler_strong_APA_Context *_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler )
  static Scheduler_strong_APA_Context *_Scheduler_strong_APA_Get_self( Scheduler_Context *context )
  static Scheduler_strong_APA_Node *_Scheduler_strong_APA_Node_downcast( Scheduler_Node *node )
  static void _Scheduler_strong_APA_Do_update( Scheduler_Context *context, Scheduler_Node *node, Priority_Control new_priority )
  static bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context )
  static void _Scheduler_strong_APA_Set_scheduled( Scheduler_strong_APA_Context *self, Scheduler_Node *executing, const Per_CPU_Control *cpu )
  static Scheduler_Node *_Scheduler_strong_APA_Get_scheduled( const Scheduler_strong_APA_Context *self, const Per_CPU_Control *cpu )
  static void _Scheduler_strong_APA_Allocate_processor( Scheduler_Context *context, Scheduler_Node *scheduled_base, Scheduler_Node *victim_base, Per_CPU_Control *victim_cpu )
  static Scheduler_Node *_Scheduler_strong_APA_Find_highest_ready( Scheduler_strong_APA_Context *self, uint32_t front, uint32_t rear )
  static void _Scheduler_strong_APA_Move_from_ready_to_scheduled( Scheduler_Context *context, Scheduler_Node *ready_to_scheduled )
  static Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( Scheduler_Context *context, Scheduler_Node *filter )
  static Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( Scheduler_Context *context, Scheduler_Node *filter_base )
  static void _Scheduler_strong_APA_Extract_from_scheduled( Scheduler_Context *context, Scheduler_Node *node_to_extract )
  static void _Scheduler_strong_APA_Extract_from_ready( Scheduler_Context *context, Scheduler_Node *node_to_extract )
  static void _Scheduler_strong_APA_Insert_ready( Scheduler_Context *context, Scheduler_Node *node_base, Priority_Control insert_priority )
  static void _Scheduler_strong_APA_Move_from_scheduled_to_ready( Scheduler_Context *context, Scheduler_Node *scheduled_to_ready )
  static Scheduler_Node *_Scheduler_strong_APA_Get_lowest_reachable( Scheduler_strong_APA_Context *self, uint32_t front, uint32_t rear, Per_CPU_Control **cpu_to_preempt )
  static bool _Scheduler_strong_APA_Do_enqueue( Scheduler_Context *context, Scheduler_Node *lowest_reachable, Scheduler_Node *node, Priority_Control insert_priority, Per_CPU_Control *cpu_to_preempt )
  static bool _Scheduler_strong_APA_Enqueue( Scheduler_Context *context, Scheduler_Node *node, Priority_Control insert_priority )
  static bool _Scheduler_strong_APA_Enqueue_scheduled( Scheduler_Context *context, Scheduler_Node *node, Priority_Control insert_priority )
  static bool _Scheduler_strong_APA_Do_ask_for_help( Scheduler_Context *context, Thread_Control *the_thread, Scheduler_Node *node )
  static void _Scheduler_strong_APA_Do_set_affinity( Scheduler_Context *context, Scheduler_Node *node_base, void *arg )
  static void _Scheduler_strong_APA_Register_idle( Scheduler_Context *context, Scheduler_Node *idle_base, Per_CPU_Control *cpu )

Scheduler operations:

  void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler ): Initializes the scheduler.
  void _Scheduler_strong_APA_Node_initialize( const Scheduler_Control *scheduler, Scheduler_Node *node, Thread_Control *the_thread, Priority_Control priority ): Initializes the node with the given priority.
  void _Scheduler_strong_APA_Block( const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node ): Blocks the thread.
  void _Scheduler_strong_APA_Unblock( const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node ): Unblocks the thread.
  void _Scheduler_strong_APA_Update_priority( const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node ): Updates the priority of the node.
  bool _Scheduler_strong_APA_Ask_for_help( const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node ): Asks for help.
  void _Scheduler_strong_APA_Reconsider_help_request( const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node ): Reconsiders the help request.
  void _Scheduler_strong_APA_Withdraw_node( const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node, Thread_Scheduler_state next_state ): Withdraws the node.
  void _Scheduler_strong_APA_Add_processor( const Scheduler_Control *scheduler, Thread_Control *idle ): Adds the idle thread to a processor.
  Thread_Control *_Scheduler_strong_APA_Remove_processor( const Scheduler_Control *scheduler, Per_CPU_Control *cpu ): Removes an idle thread from the given CPU.
  void _Scheduler_strong_APA_Start_idle( const Scheduler_Control *scheduler, Thread_Control *idle, Per_CPU_Control *cpu ): Starts an idle thread.
  bool _Scheduler_strong_APA_Set_affinity( const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node_base, const Processor_mask *affinity ): Sets the affinity.
  void _Scheduler_strong_APA_Yield( const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node ): Performs a yield operation.