schedulerstrongapa.c
/* SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2020 Richi Dubey
 * Copyright (c) 2013, 2016 embedded brains GmbH
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/schedulerstrongapa.h>
#include <rtems/score/schedulersmpimpl.h>
#include <rtems/score/assert.h>

static inline Scheduler_strong_APA_Context *
_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler )
{
  return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler );
}

static inline Scheduler_strong_APA_Context *
_Scheduler_strong_APA_Get_self( Scheduler_Context *context )
{
  return (Scheduler_strong_APA_Context *) context;
}

static inline Scheduler_strong_APA_Node *
_Scheduler_strong_APA_Node_downcast( Scheduler_Node *node )
{
  return (Scheduler_strong_APA_Node *) node;
}

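/*
 * Updates the priority of the underlying SMP node to the new priority.
 */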
static inline void _Scheduler_strong_APA_Do_update(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   new_priority
)
{
  Scheduler_SMP_Node *smp_node;
  (void) context;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
}

/*
 * Returns true if the Strong APA scheduler has ready nodes
 * available for scheduling.
 */
static inline bool _Scheduler_strong_APA_Has_ready(
  Scheduler_Context *context
)
{
  Scheduler_strong_APA_Context *self;
  const Chain_Node             *tail;
  Chain_Node                   *next;
  Scheduler_strong_APA_Node    *node;

  self = _Scheduler_strong_APA_Get_self( context );
  tail = _Chain_Immutable_tail( &self->Ready );
  next = _Chain_First( &self->Ready );

  while ( next != tail ) {
    node = (Scheduler_strong_APA_Node *) STRONG_SCHEDULER_NODE_OF_CHAIN( next );

    if (
      _Scheduler_SMP_Node_state( &node->Base.Base ) ==
        SCHEDULER_SMP_NODE_READY
    ) {
      return true;
    }

    next = _Chain_Next( next );
  }

  return false;
}

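/*
 * Marks the given node as the one executing on the given CPU in the
 * scheduler context.
 */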
static inline void _Scheduler_strong_APA_Set_scheduled(
  Scheduler_strong_APA_Context *self,
  Scheduler_Node               *executing,
  const Per_CPU_Control        *cpu
)
{
  self->CPU[ _Per_CPU_Get_index( cpu ) ].executing = executing;
}

static inline Scheduler_Node *_Scheduler_strong_APA_Get_scheduled(
  const Scheduler_strong_APA_Context *self,
  const Per_CPU_Control              *cpu
)
{
  return self->CPU[ _Per_CPU_Get_index( cpu ) ].executing;
}

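/*
 * Allocates the victim CPU to the scheduled node and records it as the node
 * executing on that CPU.
 */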
static inline void _Scheduler_strong_APA_Allocate_processor(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_base,
  Scheduler_Node    *victim_base,
  Per_CPU_Control   *victim_cpu
)
{
  Scheduler_strong_APA_Node    *scheduled;
  Scheduler_strong_APA_Context *self;

  (void) victim_base;

  scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base );
  self = _Scheduler_strong_APA_Get_self( context );

  _Scheduler_strong_APA_Set_scheduled( self, scheduled_base, victim_cpu );

  _Scheduler_SMP_Allocate_processor_exact(
    context,
    &( scheduled->Base.Base ),
    NULL,
    victim_cpu
  );
}

/*
 * Finds and returns the highest ready node present by accessing the
 * _Strong_APA_Context->CPU with front and rear values.
 */
static inline Scheduler_Node *_Scheduler_strong_APA_Find_highest_ready(
  Scheduler_strong_APA_Context *self,
  uint32_t                      front,
  uint32_t                      rear
)
{
  Scheduler_Node            *highest_ready;
  Scheduler_strong_APA_Node *node;
  const Chain_Node          *tail;
  Chain_Node                *next;
  Scheduler_strong_APA_CPU  *CPU;
  Priority_Control           min_priority_num;
  Priority_Control           curr_priority;
  Per_CPU_Control           *assigned_cpu;
  Scheduler_SMP_Node_state   curr_state;
  Per_CPU_Control           *curr_CPU;

  CPU = self->CPU;
  /*
   * The first task accessed has nothing to compare its priority against,
   * so it is the task with the highest priority witnessed so far.
   */
  min_priority_num = UINT64_MAX;

  while ( front <= rear ) {
    curr_CPU = CPU[ front++ ].cpu;

    tail = _Chain_Immutable_tail( &self->Ready );
    next = _Chain_First( &self->Ready );

    while ( next != tail ) {
      node = (Scheduler_strong_APA_Node *) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
      /* Check if the curr_CPU is in the affinity set of the node. */
      if (
        _Processor_mask_Is_set( &node->Affinity, _Per_CPU_Get_index( curr_CPU ) )
      ) {
        curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );

        if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
          assigned_cpu = _Thread_Get_CPU( node->Base.Base.user );

          if ( CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited == false ) {
            CPU[ ++rear ].cpu = assigned_cpu;
            CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited = true;
            /*
             * The curr_CPU of the queue invoked this node to add the CPU
             * that it is executing on to the queue. So this node might get
             * preempted because of the invoker curr_CPU, and this curr_CPU
             * is the CPU that the node should preempt in case this node
             * gets preempted.
             */
            node->cpu_to_preempt = curr_CPU;
          }
        } else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
          curr_priority = _Scheduler_Node_get_priority( &node->Base.Base );
          curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );

          if (
            min_priority_num == UINT64_MAX ||
            curr_priority < min_priority_num
          ) {
            min_priority_num = curr_priority;
            highest_ready = &node->Base.Base;
            /*
             * In case curr_CPU is filter_CPU, we need to store the
             * cpu_to_preempt value so that we go back to the caller SMP_*
             * function, rather than preempting the node ourselves.
             */
            node->cpu_to_preempt = curr_CPU;
          }
        }
      }
      next = _Chain_Next( next );
    }
  }

  return highest_ready;
}

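/*
 * Inserts a node that became scheduled into the SMP scheduled chain. The
 * node remains on the Strong APA Ready chain, which tracks both ready and
 * scheduled nodes.
 */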
static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *ready_to_scheduled
)
{
  Priority_Control insert_priority;

  insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
  _Scheduler_SMP_Insert_scheduled(
    context,
    ready_to_scheduled,
    insert_priority
  );
}

/*
 * Implements the BFS algorithm for task departure to get the highest ready
 * task for a particular CPU and returns the highest ready Scheduler_Node.
 * The Scheduler_Node filter points to the victim node that is blocked, as a
 * result of which this function is called.
 */
static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_strong_APA_Context *self;
  Per_CPU_Control              *filter_cpu;
  Scheduler_strong_APA_Node    *node;
  Scheduler_Node               *highest_ready;
  Scheduler_Node               *curr_node;
  Scheduler_Node               *next_node;
  Scheduler_strong_APA_CPU     *CPU;
  uint32_t                      front;
  uint32_t                      rear;
  uint32_t                      cpu_max;
  uint32_t                      cpu_index;

  self = _Scheduler_strong_APA_Get_self( context );
  /* Denotes front and rear of the queue */
  front = 0;
  rear = -1;

  filter_cpu = _Thread_Get_CPU( filter->user );
  CPU = self->CPU;
  cpu_max = _SMP_Get_processor_maximum();

  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
    CPU[ cpu_index ].visited = false;
  }

  CPU[ ++rear ].cpu = filter_cpu;
  CPU[ _Per_CPU_Get_index( filter_cpu ) ].visited = true;

  highest_ready = _Scheduler_strong_APA_Find_highest_ready(
                    self,
                    front,
                    rear
                  );

  if ( highest_ready != filter ) {
    /*
     * Backtrack on the path from
     * filter_cpu to highest_ready, shifting along every task.
     */

    node = _Scheduler_strong_APA_Node_downcast( highest_ready );
    /*
     * The highest ready node may not be directly reachable from the victim
     * CPU, so task shifting may be needed.
     */
    while ( node->cpu_to_preempt != filter_cpu ) {
      curr_node = &node->Base.Base;
      next_node = _Scheduler_strong_APA_Get_scheduled(
                    self,
                    node->cpu_to_preempt
                  );

      (void) _Scheduler_SMP_Preempt(
        context,
        curr_node,
        next_node,
        _Scheduler_strong_APA_Allocate_processor
      );

      if ( curr_node == highest_ready ) {
        _Scheduler_strong_APA_Move_from_ready_to_scheduled( context, curr_node );
      }

      node = _Scheduler_strong_APA_Node_downcast( next_node );
    }
    /*
     * Save the last node so that the caller SMP_* function
     * can do the allocation.
     */
    curr_node = &node->Base.Base;
    highest_ready = curr_node;
  }

  return highest_ready;
}

/*
 * Returns the lowest priority scheduled node that is directly reachable,
 * i.e. in the affinity set of the filter node.
 */
static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base
)
{
  uint32_t                      cpu_max;
  uint32_t                      cpu_index;
  Scheduler_Node               *curr_node;
  Scheduler_Node               *lowest_scheduled = NULL;
  Priority_Control              max_priority_num;
  Priority_Control              curr_priority;
  Scheduler_strong_APA_Node    *filter_strong_node;
  Scheduler_strong_APA_Context *self;

  self = _Scheduler_strong_APA_Get_self( context );
  max_priority_num = 0; /* Max (lowest) priority encountered so far */
  filter_strong_node = _Scheduler_strong_APA_Node_downcast( filter_base );

  /* lowest_scheduled is NULL if the affinity set of a node is empty */
  _Assert( !_Processor_mask_Is_zero( &filter_strong_node->Affinity ) );
  cpu_max = _SMP_Get_processor_maximum();

  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
    /* Check if the CPU is in the affinity set of filter_strong_node */
    if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index ) ) {
      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

      if ( _Per_CPU_Is_processor_online( cpu ) ) {
        curr_node = _Scheduler_strong_APA_Get_scheduled( self, cpu );
        curr_priority = _Scheduler_Node_get_priority( curr_node );
        curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );

        if ( curr_priority > max_priority_num ) {
          lowest_scheduled = curr_node;
          max_priority_num = curr_priority;
        }
      }
    }
  }

  _Assert( lowest_scheduled != NULL );
  return lowest_scheduled;
}

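/*
 * Removes the node from the SMP scheduled chain.
 */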
static inline void _Scheduler_strong_APA_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
)
{
  Scheduler_strong_APA_Context *self;
  Scheduler_strong_APA_Node    *node;

  self = _Scheduler_strong_APA_Get_self( context );
  node = _Scheduler_strong_APA_Node_downcast( node_to_extract );

  _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base );
  /* Not removing it from Ready, since the node could go into the READY state */
}

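/*
 * Removes the node from the Strong APA Ready chain, if it is still on it.
 */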
static inline void _Scheduler_strong_APA_Extract_from_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
)
{
  Scheduler_strong_APA_Context *self;
  Scheduler_strong_APA_Node    *node;

  self = _Scheduler_strong_APA_Get_self( context );
  node = _Scheduler_strong_APA_Node_downcast( node_to_extract );

  if ( !_Chain_Is_node_off_chain( &node->Ready_node ) ) {
    _Chain_Extract_unprotected( &node->Ready_node );
    _Chain_Set_off_chain( &node->Ready_node );
  }
}

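/*
 * Appends the node to the Strong APA Ready chain, re-appending it if it is
 * already on the chain.
 */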
static inline void _Scheduler_strong_APA_Insert_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  Priority_Control   insert_priority
)
{
  Scheduler_strong_APA_Context *self;
  Scheduler_strong_APA_Node    *node;

  self = _Scheduler_strong_APA_Get_self( context );
  node = _Scheduler_strong_APA_Node_downcast( node_base );

  if ( _Chain_Is_node_off_chain( &node->Ready_node ) ) {
    _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
  } else {
    _Chain_Extract_unprotected( &node->Ready_node );
    _Chain_Set_off_chain( &node->Ready_node );
    _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
  }
}

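/*
 * Moves a node that lost its processor from the scheduled chain back to the
 * Ready chain.
 */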
static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_to_ready
)
{
  Priority_Control insert_priority;

  if ( !_Chain_Is_node_off_chain( &scheduled_to_ready->Node.Chain ) ) {
    _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
  }

  insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );

  _Scheduler_strong_APA_Insert_ready(
    context,
    scheduled_to_ready,
    insert_priority
  );
}

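/*
 * Searches the CPUs enqueued between front and rear for the scheduled node
 * with the lowest priority that is reachable from the arriving node and
 * records the CPU that it would have to preempt in cpu_to_preempt.
 */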
static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_reachable(
  Scheduler_strong_APA_Context *self,
  uint32_t                      front,
  uint32_t                      rear,
  Per_CPU_Control             **cpu_to_preempt
)
{
  Scheduler_Node            *lowest_reachable;
  Priority_Control           max_priority_num;
  uint32_t                   cpu_max;
  uint32_t                   cpu_index;
  Thread_Control            *curr_thread;
  Per_CPU_Control           *curr_CPU;
  Priority_Control           curr_priority;
  Scheduler_Node            *curr_node;
  Scheduler_strong_APA_Node *curr_strong_node;
  Scheduler_strong_APA_CPU  *CPU;

  max_priority_num = 0; /* Max (lowest) priority encountered so far */
  CPU = self->CPU;
  cpu_max = _SMP_Get_processor_maximum();

  while ( front <= rear ) {
    curr_CPU = CPU[ front ].cpu;
    front = front + 1;

    curr_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
    curr_thread = curr_node->user;

    curr_priority = _Scheduler_Node_get_priority( curr_node );
    curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );

    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );

    if ( curr_priority > max_priority_num ) {
      lowest_reachable = curr_node;
      max_priority_num = curr_priority;
      *cpu_to_preempt = curr_CPU;
    }

    if ( !curr_thread->is_idle ) {
      for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
        /* Check if the CPU is in the affinity set of the node */
        if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) {
          Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

          if (
            _Per_CPU_Is_processor_online( cpu ) &&
            CPU[ cpu_index ].visited == false
          ) {
            rear = rear + 1;
            CPU[ rear ].cpu = cpu;
            CPU[ cpu_index ].visited = true;
            CPU[ cpu_index ].preempting_node = curr_node;
          }
        }
      }
    }
  }

  return lowest_reachable;
}

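/*
 * If the lowest reachable node has a lower priority than the arriving node,
 * shifts the scheduled nodes along the backtracked path and preempts the
 * lowest reachable node; otherwise the arriving node only becomes ready and
 * needs help.
 */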
static inline bool _Scheduler_strong_APA_Do_enqueue(
  Scheduler_Context *context,
  Scheduler_Node    *lowest_reachable,
  Scheduler_Node    *node,
  Priority_Control   insert_priority,
  Per_CPU_Control   *cpu_to_preempt
)
{
  bool                          needs_help;
  Priority_Control              node_priority;
  Priority_Control              lowest_priority;
  Scheduler_strong_APA_CPU     *CPU;
  Scheduler_Node               *curr_node;
  Scheduler_strong_APA_Node    *curr_strong_node;
  Per_CPU_Control              *curr_CPU;
  Scheduler_strong_APA_Context *self;
  Scheduler_Node               *next_node;

  self = _Scheduler_strong_APA_Get_self( context );
  CPU = self->CPU;

  node_priority = _Scheduler_Node_get_priority( node );
  node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );

  if ( lowest_reachable == NULL ) {
    /* The affinity set of the newly arrived node is empty. */
    lowest_priority = UINT64_MAX;
  } else {
    lowest_priority = _Scheduler_Node_get_priority( lowest_reachable );
    lowest_priority = SCHEDULER_PRIORITY_PURIFY( lowest_priority );
  }

  if ( lowest_priority > node_priority ) {
    /*
     * Backtrack on the path from
     * _Thread_Get_CPU( lowest_reachable->user ) to lowest_reachable,
     * shifting along every task.
     */

    curr_node = CPU[ _Per_CPU_Get_index( cpu_to_preempt ) ].preempting_node;
    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
    curr_strong_node->cpu_to_preempt = cpu_to_preempt;

    /* Save which CPU to preempt in the cpu_to_preempt value of the node */
    while ( curr_node != node ) {
      curr_CPU = _Thread_Get_CPU( curr_node->user );
      curr_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].preempting_node;
      curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
      curr_strong_node->cpu_to_preempt = curr_CPU;
    }

    curr_CPU = curr_strong_node->cpu_to_preempt;
    next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );

    node_priority = _Scheduler_Node_get_priority( curr_node );
    node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );

    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      curr_node,
      node_priority,
      next_node,
      _Scheduler_SMP_Insert_scheduled,
      _Scheduler_strong_APA_Move_from_scheduled_to_ready,
      _Scheduler_strong_APA_Allocate_processor
    );

    curr_node = next_node;
    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );

    while ( curr_node != lowest_reachable ) {
      curr_CPU = curr_strong_node->cpu_to_preempt;
      next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
      /* curr_node preempts the next_node */
      _Scheduler_SMP_Preempt(
        context,
        curr_node,
        next_node,
        _Scheduler_strong_APA_Allocate_processor
      );

      curr_node = next_node;
      curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
    }

    _Scheduler_strong_APA_Move_from_scheduled_to_ready( context, lowest_reachable );

    needs_help = false;
  } else {
    needs_help = true;
  }

  /* Add it to the Ready chain since it is now either scheduled or just ready. */
  _Scheduler_strong_APA_Insert_ready( context, node, insert_priority );

  return needs_help;
}

/*
 * BFS algorithm for task arrival.
 * Enqueues the node either in the scheduled chain or in the ready chain.
 * node is the newly arrived node and is currently not scheduled.
 */
static inline bool _Scheduler_strong_APA_Enqueue(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  Scheduler_strong_APA_Context *self;
  Scheduler_strong_APA_CPU     *CPU;
  uint32_t                      cpu_max;
  uint32_t                      cpu_index;
  Per_CPU_Control              *cpu_to_preempt;
  Scheduler_Node               *lowest_reachable;
  Scheduler_strong_APA_Node    *strong_node;

  /* Denotes front and rear of the queue */
  uint32_t front;
  uint32_t rear;

  front = 0;
  rear = -1;

  self = _Scheduler_strong_APA_Get_self( context );
  strong_node = _Scheduler_strong_APA_Node_downcast( node );
  cpu_max = _SMP_Get_processor_maximum();
  CPU = self->CPU;

  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
    CPU[ cpu_index ].visited = false;

    /* Check if the CPU is in the affinity set of the node */
    if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index ) ) {
      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

      if ( _Per_CPU_Is_processor_online( cpu ) ) {
        rear = rear + 1;
        CPU[ rear ].cpu = cpu;
        CPU[ cpu_index ].visited = true;
        CPU[ cpu_index ].preempting_node = node;
      }
    }
  }

  lowest_reachable = _Scheduler_strong_APA_Get_lowest_reachable(
                       self,
                       front,
                       rear,
                       &cpu_to_preempt
                     );

  return _Scheduler_strong_APA_Do_enqueue(
           context,
           lowest_reachable,
           node,
           insert_priority,
           cpu_to_preempt
         );
}

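/*
 * Enqueues a node that is already scheduled and re-evaluates whether it can
 * stay scheduled.
 */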
680 
682  Scheduler_Context *context,
683  Scheduler_Node *node,
684  Priority_Control insert_priority
685 )
686 {
687  return _Scheduler_SMP_Enqueue_scheduled(
688  context,
689  node,
690  insert_priority,
691  _Scheduler_SMP_Priority_less_equal,
695  _Scheduler_SMP_Insert_scheduled,
698  );
699 }
700 
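/*
 * Performs the ask-for-help operation with the Strong APA specific insert,
 * move, and allocation helpers.
 */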
static inline bool _Scheduler_strong_APA_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control    *the_thread,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_strong_APA_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_strong_APA_Move_from_scheduled_to_ready,
    _Scheduler_strong_APA_Get_lowest_scheduled,
    _Scheduler_strong_APA_Allocate_processor
  );
}

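/*
 * Copies the affinity set passed through arg into the node.
 */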
static inline void _Scheduler_strong_APA_Do_set_affinity(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  void              *arg
)
{
  Scheduler_strong_APA_Node *node;

  node = _Scheduler_strong_APA_Node_downcast( node_base );
  node->Affinity = *( (const Processor_mask *) arg );
}

void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler )
{
  Scheduler_strong_APA_Context *self =
    _Scheduler_strong_APA_Get_context( scheduler );

  _Scheduler_SMP_Initialize( &self->Base );
  _Chain_Initialize_empty( &self->Ready );
}

void _Scheduler_strong_APA_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Yield(
    context,
    thread,
    node,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Enqueue,
    _Scheduler_strong_APA_Enqueue_scheduled
  );
}

void _Scheduler_strong_APA_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  /*
   * Needed in case the node is a scheduled node, since _Scheduler_SMP_Block
   * only extracts from the SMP scheduled chain, and from the Strong APA
   * Ready chain only when the node is ready. But the Strong APA Ready chain
   * stores both ready and scheduled nodes.
   */
  _Scheduler_strong_APA_Extract_from_ready( context, node );

  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_strong_APA_Extract_from_scheduled,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Get_highest_ready,
    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    _Scheduler_strong_APA_Allocate_processor
  );
}

void _Scheduler_strong_APA_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_strong_APA_Do_update,
    _Scheduler_strong_APA_Enqueue
  );
}

void _Scheduler_strong_APA_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Do_update,
    _Scheduler_strong_APA_Enqueue,
    _Scheduler_strong_APA_Enqueue_scheduled,
    _Scheduler_strong_APA_Do_ask_for_help
  );
}

bool _Scheduler_strong_APA_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_strong_APA_Do_ask_for_help(
    context,
    the_thread,
    node
  );
}

void _Scheduler_strong_APA_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_strong_APA_Extract_from_ready
  );
}

void _Scheduler_strong_APA_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Thread_Scheduler_state   next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Get_highest_ready,
    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    _Scheduler_strong_APA_Allocate_processor
  );
}

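/*
 * Registers the idle thread as the node executing on the given CPU.
 */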
static inline void _Scheduler_strong_APA_Register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle_base,
  Per_CPU_Control   *cpu
)
{
  Scheduler_strong_APA_Context *self;
  self = _Scheduler_strong_APA_Get_self( context );

  _Scheduler_strong_APA_Set_scheduled( self, idle_base, cpu );
}

void _Scheduler_strong_APA_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_strong_APA_Has_ready,
    _Scheduler_strong_APA_Enqueue_scheduled,
    _Scheduler_strong_APA_Register_idle
  );
}

void _Scheduler_strong_APA_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Do_start_idle(
    context,
    idle,
    cpu,
    _Scheduler_strong_APA_Register_idle
  );
}

Thread_Control *_Scheduler_strong_APA_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Enqueue
  );
}

void _Scheduler_strong_APA_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_SMP_Node        *smp_node;
  Scheduler_strong_APA_Node *strong_node;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  strong_node = _Scheduler_strong_APA_Node_downcast( node );

  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );

  _Processor_mask_Assign(
    &strong_node->Affinity,
    _SMP_Get_online_processors()
  );
}

bool _Scheduler_strong_APA_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  const Processor_mask    *affinity
)
{
  Scheduler_Context         *context;
  Scheduler_strong_APA_Node *node;
  Processor_mask             local_affinity;

  context = _Scheduler_Get_context( scheduler );
  _Processor_mask_And( &local_affinity, &context->Processors, affinity );

  if ( _Processor_mask_Is_zero( &local_affinity ) ) {
    return false;
  }

  node = _Scheduler_strong_APA_Node_downcast( node_base );

  if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) ) {
    return true; /* Nothing to do */
  }

  _Processor_mask_Assign( &node->Affinity, &local_affinity );

  _Scheduler_SMP_Set_affinity(
    context,
    thread,
    node_base,
    &local_affinity,
    _Scheduler_strong_APA_Do_set_affinity,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Get_highest_ready,
    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    _Scheduler_strong_APA_Get_lowest_scheduled,
    _Scheduler_strong_APA_Allocate_processor
  );

  return true;
}