schedulerstrongapa.c
/* SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2020 Richi Dubey
 * Copyright (c) 2013, 2018 embedded brains GmbH
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/schedulerstrongapa.h>
#include <rtems/score/schedulersmpimpl.h>
#include <rtems/score/assert.h>

static inline Scheduler_strong_APA_Context *
_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler )
{
  return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler );
}

static inline Scheduler_strong_APA_Context *
_Scheduler_strong_APA_Get_self( Scheduler_Context *context )
{
  return (Scheduler_strong_APA_Context *) context;
}

static inline Scheduler_strong_APA_Node *
_Scheduler_strong_APA_Node_downcast( Scheduler_Node *node )
{
  return (Scheduler_strong_APA_Node *) node;
}

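/*
 * Updates the priority of the SMP node to the new priority.
 */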
static inline void _Scheduler_strong_APA_Do_update(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Priority_Control new_priority
)
{
  Scheduler_SMP_Node *smp_node;
  (void) context;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
}

/*
 * Returns true if the Strong APA scheduler has ready nodes
 * available for scheduling.
 */
static inline bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context )
{
  Scheduler_strong_APA_Context *self;
  const Chain_Node *tail;
  Chain_Node *next;
  Scheduler_strong_APA_Node *node;

  self = _Scheduler_strong_APA_Get_self( context );
  tail = _Chain_Immutable_tail( &self->Ready );
  next = _Chain_First( &self->Ready );

  while ( next != tail ) {
    node = (Scheduler_strong_APA_Node *) STRONG_SCHEDULER_NODE_OF_CHAIN( next );

    if (
      _Scheduler_SMP_Node_state( &node->Base.Base ) ==
      SCHEDULER_SMP_NODE_READY
    ) {
      return true;
    }

    next = _Chain_Next( next );
  }

  return false;
}

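/*
 * Records the node as the one scheduled (executing) on the CPU in the
 * per-CPU bookkeeping of the context.
 */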
static inline void _Scheduler_strong_APA_Set_scheduled(
  Scheduler_strong_APA_Context *self,
  Scheduler_Node *executing,
  const Per_CPU_Control *cpu
)
{
  self->CPU[ _Per_CPU_Get_index( cpu ) ].executing = executing;
}

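/*
 * Returns the node currently recorded as scheduled on the CPU.
 */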
static inline Scheduler_Node *_Scheduler_strong_APA_Get_scheduled(
  const Scheduler_strong_APA_Context *self,
  const Per_CPU_Control *cpu
)
{
  return self->CPU[ _Per_CPU_Get_index( cpu ) ].executing;
}

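/*
 * Allocates the victim CPU to the scheduled node and updates the per-CPU
 * bookkeeping of the context accordingly.
 */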
static inline void _Scheduler_strong_APA_Allocate_processor(
  Scheduler_Context *context,
  Scheduler_Node *scheduled_base,
  Scheduler_Node *victim_base,
  Per_CPU_Control *victim_cpu
)
{
  Scheduler_strong_APA_Node *scheduled;
  Scheduler_strong_APA_Context *self;

  (void) victim_base;

  scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base );
  self = _Scheduler_strong_APA_Get_self( context );

  _Scheduler_strong_APA_Set_scheduled( self, scheduled_base, victim_cpu );

  _Scheduler_SMP_Allocate_processor_exact(
    context,
    &( scheduled->Base.Base ),
    NULL,
    victim_cpu
  );
}

/*
 * Finds and returns the highest ready node present by accessing the
 * _Strong_APA_Context->CPU with front and rear values.
 */
static inline Scheduler_Node *_Scheduler_strong_APA_Find_highest_ready(
  Scheduler_strong_APA_Context *self,
  uint32_t front,
  uint32_t rear
)
{
  Scheduler_Node *highest_ready;
  Scheduler_strong_APA_Node *node;
  const Chain_Node *tail;
  Chain_Node *next;
  uint32_t index_assigned_cpu;
  uint32_t index_curr_cpu;
  Scheduler_strong_APA_CPU *CPU;
  Priority_Control min_priority_num;
  Priority_Control curr_priority;
  Per_CPU_Control *assigned_cpu;
  Scheduler_SMP_Node_state curr_state;
  Per_CPU_Control *curr_CPU;
  bool first_task;

  CPU = self->CPU;
  /*
   * The first task accessed has nothing to compare its priority against,
   * so it is the task with the highest priority witnessed so far.
   */
  first_task = true;

  _Assert( rear < CONFIGURE_MAXIMUM_PROCESSOR );

  while ( front <= rear ) {
    curr_CPU = CPU[ front ].cpu;
    front = front + 1;

    tail = _Chain_Immutable_tail( &self->Ready );
    next = _Chain_First( &self->Ready );

    while ( next != tail ) {
      node = (Scheduler_strong_APA_Node *) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
      /* Check if curr_CPU is in the affinity set of the node. */
      index_curr_cpu = _Per_CPU_Get_index( curr_CPU );
      if (
        _Processor_mask_Is_set( &node->Affinity, index_curr_cpu )
      ) {
        curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );

        if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
          assigned_cpu = _Thread_Get_CPU( node->Base.Base.user );
          index_assigned_cpu = _Per_CPU_Get_index( assigned_cpu );

          if ( CPU[ index_assigned_cpu ].visited == false ) {
            rear = rear + 1;
            CPU[ rear ].cpu = assigned_cpu;
            CPU[ index_assigned_cpu ].visited = true;
            /*
             * The curr_CPU of the queue invoked this node to add the CPU
             * that it is executing on to the queue. So this node might get
             * preempted because of the invoker curr_CPU, and this curr_CPU
             * is the CPU that the node should preempt in case this node
             * gets preempted.
             */
            node->cpu_to_preempt = curr_CPU;
          }
        }
        else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
          curr_priority = _Scheduler_Node_get_priority( &node->Base.Base );
          curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );

          if ( first_task == true || curr_priority < min_priority_num ) {
            min_priority_num = curr_priority;
            highest_ready = &node->Base.Base;
            first_task = false;
            /*
             * In case curr_CPU is filter_CPU, we need to store the
             * cpu_to_preempt value so that we go back to the caller SMP_*
             * function, rather than preempting the node ourselves.
             */
            node->cpu_to_preempt = curr_CPU;
          }
        }
      }
      next = _Chain_Next( next );
    }
  }

  return highest_ready;
}

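/*
 * Moves a ready node into the set of scheduled nodes by inserting it into
 * the scheduled chain with an appended priority.
 */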
static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
  Scheduler_Context *context,
  Scheduler_Node *ready_to_scheduled
)
{
  Priority_Control insert_priority;

  insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
  _Scheduler_SMP_Insert_scheduled(
    context,
    ready_to_scheduled,
    insert_priority
  );
}

/*
 * Implements the BFS algorithm for task departure to get the highest ready
 * task for a particular CPU and returns the highest ready Scheduler_Node.
 * The Scheduler_Node filter points to the victim node whose blocking
 * caused this function to be called.
 */
static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node *filter
)
{
  Scheduler_strong_APA_Context *self;
  Per_CPU_Control *filter_cpu;
  Scheduler_strong_APA_Node *node;
  Scheduler_Node *highest_ready;
  Scheduler_Node *curr_node;
  Scheduler_Node *next_node;
  Scheduler_strong_APA_CPU *CPU;
  uint32_t front;
  uint32_t rear;
  uint32_t cpu_max;
  uint32_t cpu_index;

  self = _Scheduler_strong_APA_Get_self( context );
  /* Denotes front and rear of the queue */
  front = 0;
  rear = -1;

  filter_cpu = _Thread_Get_CPU( filter->user );
  CPU = self->CPU;
  cpu_max = _SMP_Get_processor_maximum();

  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
    CPU[ cpu_index ].visited = false;
  }

  rear = rear + 1;
  CPU[ rear ].cpu = filter_cpu;
  CPU[ _Per_CPU_Get_index( filter_cpu ) ].visited = true;

  highest_ready = _Scheduler_strong_APA_Find_highest_ready(
    self,
    front,
    rear
  );

  if ( highest_ready != filter ) {
    /*
     * Backtrack on the path from
     * filter_cpu to highest_ready, shifting along every task.
     */

    node = _Scheduler_strong_APA_Node_downcast( highest_ready );
    /*
     * The highest ready node is not directly reachable from the victim CPU,
     * so task shifting is needed.
     */
    while ( node->cpu_to_preempt != filter_cpu ) {
      curr_node = &node->Base.Base;
      next_node =
        _Scheduler_strong_APA_Get_scheduled( self, node->cpu_to_preempt );

      _Scheduler_SMP_Preempt(
        context,
        curr_node,
        next_node,
        _Scheduler_strong_APA_Allocate_processor
      );

      if ( curr_node == highest_ready ) {
        _Scheduler_strong_APA_Move_from_ready_to_scheduled( context, curr_node );
      }

      node = _Scheduler_strong_APA_Node_downcast( next_node );
    }
    /*
     * Save the last node so that the caller SMP_* function
     * can do the allocation.
     */
    curr_node = &node->Base.Base;
    highest_ready = curr_node;
  }

  return highest_ready;
}

/*
 * Returns the lowest scheduled node that is directly reachable, that is,
 * currently executing on a CPU in the affinity set of filter_base.
 */
static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node *filter_base
)
{
  uint32_t cpu_max;
  uint32_t cpu_index;
  Scheduler_Node *curr_node;
  Scheduler_Node *lowest_scheduled;
  Priority_Control max_priority_num;
  Priority_Control curr_priority;
  Scheduler_strong_APA_Node *filter_strong_node;
  Scheduler_strong_APA_Context *self;

  self = _Scheduler_strong_APA_Get_self( context );
  lowest_scheduled = NULL; /* To remove compiler warning */
  max_priority_num = 0; /* Max (lowest) priority encountered so far */
  filter_strong_node = _Scheduler_strong_APA_Node_downcast( filter_base );

  /* lowest_scheduled would stay NULL if the affinity set of the node were empty */
  _Assert( !_Processor_mask_Is_zero( &filter_strong_node->Affinity ) );
  cpu_max = _SMP_Get_processor_maximum();

  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
    /* Check if the CPU is in the affinity set of filter_strong_node */
    if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index ) ) {
      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

      if ( _Per_CPU_Is_processor_online( cpu ) ) {
        curr_node = _Scheduler_strong_APA_Get_scheduled( self, cpu );
        curr_priority = _Scheduler_Node_get_priority( curr_node );
        curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );

        if ( curr_priority > max_priority_num ) {
          lowest_scheduled = curr_node;
          max_priority_num = curr_priority;
        }
      }
    }
  }

  _Assert( lowest_scheduled != NULL );
  return lowest_scheduled;
}

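/*
 * Extracts the node from the chain of scheduled nodes.
 */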
static inline void _Scheduler_strong_APA_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node *node_to_extract
)
{
  Scheduler_strong_APA_Context *self;
  Scheduler_strong_APA_Node *node;

  self = _Scheduler_strong_APA_Get_self( context );
  node = _Scheduler_strong_APA_Node_downcast( node_to_extract );

  _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base );
  /* Not removing it from Ready since the node could go into the READY state */
}

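/*
 * Removes the node from the Ready chain of the context and marks it as
 * off chain.
 */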
static inline void _Scheduler_strong_APA_Extract_from_ready(
  Scheduler_Context *context,
  Scheduler_Node *node_to_extract
)
{
  Scheduler_strong_APA_Context *self;
  Scheduler_strong_APA_Node *node;

  self = _Scheduler_strong_APA_Get_self( context );
  node = _Scheduler_strong_APA_Node_downcast( node_to_extract );

  _Assert( !_Chain_Is_empty( &self->Ready ) );
  _Assert( !_Chain_Is_node_off_chain( &node->Ready_node ) );

  _Chain_Extract_unprotected( &node->Ready_node );
  _Chain_Set_off_chain( &node->Ready_node );
}

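/*
 * Appends the node to the Ready chain of the context unless it is already
 * on it.
 */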
static inline void _Scheduler_strong_APA_Insert_ready(
  Scheduler_Context *context,
  Scheduler_Node *node_base,
  Priority_Control insert_priority
)
{
  Scheduler_strong_APA_Context *self;
  Scheduler_strong_APA_Node *node;

  self = _Scheduler_strong_APA_Get_self( context );
  node = _Scheduler_strong_APA_Node_downcast( node_base );

  if ( _Chain_Is_node_off_chain( &node->Ready_node ) ) {
    _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
  }
}

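/*
 * Moves a node that lost its processor from the scheduled chain back to
 * the Ready chain.
 */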
static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
  Scheduler_Context *context,
  Scheduler_Node *scheduled_to_ready
)
{
  Priority_Control insert_priority;

  _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
  insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );

  _Scheduler_strong_APA_Insert_ready(
    context,
    scheduled_to_ready,
    insert_priority
  );
}

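/*
 * Returns the lowest-priority scheduled node reachable through the CPUs
 * queued between front and rear and stores the CPU it executes on in
 * cpu_to_preempt. The affinity sets of the visited nodes extend the queue,
 * which implements the breadth-first search for task arrival.
 */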
static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_reachable(
  Scheduler_strong_APA_Context *self,
  uint32_t front,
  uint32_t rear,
  Per_CPU_Control **cpu_to_preempt
)
{
  Scheduler_Node *lowest_reachable;
  Priority_Control max_priority_num;
  uint32_t cpu_max;
  uint32_t cpu_index;
  Thread_Control *curr_thread;
  Per_CPU_Control *curr_CPU;
  Priority_Control curr_priority;
  Scheduler_Node *curr_node;
  Scheduler_strong_APA_Node *curr_strong_node; /* Current Strong_APA_Node */
  Scheduler_strong_APA_CPU *CPU;

  max_priority_num = 0; /* Max (lowest) priority encountered so far */
  CPU = self->CPU;
  cpu_max = _SMP_Get_processor_maximum();

  while ( front <= rear ) {
    curr_CPU = CPU[ front ].cpu;
    front = front + 1;

    curr_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
    curr_thread = curr_node->user;

    curr_priority = _Scheduler_Node_get_priority( curr_node );
    curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );

    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );

    if ( curr_priority > max_priority_num ) {
      lowest_reachable = curr_node;
      max_priority_num = curr_priority;
      *cpu_to_preempt = curr_CPU;
    }

    if ( !curr_thread->is_idle ) {
      for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
        if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) {
          /* Check if the CPU is in the affinity set of the node */
          Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
          if (
            _Per_CPU_Is_processor_online( cpu ) &&
            CPU[ cpu_index ].visited == false )
          {
            rear = rear + 1;
            CPU[ rear ].cpu = cpu;
            CPU[ cpu_index ].visited = true;
            CPU[ cpu_index ].preempting_node = curr_node;
          }
        }
      }
    }
  }

  return lowest_reachable;
}

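/*
 * Completes the arrival BFS: if the lowest reachable node has a lower
 * priority than the arriving node, the tasks along the backtracked path
 * are shifted and the lowest reachable node is preempted; otherwise the
 * arriving node needs help. In both cases the node ends up on the Ready
 * chain.
 */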
static inline bool _Scheduler_strong_APA_Do_enqueue(
  Scheduler_Context *context,
  Scheduler_Node *lowest_reachable,
  Scheduler_Node *node,
  Priority_Control insert_priority,
  Per_CPU_Control *cpu_to_preempt
)
{
  bool needs_help;
  Priority_Control node_priority;
  Priority_Control lowest_priority;
  Scheduler_strong_APA_Context *self;
  Scheduler_Node *curr_node;
  Scheduler_strong_APA_Node *curr_strong_node; /* Current Strong_APA_Node */
  Per_CPU_Control *curr_CPU;
  Scheduler_strong_APA_CPU *CPU;
  Scheduler_Node *next_node;

  self = _Scheduler_strong_APA_Get_self( context );
  CPU = self->CPU;

  node_priority = _Scheduler_Node_get_priority( node );
  node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );

  lowest_priority = _Scheduler_Node_get_priority( lowest_reachable );
  lowest_priority = SCHEDULER_PRIORITY_PURIFY( lowest_priority );

  if ( lowest_priority > node_priority ) {
    /*
     * Backtrack on the path from
     * _Thread_Get_CPU(lowest_reachable->user) to lowest_reachable, shifting
     * along every task.
     */

    curr_node = CPU[ _Per_CPU_Get_index( cpu_to_preempt ) ].preempting_node;
    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
    curr_strong_node->cpu_to_preempt = cpu_to_preempt;

    /* Save the CPU to preempt in the cpu_to_preempt value of the node */
    while ( curr_node != node ) {
      curr_CPU = _Thread_Get_CPU( curr_node->user );
      curr_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].preempting_node;
      curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
      curr_strong_node->cpu_to_preempt = curr_CPU;
    }

    curr_CPU = curr_strong_node->cpu_to_preempt;
    next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );

    node_priority = _Scheduler_Node_get_priority( curr_node );
    node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );

    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      curr_node,
      node_priority,
      next_node,
      _Scheduler_SMP_Insert_scheduled,
      _Scheduler_strong_APA_Move_from_scheduled_to_ready,
      _Scheduler_strong_APA_Allocate_processor
    );

    curr_node = next_node;
    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );

    while ( curr_node != lowest_reachable ) {
      curr_CPU = curr_strong_node->cpu_to_preempt;
      next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
      /* curr_node preempts next_node */
      _Scheduler_SMP_Preempt(
        context,
        curr_node,
        next_node,
        _Scheduler_strong_APA_Allocate_processor
      );

      curr_node = next_node;
      curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
    }

    _Scheduler_strong_APA_Move_from_scheduled_to_ready( context, lowest_reachable );

    needs_help = false;
  } else {
    needs_help = true;
  }

  /* Add the node to the Ready chain since it is now either scheduled or just ready. */
  _Scheduler_strong_APA_Insert_ready( context, node, insert_priority );

  return needs_help;
}

/*
 * Implements the BFS algorithm for task arrival and enqueues the node
 * either in the scheduled chain or in the ready chain.
 * node is the newly arrived node and is currently not scheduled.
 */
static inline bool _Scheduler_strong_APA_Enqueue(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Priority_Control insert_priority
)
{
  Scheduler_strong_APA_Context *self;
  Scheduler_strong_APA_CPU *CPU;
  uint32_t cpu_max;
  uint32_t cpu_index;
  Per_CPU_Control *cpu_to_preempt;
  Scheduler_Node *lowest_reachable;
  Scheduler_strong_APA_Node *strong_node;

  /* Denotes front and rear of the queue */
  uint32_t front;
  uint32_t rear;

  front = 0;
  rear = -1;

  self = _Scheduler_strong_APA_Get_self( context );
  strong_node = _Scheduler_strong_APA_Node_downcast( node );
  cpu_max = _SMP_Get_processor_maximum();
  CPU = self->CPU;

  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
    CPU[ cpu_index ].visited = false;

    /* Check if the CPU is in the affinity set of the node */
    if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index ) ) {
      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

      if ( _Per_CPU_Is_processor_online( cpu ) ) {
        rear = rear + 1;
        CPU[ rear ].cpu = cpu;
        CPU[ cpu_index ].visited = true;
        CPU[ cpu_index ].preempting_node = node;
      }
    }
  }

  /*
   * This assert makes sure that there is always at least one element in the
   * queue when the queue traversal starts.
   */
  _Assert( !_Processor_mask_Is_zero( &strong_node->Affinity ) );

  lowest_reachable = _Scheduler_strong_APA_Get_lowest_reachable(
    self,
    front,
    rear,
    &cpu_to_preempt
  );

  return _Scheduler_strong_APA_Do_enqueue(
    context,
    lowest_reachable,
    node,
    insert_priority,
    cpu_to_preempt
  );
}

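/*
 * Enqueues a node that is currently scheduled, delegating to the common
 * SMP scheduler framework with the Strong APA callbacks.
 */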
static inline bool _Scheduler_strong_APA_Enqueue_scheduled(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Priority_Control insert_priority
)
{
  return _Scheduler_SMP_Enqueue_scheduled(
    context,
    node,
    insert_priority,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Get_highest_ready,
    _Scheduler_strong_APA_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    _Scheduler_strong_APA_Allocate_processor
  );
}

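/*
 * Performs the ask-for-help operation through the common SMP scheduler
 * framework with the Strong APA callbacks.
 */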
static inline bool _Scheduler_strong_APA_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_strong_APA_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_strong_APA_Move_from_scheduled_to_ready,
    _Scheduler_strong_APA_Get_lowest_scheduled,
    _Scheduler_strong_APA_Allocate_processor
  );
}

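/*
 * Sets the affinity of the node to the processor mask passed via arg.
 */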
static inline void _Scheduler_strong_APA_Do_set_affinity(
  Scheduler_Context *context,
  Scheduler_Node *node_base,
  void *arg
)
{
  Scheduler_strong_APA_Node *node;

  node = _Scheduler_strong_APA_Node_downcast( node_base );
  node->Affinity = *( (const Processor_mask *) arg );
}

void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler )
{
  Scheduler_strong_APA_Context *self =
    _Scheduler_strong_APA_Get_context( scheduler );

  _Scheduler_SMP_Initialize( &self->Base );
  _Chain_Initialize_empty( &self->Ready );
}

void _Scheduler_strong_APA_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control *thread,
  Scheduler_Node *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Yield(
    context,
    thread,
    node,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Enqueue,
    _Scheduler_strong_APA_Enqueue_scheduled
  );
}

void _Scheduler_strong_APA_Block(
  const Scheduler_Control *scheduler,
  Thread_Control *thread,
  Scheduler_Node *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
  /* The extract from ready automatically removes the node from the Ready chain */
  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_strong_APA_Extract_from_scheduled,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Get_highest_ready,
    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    _Scheduler_strong_APA_Allocate_processor
  );
}

void _Scheduler_strong_APA_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control *thread,
  Scheduler_Node *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_strong_APA_Do_update,
    _Scheduler_strong_APA_Enqueue
  );
}

void _Scheduler_strong_APA_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control *thread,
  Scheduler_Node *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Do_update,
    _Scheduler_strong_APA_Enqueue,
    _Scheduler_strong_APA_Enqueue_scheduled,
    _Scheduler_strong_APA_Do_ask_for_help
  );
}

bool _Scheduler_strong_APA_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_strong_APA_Do_ask_for_help(
    context,
    the_thread,
    node
  );
}

void _Scheduler_strong_APA_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_strong_APA_Extract_from_ready
  );
}

void _Scheduler_strong_APA_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control *the_thread,
  Scheduler_Node *node,
  Thread_Scheduler_state next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Get_highest_ready,
    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    _Scheduler_strong_APA_Allocate_processor
  );
}

static inline void _Scheduler_strong_APA_Register_idle(
  Scheduler_Context *context,
  Scheduler_Node *idle_base,
  Per_CPU_Control *cpu
)
{
  Scheduler_strong_APA_Context *self;
  self = _Scheduler_strong_APA_Get_self( context );

  _Scheduler_strong_APA_Set_scheduled( self, idle_base, cpu );
}

void _Scheduler_strong_APA_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_strong_APA_Has_ready,
    _Scheduler_strong_APA_Enqueue_scheduled,
    _Scheduler_strong_APA_Register_idle
  );
}

void _Scheduler_strong_APA_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control *idle,
  Per_CPU_Control *cpu
)
{
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Do_start_idle(
    context,
    idle,
    cpu,
    _Scheduler_strong_APA_Register_idle
  );
}

Thread_Control *_Scheduler_strong_APA_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Enqueue
  );
}

void _Scheduler_strong_APA_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node *node,
  Thread_Control *the_thread,
  Priority_Control priority
)
{
  Scheduler_SMP_Node *smp_node;
  Scheduler_strong_APA_Node *strong_node;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  strong_node = _Scheduler_strong_APA_Node_downcast( node );

  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );

  _Processor_mask_Assign(
    &strong_node->Affinity,
    _SMP_Get_online_processors()
  );
}

bool _Scheduler_strong_APA_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control *thread,
  Scheduler_Node *node_base,
  const Processor_mask *affinity
)
{
  Scheduler_Context *context;
  Scheduler_strong_APA_Node *node;
  Processor_mask local_affinity;

  context = _Scheduler_Get_context( scheduler );
  _Processor_mask_And( &local_affinity, &context->Processors, affinity );

  if ( _Processor_mask_Is_zero( &local_affinity ) ) {
    return false;
  }

  node = _Scheduler_strong_APA_Node_downcast( node_base );

  if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) )
    return true; /* Nothing to do. Return true. */

  _Processor_mask_Assign( &node->Affinity, &local_affinity );

  _Scheduler_SMP_Set_affinity(
    context,
    thread,
    node_base,
    &local_affinity,
    _Scheduler_strong_APA_Do_set_affinity,
    _Scheduler_strong_APA_Extract_from_ready,
    _Scheduler_strong_APA_Get_highest_ready,
    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    _Scheduler_strong_APA_Enqueue,
    _Scheduler_strong_APA_Allocate_processor
  );

  return true;
}