diff --git a/include/nuttx/queue.h b/include/nuttx/queue.h index 04bda577ee7c8..76b25d6306c50 100644 --- a/include/nuttx/queue.h +++ b/include/nuttx/queue.h @@ -107,6 +107,17 @@ } \ while (0) +#define dq_addfirst_notempty(p, q) \ + do \ + { \ + FAR dq_entry_t *tmp_node = (p); \ + tmp_node->blink = NULL; \ + tmp_node->flink = (q)->head; \ + (q)->head->blink = tmp_node; \ + (q)->head = tmp_node; \ + } \ + while (0) + #define sq_addlast(p, q) \ do \ { \ diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c index c43c627021f60..50c0d133876ed 100644 --- a/sched/sched/sched_addreadytorun.c +++ b/sched/sched/sched_addreadytorun.c @@ -152,30 +152,14 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) bool nxsched_add_readytorun(FAR struct tcb_s *btcb) { FAR struct tcb_s *rtcb; + FAR struct tcb_s *headtcb; FAR dq_queue_t *tasklist; bool doswitch; int task_state; int cpu; int me; - /* Check if the blocked TCB is locked to this CPU */ - - if ((btcb->flags & TCB_FLAG_CPU_LOCKED) != 0) - { - /* Yes.. that is the CPU we must use */ - - task_state = TSTATE_TASK_ASSIGNED; - cpu = btcb->cpu; - } - else - { - /* Otherwise, find the CPU that is executing the lowest priority task - * (possibly its IDLE task). - */ - - task_state = TSTATE_TASK_READYTORUN; - cpu = nxsched_select_cpu(btcb->affinity); - } + cpu = nxsched_select_cpu(btcb->affinity); /* Get the task currently running on the CPU (may be the IDLE task) */ @@ -191,6 +175,10 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) { task_state = TSTATE_TASK_RUNNING; } + else + { + task_state = TSTATE_TASK_READYTORUN; + } /* If the selected state is TSTATE_TASK_RUNNING, then we would like to * start running the task. Be we cannot do that if pre-emption is @@ -205,8 +193,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) * situation. 
*/ - if ((nxsched_islocked_global()) && - task_state != TSTATE_TASK_ASSIGNED) + if (nxsched_islocked_global()) { /* Add the new ready-to-run task to the g_pendingtasks task list for * now. @@ -231,7 +218,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) btcb->task_state = TSTATE_TASK_READYTORUN; doswitch = false; } - else /* (task_state == TSTATE_TASK_ASSIGNED || task_state == TSTATE_TASK_RUNNING) */ + else /* (task_state == TSTATE_TASK_RUNNING) */ { /* If we are modifying some assigned task list other than our own, we * will need to stop that CPU. @@ -243,109 +230,32 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) DEBUGVERIFY(up_cpu_pause(cpu)); } - /* Add the task to the list corresponding to the selected state - * and check if a context switch will occur - */ + tasklist = &g_assignedtasks[cpu]; - tasklist = list_assignedtasks(cpu); - doswitch = nxsched_add_prioritized(btcb, tasklist); + /* Change "head" from TSTATE_TASK_RUNNING to TSTATE_TASK_ASSIGNED */ - /* If the selected task list was the g_assignedtasks[] list and if the - * new tasks is the highest priority (RUNNING) task, then a context - * switch will occur. + headtcb = (FAR struct tcb_s *)tasklist->head; + DEBUGASSERT(headtcb->task_state = TSTATE_TASK_RUNNING); + headtcb->task_state = TSTATE_TASK_ASSIGNED; + + /* Add btcb to the head of the g_assignedtasks + * task list and mark it as running */ - if (doswitch) - { - FAR struct tcb_s *next; - - /* The new btcb was added at the head of the ready-to-run list. It - * is now the new active task! - */ - - /* Assign the CPU and set the running state */ - - DEBUGASSERT(task_state == TSTATE_TASK_RUNNING); - - btcb->cpu = cpu; - btcb->task_state = TSTATE_TASK_RUNNING; - - /* Adjust global pre-emption controls. If the lockcount is - * greater than zero, then this task/this CPU holds the scheduler - * lock. 
- */ - - if (btcb->lockcount > 0) - { - g_cpu_lockset |= (1 << cpu); - } - else - { - g_cpu_lockset &= ~(1 << cpu); - } - - /* NOTE: If the task runs on another CPU(cpu), adjusting global IRQ - * controls will be done in the pause handler on the new CPU(cpu). - * If the task is scheduled on this CPU(me), do nothing because - * this CPU already has a critical section - */ - - /* If the following task is not locked to this CPU, then it must - * be moved to the g_readytorun list. Since it cannot be at the - * head of the list, we can do this without invoking any heavy - * lifting machinery. - */ - - DEBUGASSERT(btcb->flink != NULL); - next = btcb->flink; - - if ((next->flags & TCB_FLAG_CPU_LOCKED) != 0) - { - DEBUGASSERT(next->cpu == cpu); - next->task_state = TSTATE_TASK_ASSIGNED; - } - else - { - /* Remove the task from the assigned task list */ - - dq_rem((FAR dq_entry_t *)next, tasklist); - - /* Add the task to the g_readytorun or to the g_pendingtasks - * list. NOTE: That the above operations may cause the - * scheduler to become locked. It may be assigned to a - * different CPU the next time that it runs. - */ - - if (nxsched_islocked_global()) - { - next->task_state = TSTATE_TASK_PENDING; - tasklist = list_pendingtasks(); - } - else - { - next->task_state = TSTATE_TASK_READYTORUN; - tasklist = list_readytorun(); - } - - nxsched_add_prioritized(next, tasklist); - } - } - else + dq_addfirst_notempty((FAR dq_entry_t *)btcb, tasklist); + + DEBUGASSERT(task_state == TSTATE_TASK_RUNNING); + btcb->cpu = cpu; + btcb->task_state = TSTATE_TASK_RUNNING; + + doswitch = true; + + /* Resume scheduling lock */ + + DEBUGASSERT(g_cpu_lockset == 0); + if (btcb->lockcount > 0) { - /* No context switch. Assign the CPU and set the assigned state. - * - * REVISIT: I have seen this assertion fire. 
Apparently another
-           * CPU may add another, higher priority task to the same
-           * g_assignedtasks[] list sometime after nxsched_select_cpu() was
-           * called above, leaving this TCB in the wrong task list if
-           * task_state is TSTATE_TASK_ASSIGNED).
-           */
-
-          DEBUGASSERT(task_state == TSTATE_TASK_ASSIGNED);
-
-          btcb->cpu        = cpu;
-          btcb->task_state = TSTATE_TASK_ASSIGNED;
-          doswitch         = false;
+          g_cpu_lockset |= (1 << cpu);
         }

       /* All done, restart the other CPU (if it was paused). */
diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c
index d4f7d8ac47ca0..4f4369171ee0c 100644
--- a/sched/sched/sched_removereadytorun.c
+++ b/sched/sched/sched_removereadytorun.c
@@ -33,6 +33,31 @@
 #include "irq/irq.h"
 #include "sched/sched.h"

+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define dq_rem_head(p, q) \
+  do \
+    { \
+      FAR dq_entry_t *tmp_node = (p); \
+      FAR dq_entry_t *tmp_next = tmp_node->flink; \
+      (q)->head = tmp_next; \
+      tmp_next->blink = NULL; \
+      tmp_node->flink = NULL; \
+    } \
+  while (0)
+
+#define dq_rem_mid(p) \
+  do \
+    { \
+      FAR dq_entry_t *tmp_prev = (FAR dq_entry_t *)(p)->blink; \
+      FAR dq_entry_t *tmp_next = (FAR dq_entry_t *)(p)->flink; \
+      tmp_prev->flink = tmp_next; \
+      tmp_next->blink = tmp_prev; \
+    } \
+  while (0)
+
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -188,7 +213,48 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
    * or the g_assignedtasks[cpu] list.
    */

-  dq_rem((FAR dq_entry_t *)rtcb, tasklist);
+  dq_rem_head((FAR dq_entry_t *)rtcb, tasklist);
+
+  /* Find the highest priority non-running tasks in the g_assignedtasks
+   * list of other CPUs, and also non-idle tasks, place them in the
+   * g_readytorun list. 
so as to find the task with the highest priority, + * globally + */ + + for (int i = 0; i < CONFIG_SMP_NCPUS; i++) + { + if (i == cpu) + { + /* The highest priority task of the current + * CPU has been found, which is nxttcb. + */ + + continue; + } + + for (rtrtcb = (FAR struct tcb_s *)g_assignedtasks[i].head; + !is_idle_task(rtrtcb); rtrtcb = rtrtcb->flink) + { + if (rtrtcb->task_state != TSTATE_TASK_RUNNING && + CPU_ISSET(cpu, &rtrtcb->affinity)) + { + /* We have found the task with the highest priority whose + * CPU index is i. Since this task must be between the two + * tasks, we can use the dq_rem_mid macro to delete it. + */ + + dq_rem_mid(rtrtcb); + rtrtcb->task_state = TSTATE_TASK_READYTORUN; + + /* Add rtrtcb to g_readytorun to find + * the task with the highest global priority + */ + + nxsched_add_prioritized(rtrtcb, &g_readytorun); + break; + } + } + } /* Which task will go at the head of the list? It will be either the * next tcb in the assigned task list (nxttcb) or a TCB in the @@ -219,7 +285,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge) */ dq_rem((FAR dq_entry_t *)rtrtcb, list_readytorun()); - dq_addfirst((FAR dq_entry_t *)rtrtcb, tasklist); + dq_addfirst_notempty((FAR dq_entry_t *)rtrtcb, tasklist); rtrtcb->cpu = cpu; nxttcb = rtrtcb;