
Commit

irq: irq_cpu_locked() is always called from within the critical section, and its parameter is always the current CPU id.

So it must return false, and we can safely delete it.

Signed-off-by: hujun5 <[email protected]>
hujun260 committed Apr 10, 2024
1 parent 729e9fc commit debdfa2
Showing 6 changed files with 5 additions and 108 deletions.
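For context on the reasoning above, here is a minimal, self-contained sketch of the logic the removed irq_cpu_locked() implemented (this is not NuttX code; irq_cpu_locked_model, cpu_set_model_t, and the CPU id used in main() are illustrative assumptions). It shows that the result can only be false both when the querying CPU's own bit is set in the sampled set, as it is while that CPU holds the critical section, and when no bit is set at all:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for cpu_set_t: bit 'i' is 1 while CPU 'i' holds the IRQ
 * critical section.
 */

typedef unsigned int cpu_set_model_t;

static bool irq_cpu_locked_model(cpu_set_model_t irqset, int cpu)
{
  if (irqset != 0)
    {
      /* Locked: true only if a *different* CPU holds the lock */

      return (irqset & (1u << cpu)) == 0;
    }

  /* No CPU holds the lock */

  return false;
}

int main(void)
{
  int me = 1;  /* hypothetical id of the calling CPU */

  /* The caller is inside the critical section, so its own bit is set
   * in the sampled set: the result can only be false.
   */

  printf("%d\n", irq_cpu_locked_model(1u << me, me));  /* prints 0 */

  /* No CPU holds the lock: also false. */

  printf("%d\n", irq_cpu_locked_model(0u, me));        /* prints 0 */

  return 0;
}

As the commit message states, every call site passes this_cpu() and runs with the critical section already held, so only these two cases can occur; that is why the irq_cpu_locked() terms in the scheduler conditions below can be dropped without changing behavior.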
27 changes: 0 additions & 27 deletions sched/irq/irq.h
@@ -150,33 +150,6 @@ void irq_initialize(void);

int irq_unexpected_isr(int irq, FAR void *context, FAR void *arg);

/****************************************************************************
* Name: irq_cpu_locked
*
* Description:
* Test if the IRQ lock set OR if this CPU holds the IRQ lock
* There is an interaction with pre-emption controls and IRQ locking:
* Even if the pre-emption is enabled, tasks will be forced to pend if
* the IRQ lock is also set UNLESS the CPU starting the task is the
* holder of the IRQ lock.
*
* Input Parameters:
* rtcb - Points to the blocked TCB that is ready-to-run
*
* Returned Value:
* true - IRQs are locked by a different CPU.
* false - IRQs are unlocked OR if they are locked BUT this CPU
* is the holder of the lock.
*
* Warning: This values are volatile at only valid at the instance that
* the CPU set was queried.
*
****************************************************************************/

#ifdef CONFIG_SMP
bool irq_cpu_locked(int cpu);
#endif

/****************************************************************************
* Name: irq_foreach
*
74 changes: 0 additions & 74 deletions sched/irq/irq_csection.c
@@ -637,80 +637,6 @@ void leave_critical_section(irqstate_t flags)
}
#endif

/****************************************************************************
* Name: irq_cpu_locked
*
* Description:
* Test if the IRQ lock set OR if this CPU holds the IRQ lock
* There is an interaction with pre-emption controls and IRQ locking:
* Even if the pre-emption is enabled, tasks will be forced to pend if
* the IRQ lock is also set UNLESS the CPU starting the task is the
* holder of the IRQ lock.
*
* Input Parameters:
* cpu - Points to which cpu
*
* Returned Value:
* true - IRQs are locked by a different CPU.
* false - IRQs are unlocked OR if they are locked BUT this CPU
* is the holder of the lock.
*
* Warning: This values are volatile at only valid at the instance that
* the CPU set was queried.
*
****************************************************************************/

#ifdef CONFIG_SMP
bool irq_cpu_locked(int cpu)
{
cpu_set_t irqset;

/* g_cpu_irqset is not valid in early phases of initialization */

if (nxsched_get_initstate() < OSINIT_OSREADY)
{
/* We are still single threaded. In either state of g_cpu_irqlock,
* the correct return value should always be false.
*/

return false;
}

/* Test if g_cpu_irqlock is locked. We don't really need to use check
* g_cpu_irqlock to do this, we can use the g_cpu_set.
*
* Sample the g_cpu_irqset once. That is an atomic operation. All
* subsequent operations will operate on the sampled cpu set.
*/

irqset = (cpu_set_t)g_cpu_irqset;
if (irqset != 0)
{
/* Some CPU holds the lock. So g_cpu_irqlock should be locked.
* Return false if the 'cpu' is the holder of the lock; return
* true if g_cpu_irqlock is locked, but this CPU is not the
* holder of the lock.
*/

return ((irqset & (1 << cpu)) == 0);
}

/* No CPU holds the lock */

else
{
/* In this case g_cpu_irqlock should be unlocked. However, if
* the lock was established in the interrupt handler AND there are
* no bits set in g_cpu_irqset, that probably means only that
* critical section was established from an interrupt handler.
* Return false in either case.
*/

return false;
}
}
#endif

/****************************************************************************
* Name: restore_critical_section
*
2 changes: 1 addition & 1 deletion sched/sched/sched_addreadytorun.c
@@ -223,7 +223,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
*/

me = this_cpu();
- if ((nxsched_islocked_global() || irq_cpu_locked(me)) &&
+ if ((nxsched_islocked_global()) &&
task_state != TSTATE_TASK_ASSIGNED)
{
/* Add the new ready-to-run task to the g_pendingtasks task list for
6 changes: 2 additions & 4 deletions sched/sched/sched_mergepending.c
@@ -190,16 +190,14 @@ bool nxsched_merge_pending(void)
FAR struct tcb_s *tcb;
bool ret = false;
int cpu;
- int me;

/* Remove and process every TCB in the g_pendingtasks list.
*
* Do nothing if (1) pre-emption is still disabled (by any CPU), or (2) if
* some CPU other than this one is in a critical section.
*/

- me = this_cpu();
- if (!nxsched_islocked_global() && !irq_cpu_locked(me))
+ if (!nxsched_islocked_global())
{
/* Find the CPU that is executing the lowest priority task */

@@ -237,7 +235,7 @@
* Check if that happened.
*/

- if (nxsched_islocked_global() || irq_cpu_locked(me))
+ if (nxsched_islocked_global())
{
/* Yes.. then we may have incorrectly placed some TCBs in the
* g_readytorun list (unlikely, but possible). We will have to
2 changes: 1 addition & 1 deletion sched/sched/sched_setpriority.c
@@ -68,7 +68,7 @@ static FAR struct tcb_s *nxsched_nexttcb(FAR struct tcb_s *tcb)
* then use the 'nxttcb' which will probably be the IDLE thread.
*/

- if (!nxsched_islocked_global() && !irq_cpu_locked(this_cpu()))
+ if (!nxsched_islocked_global())
{
/* Search for the highest priority task that can run on tcb->cpu. */

2 changes: 1 addition & 1 deletion sched/sched/sched_unlock.c
@@ -137,7 +137,7 @@ int sched_unlock(void)
* BEFORE it clears IRQ lock.
*/

- if (!nxsched_islocked_global() && !irq_cpu_locked(cpu) &&
+ if (!nxsched_islocked_global() &&
list_pendingtasks()->head != NULL)
{
if (nxsched_merge_pending())
