sched: remove csection and reduce the interrupt disabling time in sched_[un]lock

reason:
1. Accelerate sched_lock(): remove the enter_critical_section() call from sched_lock() and only enter the critical section when task scheduling is actually required.
2. Add sched_lock_wo_note()/sched_unlock_wo_note(), which do not perform the instrumentation logic.

Signed-off-by: hujun5 <[email protected]>
hujun260 committed Jan 22, 2025
1 parent 57188e6 commit 977ad95
Showing 7 changed files with 58 additions and 302 deletions.
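The user-visible API change is that sched_lock() and sched_unlock() now return void instead of int, so callers no longer check a return value (see the esp32_ble_adapter.c hunks below). A minimal before/after sketch of a caller; create_worker() and do_create_thread() are hypothetical names used only for illustration:

#include <sched.h>

/* Hypothetical helper used only for illustration */

static int do_create_thread(void)
{
  return 1;   /* pretend a task with pid 1 was created */
}

static int create_worker(void)
{
  int pid;

  /* Before this commit callers checked an int return value:
   *
   *   ret = sched_lock();
   *   if (ret) { ... handle error ... }
   *
   * After this commit the lock/unlock pair needs no error handling.
   */

  sched_lock();
  pid = do_create_thread();
  sched_unlock();

  return pid;
}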
14 changes: 2 additions & 12 deletions arch/xtensa/src/esp32/esp32_ble_adapter.c
@@ -2353,12 +2353,7 @@ static int32_t esp_task_create_pinned_to_core(void *entry,
   DEBUGASSERT(task_handle != NULL);
 
 #ifdef CONFIG_SMP
-  ret = sched_lock();
-  if (ret)
-    {
-      wlerr("Failed to lock scheduler before creating pinned thread\n");
-      return false;
-    }
+  sched_lock();
 #endif
 
   pid = kthread_create(name, prio, stack_depth, entry,
@@ -2390,12 +2385,7 @@ static int32_t esp_task_create_pinned_to_core(void *entry,
     }
 
 #ifdef CONFIG_SMP
-  ret = sched_unlock();
-  if (ret)
-    {
-      wlerr("Failed to unlock scheduler after creating pinned thread\n");
-      return false;
-    }
+  sched_unlock();
 #endif
 
   return pid > 0;
1 change: 1 addition & 0 deletions include/nuttx/sched.h
@@ -110,6 +110,7 @@
 #define TCB_FLAG_JOIN_COMPLETED (1 << 13) /* Bit 13: Pthread join completed */
 #define TCB_FLAG_FREE_TCB       (1 << 14) /* Bit 14: Free tcb after exit */
 #define TCB_FLAG_SIGDELIVER     (1 << 15) /* Bit 15: Deliver pending signals */
+#define TCB_FLAG_PREEMPT_SCHED  (1 << 16) /* Bit 16: tcb is PREEMPT_SCHED */
 
 /* Values for struct task_group tg_flags */
 
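The new TCB_FLAG_PREEMPT_SCHED bit follows the same convention as the other TCB_FLAG_* values above (it occupies bit 16, which implies a flag word wider than 16 bits). A minimal sketch of how such a flag is typically set and tested on a TCB; the helper function is hypothetical and not part of this commit:

#include <nuttx/sched.h>

/* Hypothetical helper, illustrating the usual TCB_FLAG_* usage pattern */

static void mark_preempt_sched(FAR struct tcb_s *tcb)
{
  /* Set the flag on the TCB */

  tcb->flags |= TCB_FLAG_PREEMPT_SCHED;

  /* ... later, test and clear it */

  if ((tcb->flags & TCB_FLAG_PREEMPT_SCHED) != 0)
    {
      tcb->flags &= ~TCB_FLAG_PREEMPT_SCHED;
    }
}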
4 changes: 2 additions & 2 deletions include/sched.h
@@ -265,8 +265,8 @@ int sched_cpucount(FAR const cpu_set_t *set);
 
 /* Task Switching Interfaces (non-standard) */
 
-int sched_lock(void);
-int sched_unlock(void);
+void sched_lock(void);
+void sched_unlock(void);
 int sched_lockcount(void);
 
 /* Queries */
2 changes: 2 additions & 0 deletions sched/sched/sched.h
@@ -415,6 +415,8 @@ void nxsched_update_critmon(FAR struct tcb_s *tcb);
 #if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
 void nxsched_critmon_preemption(FAR struct tcb_s *tcb, bool state,
                                 FAR void *caller);
+#else
+#  define nxsched_critmon_preemption(t, s, c)
 #endif
 
 #if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
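The new #else branch turns nxsched_critmon_preemption() into a no-op macro when CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION < 0, so the call in the rewritten sched_lock() below no longer needs its own #if guard. A simplified sketch of the stub-macro pattern, not the literal NuttX header; note_locked() is a hypothetical wrapper used only for illustration:

#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
void nxsched_critmon_preemption(FAR struct tcb_s *tcb, bool state,
                                FAR void *caller);
#else
#  define nxsched_critmon_preemption(t, s, c)   /* compiles to nothing */
#endif

static inline void note_locked(FAR struct tcb_s *rtcb)
{
  /* Expands to a real call or to nothing, depending on the config */

  nxsched_critmon_preemption(rtcb, true, return_address(0));
}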
105 changes: 8 additions & 97 deletions sched/sched/sched_lock.c
@@ -64,112 +64,23 @@
  *
  ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_lock(void)
+void sched_lock(void)
 {
   FAR struct tcb_s *rtcb;
 
-  /* If the CPU supports suppression of interprocessor interrupts, then
-   * simple disabling interrupts will provide sufficient protection for
-   * the following operation.
-   */
-
-  rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during early
-   * boot-up phases, and (2) sched_lock() should have no effect if called
-   * from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
-    {
-      irqstate_t flags;
-
-      /* Catch attempts to increment the lockcount beyond the range of the
-       * integer type.
-       */
-
-      DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
-
-      flags = enter_critical_section();
-
-      /* A counter is used to support locking. This allows nested lock
-       * operations on this thread
-       */
-
-      rtcb->lockcount++;
-
-      /* Check if we just acquired the lock */
-
-      if (rtcb->lockcount == 1)
-        {
-          /* Note that we have pre-emption locked */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
-          nxsched_critmon_preemption(rtcb, true, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
-          sched_note_preemption(rtcb, true);
-#endif
-        }
-
-      /* Move any tasks in the ready-to-run list to the pending task list
-       * where they will not be available to run until the scheduler is
-       * unlocked and nxsched_merge_pending() is called.
-       */
-
-      nxsched_merge_prioritized(list_readytorun(),
-                                list_pendingtasks(),
-                                TSTATE_TASK_PENDING);
-
-      leave_critical_section(flags);
-    }
-
-  return OK;
-}
-
-#else /* CONFIG_SMP */
-
-int sched_lock(void)
-{
-  FAR struct tcb_s *rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during early
-   * boot-up phases, and (2) sched_lock() should have no effect if called
-   * from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
+  if (!up_interrupt_context())
     {
-      /* Catch attempts to increment the lockcount beyond the range of the
-       * integer type.
-       */
-
-      DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
-
-      /* A counter is used to support locking. This allows nested lock
-       * operations on this thread (on any CPU)
-       */
-
-      rtcb->lockcount++;
-
-      /* Check if we just acquired the lock */
-
-      if (rtcb->lockcount == 1)
+      rtcb = this_task();
+      if (rtcb != NULL && rtcb->lockcount++ == 0)
         {
-          /* Note that we have pre-emption locked */
+#if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) || \
+    defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
+          irqstate_t flags = enter_critical_section_wo_note();
 
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
           nxsched_critmon_preemption(rtcb, true, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
           sched_note_preemption(rtcb, true);
+          leave_critical_section_wo_note(flags);
 #endif
         }
     }
-
-  return OK;
 }
-
-#endif /* CONFIG_SMP */
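With the critical section gone from the common path, sched_lock() now only increments rtcb->lockcount; the enter_critical_section_wo_note()/leave_critical_section_wo_note() pair is taken briefly, and only on the first (outermost) acquisition, to cover the critical-section monitor and instrumentation hooks. A hedged sketch of how nested use behaves under the new API; worker() is a hypothetical function and the comments describe the intended lockcount behavior, not code from this commit:

#include <sched.h>

static void worker(void)
{
  sched_lock();      /* lockcount: 0 -> 1, pre-emption disabled */
  sched_lock();      /* lockcount: 1 -> 2, nested call, nothing else to do */

  /* ... touch state that must not be preempted ... */

  sched_unlock();    /* lockcount: 2 -> 1, still locked */
  sched_unlock();    /* lockcount: 1 -> 0, pre-emption restored */
}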