diff --git a/arch/xtensa/src/esp32/esp32_ble_adapter.c b/arch/xtensa/src/esp32/esp32_ble_adapter.c index 253a407b4d47f..c6122c3ce87a5 100644 --- a/arch/xtensa/src/esp32/esp32_ble_adapter.c +++ b/arch/xtensa/src/esp32/esp32_ble_adapter.c @@ -2353,12 +2353,7 @@ static int32_t esp_task_create_pinned_to_core(void *entry, DEBUGASSERT(task_handle != NULL); #ifdef CONFIG_SMP - ret = sched_lock(); - if (ret) - { - wlerr("Failed to lock scheduler before creating pinned thread\n"); - return false; - } + sched_lock(); #endif pid = kthread_create(name, prio, stack_depth, entry, @@ -2390,12 +2385,7 @@ static int32_t esp_task_create_pinned_to_core(void *entry, } #ifdef CONFIG_SMP - ret = sched_unlock(); - if (ret) - { - wlerr("Failed to unlock scheduler after creating pinned thread\n"); - return false; - } + sched_unlock(); #endif return pid > 0; diff --git a/drivers/note/note_driver.c b/drivers/note/note_driver.c index a6be1ca29d8f2..8bef2b1575131 100644 --- a/drivers/note/note_driver.c +++ b/drivers/note/note_driver.c @@ -1841,7 +1841,7 @@ void sched_note_filter_mode(FAR struct note_filter_named_mode_s *oldm, irqstate_t irq_mask; FAR struct note_driver_s **driver; - irq_mask = spin_lock_irqsave_wo_note(&g_note_lock); + irq_mask = raw_spin_lock_irqsave(&g_note_lock); if (oldm != NULL) { @@ -1877,7 +1877,7 @@ void sched_note_filter_mode(FAR struct note_filter_named_mode_s *oldm, } } - spin_unlock_irqrestore_wo_note(&g_note_lock, irq_mask); + raw_spin_unlock_irqrestore(&g_note_lock, irq_mask); } /**************************************************************************** @@ -1907,7 +1907,7 @@ void sched_note_filter_syscall(FAR struct note_filter_named_syscall_s *oldf, irqstate_t irq_mask; FAR struct note_driver_s **driver; - irq_mask = spin_lock_irqsave_wo_note(&g_note_lock); + irq_mask = raw_spin_lock_irqsave(&g_note_lock); if (oldf != NULL) { @@ -1943,7 +1943,7 @@ void sched_note_filter_syscall(FAR struct note_filter_named_syscall_s *oldf, } } - spin_unlock_irqrestore_wo_note(&g_note_lock, irq_mask); + raw_spin_unlock_irqrestore(&g_note_lock, irq_mask); } #endif @@ -1974,7 +1974,7 @@ void sched_note_filter_irq(FAR struct note_filter_named_irq_s *oldf, irqstate_t irq_mask; FAR struct note_driver_s **driver; - irq_mask = spin_lock_irqsave_wo_note(&g_note_lock); + irq_mask = raw_spin_lock_irqsave(&g_note_lock); if (oldf != NULL) { @@ -2010,7 +2010,7 @@ void sched_note_filter_irq(FAR struct note_filter_named_irq_s *oldf, } } - spin_unlock_irqrestore_wo_note(&g_note_lock, irq_mask); + raw_spin_unlock_irqrestore(&g_note_lock, irq_mask); } #endif @@ -2041,7 +2041,7 @@ void sched_note_filter_tag(FAR struct note_filter_named_tag_s *oldf, FAR struct note_driver_s **driver; irqstate_t irq_mask; - irq_mask = spin_lock_irqsave_wo_note(&g_note_lock); + irq_mask = raw_spin_lock_irqsave(&g_note_lock); if (oldf != NULL) { @@ -2077,7 +2077,7 @@ void sched_note_filter_tag(FAR struct note_filter_named_tag_s *oldf, } } - spin_unlock_irqrestore_wo_note(&g_note_lock, irq_mask); + raw_spin_unlock_irqrestore(&g_note_lock, irq_mask); } #endif diff --git a/drivers/note/noteram_driver.c b/drivers/note/noteram_driver.c index d41d74a2d423f..a099558341a2e 100644 --- a/drivers/note/noteram_driver.c +++ b/drivers/note/noteram_driver.c @@ -467,9 +467,9 @@ static ssize_t noteram_read(FAR struct file *filep, FAR char *buffer, if (ctx->mode == NOTERAM_MODE_READ_BINARY) { - flags = spin_lock_irqsave_wo_note(&drv->lock); + flags = raw_spin_lock_irqsave(&drv->lock); ret = noteram_get(drv, (FAR uint8_t *)buffer, buflen); - 
spin_unlock_irqrestore_wo_note(&drv->lock, flags); + raw_spin_unlock_irqrestore(&drv->lock, flags); } else { @@ -481,9 +481,9 @@ static ssize_t noteram_read(FAR struct file *filep, FAR char *buffer, /* Get the next note (removing it from the buffer) */ - flags = spin_lock_irqsave_wo_note(&drv->lock); + flags = raw_spin_lock_irqsave(&drv->lock); ret = noteram_get(drv, note, sizeof(note)); - spin_unlock_irqrestore_wo_note(&drv->lock, flags); + raw_spin_unlock_irqrestore(&drv->lock, flags); if (ret <= 0) { return ret; @@ -508,7 +508,7 @@ static int noteram_ioctl(FAR struct file *filep, int cmd, unsigned long arg) { int ret = -ENOSYS; FAR struct noteram_driver_s *drv = filep->f_inode->i_private; - irqstate_t flags = spin_lock_irqsave_wo_note(&drv->lock); + irqstate_t flags = raw_spin_lock_irqsave(&drv->lock); /* Handle the ioctl commands */ @@ -600,7 +600,7 @@ static int noteram_ioctl(FAR struct file *filep, int cmd, unsigned long arg) break; } - spin_unlock_irqrestore_wo_note(&drv->lock, flags); + raw_spin_unlock_irqrestore(&drv->lock, flags); return ret; } @@ -622,7 +622,7 @@ static int noteram_poll(FAR struct file *filep, FAR struct pollfd *fds, DEBUGASSERT(inode != NULL && inode->i_private != NULL); drv = inode->i_private; - flags = spin_lock_irqsave_wo_note(&drv->lock); + flags = raw_spin_lock_irqsave(&drv->lock); /* Ignore waits that do not include POLLIN */ @@ -655,7 +655,7 @@ static int noteram_poll(FAR struct file *filep, FAR struct pollfd *fds, if (noteram_unread_length(drv) > 0) { - spin_unlock_irqrestore_wo_note(&drv->lock, flags); + raw_spin_unlock_irqrestore(&drv->lock, flags); poll_notify(&drv->pfd, 1, POLLIN); return ret; } @@ -666,7 +666,7 @@ static int noteram_poll(FAR struct file *filep, FAR struct pollfd *fds, } errout: - spin_unlock_irqrestore_wo_note(&drv->lock, flags); + raw_spin_unlock_irqrestore(&drv->lock, flags); return ret; } @@ -698,11 +698,11 @@ static void noteram_add(FAR struct note_driver_s *driver, unsigned int space; irqstate_t flags; - flags = spin_lock_irqsave_wo_note(&drv->lock); + flags = raw_spin_lock_irqsave(&drv->lock); if (drv->ni_overwrite == NOTERAM_MODE_OVERWRITE_OVERFLOW) { - spin_unlock_irqrestore_wo_note(&drv->lock, flags); + raw_spin_unlock_irqrestore(&drv->lock, flags); return; } @@ -716,7 +716,7 @@ static void noteram_add(FAR struct note_driver_s *driver, /* Stop recording if not in overwrite mode */ drv->ni_overwrite = NOTERAM_MODE_OVERWRITE_OVERFLOW; - spin_unlock_irqrestore_wo_note(&drv->lock, flags); + raw_spin_unlock_irqrestore(&drv->lock, flags); return; } @@ -737,7 +737,7 @@ static void noteram_add(FAR struct note_driver_s *driver, memcpy(drv->ni_buffer + head, note, space); memcpy(drv->ni_buffer, buf + space, notelen - space); drv->ni_head = noteram_next(drv, head, NOTE_ALIGN(notelen)); - spin_unlock_irqrestore_wo_note(&drv->lock, flags); + raw_spin_unlock_irqrestore(&drv->lock, flags); poll_notify(&drv->pfd, 1, POLLIN); } diff --git a/drivers/note/noterpmsg_driver.c b/drivers/note/noterpmsg_driver.c index 2db84478f7c35..1d866198265dd 100644 --- a/drivers/note/noterpmsg_driver.c +++ b/drivers/note/noterpmsg_driver.c @@ -180,7 +180,7 @@ static bool noterpmsg_transfer(FAR struct noterpmsg_driver_s *drv, static void noterpmsg_work(FAR void *priv) { FAR struct noterpmsg_driver_s *drv = priv; - irqstate_t flags = spin_lock_irqsave_wo_note(&drv->lock); + irqstate_t flags = raw_spin_lock_irqsave(&drv->lock); if (!noterpmsg_transfer(drv, false)) { @@ -188,7 +188,7 @@ static void noterpmsg_work(FAR void *priv) NOTE_RPMSG_WORK_DELAY); } - 
spin_unlock_irqrestore_wo_note(&drv->lock, flags); + raw_spin_unlock_irqrestore(&drv->lock, flags); } static void noterpmsg_add(FAR struct note_driver_s *driver, @@ -199,7 +199,7 @@ static void noterpmsg_add(FAR struct note_driver_s *driver, irqstate_t flags; size_t space; - flags = spin_lock_irqsave_wo_note(&drv->lock); + flags = raw_spin_lock_irqsave(&drv->lock); space = CONFIG_DRIVERS_NOTERPMSG_BUFSIZE - noterpmsg_length(drv); if (space < notelen) @@ -236,7 +236,7 @@ static void noterpmsg_add(FAR struct note_driver_s *driver, NOTE_RPMSG_WORK_DELAY); } - spin_unlock_irqrestore_wo_note(&drv->lock, flags); + raw_spin_unlock_irqrestore(&drv->lock, flags); } static int noterpmsg_ept_cb(FAR struct rpmsg_endpoint *ept, diff --git a/drivers/segger/config/SEGGER_RTT_Conf.h b/drivers/segger/config/SEGGER_RTT_Conf.h index aa19a89390be8..bdcfc23c58e14 100644 --- a/drivers/segger/config/SEGGER_RTT_Conf.h +++ b/drivers/segger/config/SEGGER_RTT_Conf.h @@ -89,11 +89,11 @@ extern spinlock_t g_segger_lock; /* Lock RTT (nestable) (i.e. disable interrupts) */ -#define SEGGER_RTT_LOCK() irqstate_t __flags = spin_lock_irqsave_wo_note(&g_segger_lock) +#define SEGGER_RTT_LOCK() irqstate_t __flags = raw_spin_lock_irqsave(&g_segger_lock) /* Unlock RTT (nestable) (i.e. enable previous interrupt lock state) */ -#define SEGGER_RTT_UNLOCK() spin_unlock_irqrestore_wo_note(&g_segger_lock, __flags) +#define SEGGER_RTT_UNLOCK() raw_spin_unlock_irqrestore(&g_segger_lock, __flags) /* Disable RTT SEGGER_RTT_WriteSkipNoLock */ diff --git a/drivers/syslog/syslog_intbuffer.c b/drivers/syslog/syslog_intbuffer.c index 49ff74093922e..02adf2b4946f6 100644 --- a/drivers/syslog/syslog_intbuffer.c +++ b/drivers/syslog/syslog_intbuffer.c @@ -150,7 +150,7 @@ void syslog_add_intbuffer(FAR const char *buffer, size_t buflen) /* Disable concurrent modification from interrupt handling logic */ - flags = spin_lock_irqsave_wo_note(&g_syslog_intbuffer.splock); + flags = raw_spin_lock_irqsave(&g_syslog_intbuffer.splock); space = circbuf_space(&g_syslog_intbuffer.circ); @@ -172,7 +172,7 @@ void syslog_add_intbuffer(FAR const char *buffer, size_t buflen) buffer + space, buflen - space); } - spin_unlock_irqrestore_wo_note(&g_syslog_intbuffer.splock, flags); + raw_spin_unlock_irqrestore(&g_syslog_intbuffer.splock, flags); } /**************************************************************************** @@ -198,9 +198,9 @@ void syslog_flush_intbuffer(bool force) { irqstate_t flags; - flags = spin_lock_irqsave_wo_note(&g_syslog_intbuffer.splock); + flags = raw_spin_lock_irqsave(&g_syslog_intbuffer.splock); syslog_flush_internal(force, sizeof(g_syslog_intbuffer.buffer)); - spin_unlock_irqrestore_wo_note(&g_syslog_intbuffer.splock, flags); + raw_spin_unlock_irqrestore(&g_syslog_intbuffer.splock, flags); } #endif /* CONFIG_SYSLOG_INTBUFFER */ diff --git a/include/nuttx/irq.h b/include/nuttx/irq.h index ea4de3faf8ea8..6f2dbb6cb4361 100644 --- a/include/nuttx/irq.h +++ b/include/nuttx/irq.h @@ -80,7 +80,7 @@ do \ { \ g_cpu_irqset = 0; \ - spin_unlock_wo_note(&g_cpu_irqlock); \ + raw_spin_unlock(&g_cpu_irqlock); \ } \ while (0) #endif diff --git a/include/nuttx/sched.h b/include/nuttx/sched.h index f9b8635faffff..9846269e437cc 100644 --- a/include/nuttx/sched.h +++ b/include/nuttx/sched.h @@ -110,6 +110,7 @@ #define TCB_FLAG_JOIN_COMPLETED (1 << 13) /* Bit 13: Pthread join completed */ #define TCB_FLAG_FREE_TCB (1 << 14) /* Bit 14: Free tcb after exit */ #define TCB_FLAG_SIGDELIVER (1 << 15) /* Bit 15: Deliver pending signals */ +#define 
TCB_FLAG_PREEMPT_SCHED (1 << 16) /* Bit 16: tcb is PREEMPT_SCHED */ /* Values for struct task_group tg_flags */ diff --git a/include/nuttx/spinlock.h b/include/nuttx/spinlock.h index 01abb24fd3ec0..00190482c55c6 100644 --- a/include/nuttx/spinlock.h +++ b/include/nuttx/spinlock.h @@ -168,7 +168,7 @@ static inline spinlock_t up_testset(FAR volatile spinlock_t *lock) #define spin_lock_init(l) do { *(l) = SP_UNLOCKED; } while (0) /**************************************************************************** - * Name: spin_lock_wo_note + * Name: raw_spin_lock * * Description: * If this CPU does not already hold the spinlock, then loop until the @@ -190,7 +190,7 @@ static inline spinlock_t up_testset(FAR volatile spinlock_t *lock) ****************************************************************************/ #ifdef CONFIG_SPINLOCK -static inline_function void spin_lock_wo_note(FAR volatile spinlock_t *lock) +static inline_function void raw_spin_lock(FAR volatile spinlock_t *lock) { #ifdef CONFIG_TICKET_SPINLOCK int ticket = atomic_fetch_add(&lock->next, 1); @@ -233,22 +233,26 @@ static inline_function void spin_lock_wo_note(FAR volatile spinlock_t *lock) #ifdef CONFIG_SPINLOCK static inline_function void spin_lock(FAR volatile spinlock_t *lock) { + sched_lock(); + /* Notify that we are waiting for a spinlock */ sched_note_spinlock_lock(lock); /* Lock without trace note */ - spin_lock_wo_note(lock); + raw_spin_lock(lock); /* Notify that we have the spinlock */ sched_note_spinlock_locked(lock); } +#else +# define spin_lock(l) sched_lock() #endif /* CONFIG_SPINLOCK */ /**************************************************************************** - * Name: spin_trylock_wo_note + * Name: raw_spin_trylock * * Description: * Try once to lock the spinlock. Do not wait if the spinlock is already @@ -271,7 +275,7 @@ static inline_function void spin_lock(FAR volatile spinlock_t *lock) #ifdef CONFIG_SPINLOCK static inline_function bool -spin_trylock_wo_note(FAR volatile spinlock_t *lock) +raw_spin_trylock(FAR volatile spinlock_t *lock) { #ifdef CONFIG_TICKET_SPINLOCK if (!atomic_cmpxchg(&lock->next, &lock->owner, @@ -313,13 +317,15 @@ static inline_function bool spin_trylock(FAR volatile spinlock_t *lock) { bool locked; + sched_lock(); + /* Notify that we are waiting for a spinlock */ sched_note_spinlock_lock(lock); /* Try lock without trace note */ - locked = spin_trylock_wo_note(lock); + locked = raw_spin_trylock(lock); if (locked) { /* Notify that we have the spinlock */ @@ -331,14 +337,17 @@ static inline_function bool spin_trylock(FAR volatile spinlock_t *lock) /* Notify that we abort for a spinlock */ sched_note_spinlock_abort(lock); + sched_unlock(); } return locked; } +#else +# define spin_trylock(l) (sched_lock(), true) #endif /* CONFIG_SPINLOCK */ /**************************************************************************** - * Name: spin_unlock_wo_note + * Name: raw_spin_unlock * * Description: * Release one count on a non-reentrant spinlock. 
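A minimal caller-side sketch of the renamed spinlock API above. All names here (g_dev_lock, g_dev_count, dev_increment) are hypothetical and not part of this patch: spin_lock_irqsave()/spin_unlock_irqrestore() now also disable and re-enable preemption via sched_lock()/sched_unlock(), while the raw_spin_* variants (the former *_wo_note functions) only handle interrupts and the lock itself, which is why the note/trace drivers in this patch switch to them.

#include <nuttx/config.h>
#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

static spinlock_t g_dev_lock = SP_UNLOCKED;  /* hypothetical driver state */
static int g_dev_count;

void dev_increment(void)                     /* hypothetical helper */
{
  irqstate_t flags;

  /* Disables interrupts, takes the lock, emits the trace notes, and (with
   * this patch) also calls sched_lock() so the holder cannot be preempted.
   */

  flags = spin_lock_irqsave(&g_dev_lock);
  g_dev_count++;

  /* Drops the lock, restores interrupts, and calls sched_unlock(). */

  spin_unlock_irqrestore(&g_dev_lock, flags);
}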
@@ -359,7 +368,7 @@ static inline_function bool spin_trylock(FAR volatile spinlock_t *lock) #ifdef CONFIG_SPINLOCK static inline_function void -spin_unlock_wo_note(FAR volatile spinlock_t *lock) +raw_spin_unlock(FAR volatile spinlock_t *lock) { UP_DMB(); #ifdef CONFIG_TICKET_SPINLOCK @@ -395,15 +404,19 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock) { /* Unlock without trace note */ - spin_unlock_wo_note(lock); + raw_spin_unlock(lock); /* Notify that we are unlocking the spinlock */ sched_note_spinlock_unlock(lock); + + sched_unlock(); } # else -# define spin_unlock(l) do { *(l) = SP_UNLOCKED; } while (0) +# define spin_unlock(l) do { *(l) = SP_UNLOCKED; sched_unlock();} while (0) # endif +#else +# define spin_unlock(l) sched_unlock() #endif /* CONFIG_SPINLOCK */ /**************************************************************************** @@ -429,7 +442,7 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock) #endif /**************************************************************************** - * Name: spin_lock_irqsave_wo_note + * Name: raw_spin_lock_irqsave * * Description: * This function is no trace version of spin_lock_irqsave() @@ -438,17 +451,17 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock) #ifdef CONFIG_SPINLOCK static inline_function -irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock) +irqstate_t raw_spin_lock_irqsave(FAR volatile spinlock_t *lock) { irqstate_t flags; flags = up_irq_save(); - spin_lock_wo_note(lock); + raw_spin_lock(lock); return flags; } #else -# define spin_lock_irqsave_wo_note(l) ((void)(l), up_irq_save()) +# define raw_spin_lock_irqsave(l) ((void)(l), up_irq_save()) #endif /**************************************************************************** @@ -456,15 +469,15 @@ irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock) * * Description: * If SMP is enabled: - * Disable local interrupts and take the lock spinlock and return - * the interrupt state. + * Disable local interrupts, sched_lock and take the lock spinlock and + * return the interrupt state. * * NOTE: This API is very simple to protect data (e.g. H/W register * or internal data structure) in SMP mode. But do not use this API * with kernel APIs which suspend a caller thread. (e.g. nxsem_wait) * * If SMP is not enabled: - * This function is equivalent to up_irq_save(). + * This function is equivalent to up_irq_save() + sched_lock(). * * Input Parameters: * lock - Caller specific spinlock. not NULL. @@ -485,9 +498,8 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock) sched_note_spinlock_lock(lock); - /* Lock without trace note */ - - flags = spin_lock_irqsave_wo_note(lock); + flags = raw_spin_lock_irqsave(lock); + sched_lock(); /* Notify that we have the spinlock */ @@ -496,11 +508,17 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock) return flags; } #else -# define spin_lock_irqsave(l) ((void)(l), up_irq_save()) +static inline_function +irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock) +{ + irqstate_t flags = up_irq_save(); + sched_lock(); + return flags; +} #endif /**************************************************************************** - * Name: spin_trylock_irqsave_wo_note + * Name: raw_spin_trylock_irqsave * * Description: * Try once to lock the spinlock. 
Do not wait if the spinlock is already @@ -523,14 +541,14 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock) ****************************************************************************/ #ifdef CONFIG_SPINLOCK -# define spin_trylock_irqsave_wo_note(l, f) \ +# define raw_spin_trylock_irqsave(l, f) \ ({ \ f = up_irq_save(); \ - spin_trylock_wo_note(l) ? \ + raw_spin_trylock(l) ? \ true : ({ up_irq_restore(f); false; }); \ }) #else -# define spin_trylock_irqsave_wo_note(l, f) \ +# define raw_spin_trylock_irqsave(l, f) \ ({ \ (void)(l); \ f = up_irq_save(); \ @@ -570,12 +588,13 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock) ({ \ (void)(l); \ f = up_irq_save(); \ + sched_lock(); \ true; \ }) #endif /* CONFIG_SPINLOCK */ /**************************************************************************** - * Name: spin_unlock_irqrestore_wo_note + * Name: raw_spin_unlock_irqrestore * * Description: * This function is no trace version of spin_unlock_irqrestore() @@ -584,15 +603,15 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock) #ifdef CONFIG_SPINLOCK static inline_function -void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock, - irqstate_t flags) +void raw_spin_unlock_irqrestore(FAR volatile spinlock_t *lock, + irqstate_t flags) { - spin_unlock_wo_note(lock); + raw_spin_unlock(lock); up_irq_restore(flags); } #else -# define spin_unlock_irqrestore_wo_note(l, f) ((void)(l), up_irq_restore(f)) +# define raw_spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f)) #endif /**************************************************************************** @@ -600,11 +619,11 @@ void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock, * * Description: * If SMP is enabled: - * Release the lock and restore the interrupt state as it was prior - * to the previous call to spin_lock_irqsave(lock). + * Release the lock and restore the interrupt state, sched_unlock + * as it was prior to the previous call to spin_lock_irqsave(lock). * * If SMP is not enabled: - * This function is equivalent to up_irq_restore(). + * This function is equivalent to up_irq_restore() + sched_unlock(). * * Input Parameters: * lock - Caller specific spinlock. 
not NULL @@ -619,19 +638,20 @@ void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock, #ifdef CONFIG_SPINLOCK static inline_function -void spin_unlock_irqrestore(FAR volatile spinlock_t *lock, - irqstate_t flags) +void spin_unlock_irqrestore(FAR volatile spinlock_t *lock, irqstate_t flags) { /* Unlock without trace note */ - spin_unlock_irqrestore_wo_note(lock, flags); + raw_spin_unlock_irqrestore(lock, flags); + + sched_unlock(); /* Notify that we are unlocking the spinlock */ sched_note_spinlock_unlock(lock); } #else -# define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f)) +# define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock()) #endif #if defined(CONFIG_RW_SPINLOCK) @@ -680,6 +700,8 @@ void spin_unlock_irqrestore(FAR volatile spinlock_t *lock, static inline_function void read_lock(FAR volatile rwlock_t *lock) { + sched_lock(); + while (true) { int old = atomic_read(lock); @@ -724,12 +746,15 @@ static inline_function void read_lock(FAR volatile rwlock_t *lock) static inline_function bool read_trylock(FAR volatile rwlock_t *lock) { + sched_lock(); while (true) { int old = atomic_read(lock); if (old <= RW_SP_WRITE_LOCKED) { DEBUGASSERT(old == RW_SP_WRITE_LOCKED); + sched_unlock(); + return false; } else if (atomic_cmpxchg(lock, &old, old + 1)) @@ -767,6 +792,8 @@ static inline_function void read_unlock(FAR volatile rwlock_t *lock) atomic_fetch_sub(lock, 1); UP_DSB(); UP_SEV(); + + sched_unlock(); } /**************************************************************************** @@ -798,6 +825,7 @@ static inline_function void write_lock(FAR volatile rwlock_t *lock) { int zero = RW_SP_UNLOCKED; + sched_lock(); while (!atomic_cmpxchg(lock, &zero, RW_SP_WRITE_LOCKED)) { UP_DSB(); @@ -836,9 +864,11 @@ static inline_function bool write_trylock(FAR volatile rwlock_t *lock) { int zero = RW_SP_UNLOCKED; + sched_lock(); if (atomic_cmpxchg(lock, &zero, RW_SP_WRITE_LOCKED)) { UP_DMB(); + sched_unlock(); return true; } @@ -873,6 +903,7 @@ static inline_function void write_unlock(FAR volatile rwlock_t *lock) atomic_set(lock, RW_SP_UNLOCKED); UP_DSB(); UP_SEV(); + sched_unlock(); } /**************************************************************************** @@ -903,7 +934,15 @@ static inline_function void write_unlock(FAR volatile rwlock_t *lock) #ifdef CONFIG_SPINLOCK irqstate_t read_lock_irqsave(FAR rwlock_t *lock); #else -# define read_lock_irqsave(l) ((void)(l), up_irq_save()) +irqstate_t inline_function read_lock_irqsave(FAR rwlock_t *lock) +{ + irqstate_t ret; + + ret = up_irq_save(); + sched_lock(); + + return ret; +} #endif /**************************************************************************** @@ -932,7 +971,7 @@ irqstate_t read_lock_irqsave(FAR rwlock_t *lock); #ifdef CONFIG_SPINLOCK void read_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags); #else -# define read_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f)) +# define read_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock()) #endif /**************************************************************************** @@ -963,7 +1002,15 @@ void read_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags); #ifdef CONFIG_SPINLOCK irqstate_t write_lock_irqsave(FAR rwlock_t *lock); #else -# define write_lock_irqsave(l) ((void)(l), up_irq_save()) +static inline_function write_lock_irqsave(FAR rwlock_t *lock) +{ + irqstate_t ret; + + ret = up_irq_save(); + sched_lock(); + + return ret; +} #endif 
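A hedged usage sketch for the read-write spinlock changes above, assuming CONFIG_RW_SPINLOCK; the table names and lookup logic are placeholders, not from the patch. read_lock()/read_unlock() now bracket sched_lock()/sched_unlock(), and a failed read_trylock() releases the scheduler lock itself before returning, so the caller only pairs a successful trylock with read_unlock().

#include <nuttx/config.h>
#include <nuttx/spinlock.h>
#include <stdbool.h>

static rwlock_t g_table_lock = RW_SP_UNLOCKED;  /* hypothetical */
static int g_table_value;

bool table_read_try(FAR int *value)             /* hypothetical helper */
{
  if (!read_trylock(&g_table_lock))
    {
      /* Failure path: read_trylock() already called sched_unlock() */

      return false;
    }

  *value = g_table_value;

  /* Releases the reader count and re-enables preemption via sched_unlock() */

  read_unlock(&g_table_lock);
  return true;
}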
/**************************************************************************** @@ -992,7 +1039,7 @@ irqstate_t write_lock_irqsave(FAR rwlock_t *lock); #ifdef CONFIG_SPINLOCK void write_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags); #else -# define write_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f)) +# define write_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock()) #endif #endif /* CONFIG_RW_SPINLOCK */ diff --git a/include/sched.h b/include/sched.h index 332dda4055054..9fa907af2f527 100644 --- a/include/sched.h +++ b/include/sched.h @@ -265,8 +265,8 @@ int sched_cpucount(FAR const cpu_set_t *set); /* Task Switching Interfaces (non-standard) */ -int sched_lock(void); -int sched_unlock(void); +void sched_lock(void); +void sched_unlock(void); int sched_lockcount(void); /* Queries */ diff --git a/libs/libc/machine/arch_atomic.c b/libs/libc/machine/arch_atomic.c index 8d7e7f48771ea..cb37673976ea6 100644 --- a/libs/libc/machine/arch_atomic.c +++ b/libs/libc/machine/arch_atomic.c @@ -46,11 +46,11 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; void weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ type value, int memorder) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ \ *(FAR type *)ptr = value; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ } #define LOAD(fn, n, type) \ @@ -58,11 +58,11 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR const volatile void *ptr, \ int memorder) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ \ type ret = *(FAR type *)ptr; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return ret; \ } @@ -71,13 +71,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ type value, int memorder) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ \ type ret = *tmp; \ *tmp = value; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return ret; \ } @@ -89,7 +89,7 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; int success, int failure) \ { \ bool ret = false; \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmpmem = (FAR type *)mem; \ FAR type *tmpexp = (FAR type *)expect; \ \ @@ -103,7 +103,7 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; *tmpexp = *tmpmem; \ } \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return ret; \ } @@ -112,13 +112,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ int memorder) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ type ret = *tmp; \ \ *(FAR type *)ptr = 1; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return ret; \ } @@ -127,13 
+127,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ type value, int memorder) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ type ret = *tmp; \ \ *tmp = *tmp + value; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return ret; \ } @@ -142,13 +142,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ type value, int memorder) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ type ret = *tmp; \ \ *tmp = *tmp - value; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return ret; \ } @@ -157,13 +157,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ type value, int memorder) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ type ret = *tmp; \ \ *tmp = *tmp & value; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return ret; \ } @@ -172,13 +172,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ type value, int memorder) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ type ret = *tmp; \ \ *tmp = *tmp | value; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return ret; \ } @@ -187,13 +187,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ type value, int memorder) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ type ret = *tmp; \ \ *tmp = *tmp ^ value; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return ret; \ } @@ -202,12 +202,12 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ type value) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ \ *tmp = *tmp + value; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return *tmp; \ } @@ -216,12 +216,12 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ type value) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ \ *tmp = *tmp - value; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return *tmp; \ } @@ -230,12 
+230,12 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ type value) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ \ *tmp = *tmp | value; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return *tmp; \ } @@ -244,12 +244,12 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ type value) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ \ *tmp = *tmp & value; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return *tmp; \ } @@ -258,12 +258,12 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ type value) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ \ *tmp = *tmp ^ value; \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return *tmp; \ } @@ -272,12 +272,12 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \ type value) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ \ *tmp = ~(*tmp & value); \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return *tmp; \ } @@ -288,7 +288,7 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type newvalue) \ { \ bool ret = false; \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ \ if (*tmp == oldvalue) \ @@ -297,7 +297,7 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; *tmp = newvalue; \ } \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return ret; \ } @@ -307,7 +307,7 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; type oldvalue, \ type newvalue) \ { \ - irqstate_t irqstate = spin_lock_irqsave_wo_note(&g_atomic_lock); \ + irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \ FAR type *tmp = (FAR type *)ptr; \ type ret = *tmp; \ \ @@ -316,7 +316,7 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED; *tmp = newvalue; \ } \ \ - spin_unlock_irqrestore_wo_note(&g_atomic_lock, irqstate); \ + raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \ return ret; \ } diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c index 30e7f8820f8bc..1e4763e750975 100644 --- a/sched/irq/irq_csection.c +++ b/sched/irq/irq_csection.c @@ -180,7 +180,7 @@ irqstate_t enter_critical_section_wo_note(void) * no longer blocked by the critical section). 
*/ - spin_lock_wo_note(&g_cpu_irqlock); + raw_spin_lock(&g_cpu_irqlock); cpu_irqlock_set(cpu); } @@ -231,7 +231,7 @@ irqstate_t enter_critical_section_wo_note(void) DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0); - spin_lock_wo_note(&g_cpu_irqlock); + raw_spin_lock(&g_cpu_irqlock); /* Then set the lock count to 1. * diff --git a/sched/sched/sched.h b/sched/sched/sched.h index a978493061070..52156fa99bff8 100644 --- a/sched/sched/sched.h +++ b/sched/sched/sched.h @@ -415,6 +415,8 @@ void nxsched_update_critmon(FAR struct tcb_s *tcb); #if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0 void nxsched_critmon_preemption(FAR struct tcb_s *tcb, bool state, FAR void *caller); +#else +# define nxsched_critmon_preemption(t, s, c) #endif #if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0 diff --git a/sched/sched/sched_lock.c b/sched/sched/sched_lock.c index d825e09f14201..d8f75e7c90ae0 100644 --- a/sched/sched/sched_lock.c +++ b/sched/sched/sched_lock.c @@ -64,112 +64,36 @@ * ****************************************************************************/ -#ifdef CONFIG_SMP - -int sched_lock(void) +void sched_lock(void) { - FAR struct tcb_s *rtcb; - - /* If the CPU supports suppression of interprocessor interrupts, then - * simple disabling interrupts will provide sufficient protection for - * the following operation. - */ - - rtcb = this_task(); + /* sched_lock() should have no effect if called from the interrupt level. */ - /* Check for some special cases: (1) rtcb may be NULL only during early - * boot-up phases, and (2) sched_lock() should have no effect if called - * from the interrupt level. - */ - - if (rtcb != NULL && !up_interrupt_context()) + if (!up_interrupt_context()) { - irqstate_t flags; + FAR struct tcb_s *rtcb = this_task(); /* Catch attempts to increment the lockcount beyond the range of the * integer type. */ - DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT); - - flags = enter_critical_section(); - - /* A counter is used to support locking. This allows nested lock - * operations on this thread - */ - - rtcb->lockcount++; - - /* Check if we just acquired the lock */ - - if (rtcb->lockcount == 1) - { - /* Note that we have pre-emption locked */ - -#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0 - nxsched_critmon_preemption(rtcb, true, return_address(0)); -#endif -#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION - sched_note_preemption(rtcb, true); -#endif - } - - /* Move any tasks in the ready-to-run list to the pending task list - * where they will not be available to run until the scheduler is - * unlocked and nxsched_merge_pending() is called. - */ - - nxsched_merge_prioritized(list_readytorun(), - list_pendingtasks(), - TSTATE_TASK_PENDING); + DEBUGASSERT(rtcb == NULL || rtcb->lockcount < MAX_LOCK_COUNT); - leave_critical_section(flags); - } - - return OK; -} - -#else /* CONFIG_SMP */ - -int sched_lock(void) -{ - FAR struct tcb_s *rtcb = this_task(); - - /* Check for some special cases: (1) rtcb may be NULL only during early - * boot-up phases, and (2) sched_lock() should have no effect if called - * from the interrupt level. - */ - - if (rtcb != NULL && !up_interrupt_context()) - { - /* Catch attempts to increment the lockcount beyond the range of the - * integer type. - */ - - DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT); - - /* A counter is used to support locking. This allows nested lock + /* A counter is used to support locking. 
This allows nested lock * operations on this thread (on any CPU) */ - rtcb->lockcount++; - - /* Check if we just acquired the lock */ - - if (rtcb->lockcount == 1) + if (rtcb != NULL && rtcb->lockcount++ == 0) { +#if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) || \ + defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION) + irqstate_t flags = enter_critical_section_wo_note(); + /* Note that we have pre-emption locked */ -#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0 nxsched_critmon_preemption(rtcb, true, return_address(0)); -#endif -#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION sched_note_preemption(rtcb, true); + leave_critical_section_wo_note(flags); #endif } } - - return OK; } - -#endif /* CONFIG_SMP */ diff --git a/sched/sched/sched_process_delivered.c b/sched/sched/sched_process_delivered.c index 016f7307129b5..32e2987d7d63f 100644 --- a/sched/sched/sched_process_delivered.c +++ b/sched/sched/sched_process_delivered.c @@ -73,7 +73,7 @@ void nxsched_process_delivered(int cpu) if ((g_cpu_irqset & (1 << cpu)) == 0) { - spin_lock_wo_note(&g_cpu_irqlock); + raw_spin_lock(&g_cpu_irqlock); g_cpu_irqset |= (1 << cpu); } diff --git a/sched/sched/sched_unlock.c b/sched/sched/sched_unlock.c index 23def4bf2585e..4c7d681454239 100644 --- a/sched/sched/sched_unlock.c +++ b/sched/sched/sched_unlock.c @@ -53,50 +53,30 @@ * ****************************************************************************/ -#ifdef CONFIG_SMP - -int sched_unlock(void) +void sched_unlock(void) { - FAR struct tcb_s *rtcb; - - /* This operation is safe because the scheduler is locked and no context - * switch may occur. - */ - - rtcb = this_task(); - - /* Check for some special cases: (1) rtcb may be NULL only during - * early boot-up phases, and (2) sched_unlock() should have no - * effect if called from the interrupt level. - */ + /* sched_unlock should have no effect if called from the interrupt level. */ - if (rtcb != NULL && !up_interrupt_context()) + if (!up_interrupt_context()) { - /* Prevent context switches throughout the following. */ + FAR struct tcb_s *rtcb = this_task(); - irqstate_t flags = enter_critical_section(); - int cpu = this_cpu(); + /* rtcb may be NULL only during early boot-up phases */ - DEBUGASSERT(rtcb->lockcount > 0); + DEBUGASSERT(rtcb == NULL || rtcb->lockcount > 0); - /* Decrement the preemption lock counter */ - - rtcb->lockcount--; - - /* Check if the lock counter has decremented to zero. If so, + /* Check if the lock counter has decremented to zero. If so, * then pre-emption has been re-enabled. */ - if (rtcb->lockcount <= 0) + if (rtcb != NULL && --rtcb->lockcount == 0) { + irqstate_t flags = enter_critical_section_wo_note(); + /* Note that we no longer have pre-emption disabled. */ -#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0 nxsched_critmon_preemption(rtcb, false, return_address(0)); -#endif -#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION sched_note_preemption(rtcb, false); -#endif /* Release any ready-to-run tasks that have collected in * g_pendingtasks. @@ -131,153 +111,25 @@ int sched_unlock(void) * maximum. 
*/ - if (rtcb != current_task(cpu)) - { - rtcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL); - } -#ifdef CONFIG_SCHED_TICKLESS - else - { - nxsched_reassess_timer(); - } -#endif - } -#endif - -#ifdef CONFIG_SCHED_SPORADIC -#if CONFIG_RR_INTERVAL > 0 - else -#endif - /* If (1) the task that was running supported sporadic scheduling - * and (2) if its budget slice has already expired, but (3) it - * could not slice out because pre-emption was disabled, then we - * need to swap the task out now and reassess the interval timer - * for the next time slice. - */ - - if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC - && rtcb->timeslice < 0) - { - /* Yes.. that is the situation. Force the low-priority state - * now - */ - - nxsched_sporadic_lowpriority(rtcb); - -#ifdef CONFIG_SCHED_TICKLESS - /* Make sure that the call to nxsched_merge_pending() did not - * change the currently active task. - */ - - if (rtcb == current_task(cpu)) - { - nxsched_reassess_timer(); - } -#endif - } -#endif - } - - UNUSED(cpu); - leave_critical_section(flags); - } - - return OK; -} - -#else /* CONFIG_SMP */ - -int sched_unlock(void) -{ - FAR struct tcb_s *rtcb = this_task(); - - /* Check for some special cases: (1) rtcb may be NULL only during - * early boot-up phases, and (2) sched_unlock() should have no - * effect if called from the interrupt level. - */ - - if (rtcb != NULL && !up_interrupt_context()) - { - /* Prevent context switches throughout the following. */ - - irqstate_t flags = enter_critical_section(); - - DEBUGASSERT(rtcb->lockcount > 0); - - /* Decrement the preemption lock counter */ - - rtcb->lockcount--; - - /* Check if the lock counter has decremented to zero. If so, - * then pre-emption has been re-enabled. - */ - - if (rtcb->lockcount <= 0) - { - /* Note that we no longer have pre-emption disabled. */ - -#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0 - nxsched_critmon_preemption(rtcb, false, return_address(0)); -#endif -#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION - sched_note_preemption(rtcb, false); -#endif - - /* Release any ready-to-run tasks that have collected in - * g_pendingtasks. - * - * NOTE: This operation has a very high likelihood of causing - * this task to be switched out! - * - * In the single CPU case, decrementing lockcount to zero is - * sufficient to release the pending tasks. Further, in that - * configuration, critical sections and pre-emption can operate - * fully independently. - */ - - if (list_pendingtasks()->head != NULL) - { - if (nxsched_merge_pending()) - { - up_switch_context(this_task(), rtcb); - } - } - -#if CONFIG_RR_INTERVAL > 0 - /* If (1) the task that was running supported round-robin - * scheduling and (2) if its time slice has already expired, but - * (3) it could not be sliced out because pre-emption was disabled, - * then we need to swap the task out now and reassess the interval - * timer for the next time slice. - */ - - if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR && - rtcb->timeslice == 0) - { - /* Yes.. that is the situation. But one more thing: The call - * to nxsched_merge_pending() above may have actually replaced - * the task at the head of the ready-to-run list. In that - * case, we need only to reset the timeslice value back to the - * maximum. 
- */ - if (rtcb != this_task()) { rtcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL); } -#ifdef CONFIG_SCHED_TICKLESS - else +# ifdef CONFIG_SCHED_TICKLESS + else if ((rtcb->flags & TCB_FLAG_PREEMPT_SCHED) == 0) { + rtcb->flags |= TCB_FLAG_PREEMPT_SCHED; nxsched_reassess_timer(); + rtcb->flags &= ~TCB_FLAG_PREEMPT_SCHED; } -#endif +# endif } #endif #ifdef CONFIG_SCHED_SPORADIC -#if CONFIG_RR_INTERVAL > 0 +# if CONFIG_RR_INTERVAL > 0 else -#endif +# endif /* If (1) the task that was running supported sporadic scheduling * and (2) if its budget slice has already expired, but (3) it * could not slice out because pre-emption was disabled, then we @@ -294,24 +146,23 @@ int sched_unlock(void) nxsched_sporadic_lowpriority(rtcb); -#ifdef CONFIG_SCHED_TICKLESS +# ifdef CONFIG_SCHED_TICKLESS /* Make sure that the call to nxsched_merge_pending() did not * change the currently active task. */ - if (rtcb == this_task()) + if (rtcb == this_task() && + (rtcb->flags & TCB_FLAG_PREEMPT_SCHED) == 0) { + rtcb->flags |= TCB_FLAG_PREEMPT_SCHED; nxsched_reassess_timer(); + rtcb->flags &= ~TCB_FLAG_PREEMPT_SCHED; } -#endif +# endif } #endif - } - leave_critical_section(flags); + leave_critical_section_wo_note(flags); + } } - - return OK; } - -#endif /* CONFIG_SMP */ diff --git a/syscall/syscall.csv b/syscall/syscall.csv index 959f7d9545ea9..e834dedf61fa5 100644 --- a/syscall/syscall.csv +++ b/syscall/syscall.csv @@ -140,13 +140,13 @@ "sched_getcpu","sched.h","","int" "sched_getparam","sched.h","","int","pid_t","FAR struct sched_param *" "sched_getscheduler","sched.h","","int","pid_t" -"sched_lock","sched.h","","int" +"sched_lock","sched.h","","void" "sched_lockcount","sched.h","","int" "sched_rr_get_interval","sched.h","","int","pid_t","struct timespec *" "sched_setaffinity","sched.h","defined(CONFIG_SMP)","int","pid_t","size_t","FAR const cpu_set_t*" "sched_setparam","sched.h","","int","pid_t","const struct sched_param *" "sched_setscheduler","sched.h","","int","pid_t","int","const struct sched_param *" -"sched_unlock","sched.h","","int" +"sched_unlock","sched.h","","void" "sched_yield","sched.h","","int" "select","sys/select.h","","int","int","FAR fd_set *","FAR fd_set *","FAR fd_set *","FAR struct timeval *" "send","sys/socket.h","defined(CONFIG_NET)","ssize_t","int","FAR const void *","size_t","int"
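Finally, a short caller-side sketch of the new void prototypes; the wrapper function below is hypothetical. Because sched_lock() and sched_unlock() can no longer fail, callers such as the esp32_ble_adapter change at the top of this diff simply drop their return-value checks.

#include <sched.h>

/* Hypothetical helper: run a callback with preemption disabled. */

void run_with_preemption_disabled(void (*fn)(void))
{
  sched_lock();    /* Returns void now; nothing to check */
  fn();
  sched_unlock();  /* Always pairs with the sched_lock() above */
}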