diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 4cc17d5e92ecd5..fe093d1c1b11ff 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -180,7 +180,7 @@ static ALWAYS_INLINE struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
 		thread = _priq_wait_best(&wait_q->waitq);
 		if (unlikely(thread != NULL)) {
 			unpend_thread_no_timeout(thread);
-			(void)z_abort_thread_timeout(thread);
+			z_abort_thread_timeout(thread);
 		}
 	}
 
diff --git a/kernel/include/timeout_q.h b/kernel/include/timeout_q.h
index a62242a9f3cfb7..b3c3e61cd9fc89 100644
--- a/kernel/include/timeout_q.h
+++ b/kernel/include/timeout_q.h
@@ -49,9 +49,9 @@ static inline void z_add_thread_timeout(struct k_thread *thread, k_timeout_t tic
 	z_add_timeout(&thread->base.timeout, z_thread_timeout, ticks);
 }
 
-static inline int z_abort_thread_timeout(struct k_thread *thread)
+static inline void z_abort_thread_timeout(struct k_thread *thread)
 {
-	return z_abort_timeout(&thread->base.timeout);
+	z_abort_timeout(&thread->base.timeout);
 }
 
 int32_t z_get_next_timeout_expiry(void);
@@ -62,7 +62,7 @@ k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
 
 /* Stubs when !CONFIG_SYS_CLOCK_EXISTS */
 #define z_init_thread_timeout(thread_base) do {} while (false)
-#define z_abort_thread_timeout(to) (0)
+#define z_abort_thread_timeout(to) do {} while (false)
 #define z_is_inactive_timeout(to) 1
 #define z_get_next_timeout_expiry() ((int32_t) K_TICKS_FOREVER)
 #define z_set_timeout_expiry(ticks, is_idle) do {} while (false)
diff --git a/kernel/sched.c b/kernel/sched.c
index 41cdafa5d7eb57..0c2be8ab74b870 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -652,7 +652,7 @@ struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
 void z_unpend_thread(struct k_thread *thread)
 {
 	z_unpend_thread_no_timeout(thread);
-	(void)z_abort_thread_timeout(thread);
+	z_abort_thread_timeout(thread);
 }
 
 /* Priority set utility that does no rescheduling, it just changes the
@@ -1164,7 +1164,7 @@ void z_impl_k_wakeup(k_tid_t thread)
 {
 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
 
-	(void)z_abort_thread_timeout(thread);
+	z_abort_thread_timeout(thread);
 
 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
 
@@ -1212,7 +1212,7 @@ static inline void unpend_all(_wait_q_t *wait_q)
 	for (thread = z_waitq_head(wait_q); thread != NULL;
 	     thread = z_waitq_head(wait_q)) {
 		unpend_thread_no_timeout(thread);
-		(void)z_abort_thread_timeout(thread);
+		z_abort_thread_timeout(thread);
 		arch_thread_return_value_set(thread, 0);
 		ready_thread(thread);
 	}
@@ -1247,7 +1247,7 @@ static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state
 		if (thread->base.pended_on != NULL) {
 			unpend_thread_no_timeout(thread);
 		}
-		(void)z_abort_thread_timeout(thread);
+		z_abort_thread_timeout(thread);
 		unpend_all(&thread->join_queue);
 
 		/* Edge case: aborting arch_current_thread() from within an
@@ -1458,7 +1458,7 @@ bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
 						    swap_retval,
 						    swap_data);
 		unpend_thread_no_timeout(thread);
-		(void)z_abort_thread_timeout(thread);
+		z_abort_thread_timeout(thread);
 		ready_thread(thread);
 		ret = true;
 	}
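
A minimal compilable sketch (not part of the patch) of the stub pattern the change settles on: a `do {} while (false)` macro is usable only as a statement, exactly like the new void function, so a call such as `z_abort_thread_timeout(thread);` builds identically whether or not the timeout code exists. The `struct k_thread` body and the direct `#ifdef CONFIG_SYS_CLOCK_EXISTS` guard below are simplified stand-ins, not the real kernel definitions.

#include <stdbool.h>
#include <stdio.h>

struct k_thread { int dummy; };  /* stand-in for the real thread struct */

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Clock configured: a void function, mirroring the patched signature. */
static inline void z_abort_thread_timeout(struct k_thread *thread)
{
	(void)thread;  /* the real kernel cancels thread->base.timeout here */
}
#else
/* No clock: a statement-shaped no-op. Unlike the old `(0)` form, it
 * cannot appear inside an expression, matching the void function above.
 */
#define z_abort_thread_timeout(to) do {} while (false)
#endif

int main(void)
{
	struct k_thread t = { 0 };

	z_abort_thread_timeout(&t);  /* no (void) cast needed in either config */
	printf("ok\n");
	return 0;
}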