diff --git a/arch/lkl/include/asm/cpu.h b/arch/lkl/include/asm/cpu.h
index 7dba513ebcb415..dd2cd0fc2a28db 100644
--- a/arch/lkl/include/asm/cpu.h
+++ b/arch/lkl/include/asm/cpu.h
@@ -7,11 +7,7 @@ int lkl_cpu_try_run_irq(int irq);
 int lkl_cpu_init(void);
 void lkl_cpu_shutdown(void);
 void lkl_cpu_wait_shutdown(void);
-void lkl_cpu_wakeup_idle(void);
 void lkl_cpu_change_owner(lkl_thread_t owner);
 void lkl_cpu_set_irqs_pending(void);
-void lkl_idle_tail_schedule(void);
-int lkl_cpu_idle_pending(void);
-extern void do_idle(void);
 
 #endif /* _ASM_LKL_CPU_H */
diff --git a/arch/lkl/include/asm/sched.h b/arch/lkl/include/asm/sched.h
index e874d1afa1127a..9fe6413b4e2e29 100644
--- a/arch/lkl/include/asm/sched.h
+++ b/arch/lkl/include/asm/sched.h
@@ -2,18 +2,17 @@
 #define _ASM_LKL_SCHED_H
 
 #include 
+#include 
 
 static inline void thread_sched_jb(void)
 {
-	set_ti_thread_flag(current_thread_info(), TIF_SCHED_JB);
-	if (test_ti_thread_flag(current_thread_info(), TIF_HOST_THREAD)) {
+	if (test_ti_thread_flag(current_thread_info(), TIF_HOST_THREAD)) {
+		set_ti_thread_flag(current_thread_info(), TIF_SCHED_JB);
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		lkl_ops->jmp_buf_set(&current_thread_info()->sched_jb, schedule);
-	} else {
-		lkl_ops->jmp_buf_set(&current_thread_info()->sched_jb,
-				     lkl_idle_tail_schedule);
+	} else {
+		lkl_bug("thread_sched_jb() can be used only for host task");
 	}
 }
diff --git a/arch/lkl/include/asm/syscalls.h b/arch/lkl/include/asm/syscalls.h
index 43956b4bbf0ad6..4116e8a0cc6fc1 100644
--- a/arch/lkl/include/asm/syscalls.h
+++ b/arch/lkl/include/asm/syscalls.h
@@ -4,6 +4,7 @@
 int syscalls_init(void);
 void syscalls_cleanup(void);
 long lkl_syscall(long no, long *params);
+void wakeup_idle_host_task(void);
 
 #define sys_mmap sys_mmap_pgoff
 #define sys_mmap2 sys_mmap_pgoff
diff --git a/arch/lkl/include/asm/thread_info.h b/arch/lkl/include/asm/thread_info.h
index de00569f158e4c..36ef8c2d9b3149 100644
--- a/arch/lkl/include/asm/thread_info.h
+++ b/arch/lkl/include/asm/thread_info.h
@@ -59,7 +59,6 @@ void threads_cleanup(void);
 #define TIF_NOHZ 6
 #define TIF_SCHED_JB 7
 #define TIF_HOST_THREAD 8
-#define TIF_IDLE 9
 
 #define __HAVE_THREAD_FUNCTIONS
 
diff --git a/arch/lkl/kernel/cpu.c b/arch/lkl/kernel/cpu.c
index 73806f5cbfd694..60be3122f496bc 100644
--- a/arch/lkl/kernel/cpu.c
+++ b/arch/lkl/kernel/cpu.c
@@ -1,8 +1,5 @@
-#include 
-#include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -52,12 +49,6 @@ struct lkl_cpu {
 	lkl_thread_t owner;
 	/* semaphore for threads waiting the CPU */
 	struct lkl_sem *sem;
-	/* semaphore for the idle thread */
-	struct lkl_sem *idle_sem;
-	/* if the idle thread is pending */
-	bool idle_pending;
-	/* jmp_buf used for idle thread to restart */
-	struct lkl_jmp_buf idle_jb;
 	/* semaphore used for shutdown */
 	struct lkl_sem *shutdown_sem;
 } cpu;
@@ -134,7 +125,8 @@ void lkl_cpu_put(void)
 		lkl_ops->mutex_lock(cpu.lock);
 	}
 
-	if (need_resched() && cpu.count == 1) {
+	if (test_ti_thread_flag(current_thread_info(), TIF_HOST_THREAD) &&
+	    !single_task_running() && cpu.count == 1) {
 		if (in_interrupt())
 			lkl_bug("%s: in interrupt\n", __func__);
 		lkl_ops->mutex_unlock(cpu.lock);
@@ -191,8 +183,6 @@ static void lkl_cpu_cleanup(bool shutdown)
 		lkl_ops->sem_up(cpu.shutdown_sem);
 	else if (cpu.shutdown_sem)
 		lkl_ops->sem_free(cpu.shutdown_sem);
-	if (cpu.idle_sem)
-		lkl_ops->sem_free(cpu.idle_sem);
 	if (cpu.sem)
 		lkl_ops->sem_free(cpu.sem);
 	if (cpu.lock)
@@ -215,91 +205,20 @@ void arch_cpu_idle(void)
 	/* enable irqs now to allow direct irqs to run */
 	local_irq_enable();
 
-	if (need_resched())
-		return;
-
-	cpu.idle_pending = true;
-	lkl_cpu_put();
-
-	lkl_ops->sem_down(cpu.idle_sem);
-
-	cpu.idle_pending = false;
-	/* to match that of schedule_preempt_disabled() */
-	preempt_disable();
-	lkl_ops->jmp_buf_longjmp(&cpu.idle_jb, 1);
-}
-
-void arch_cpu_idle_prepare(void)
-{
-	set_ti_thread_flag(current_thread_info(), TIF_IDLE);
-	/*
-	 * We hijack the idle loop here so that we can let the idle thread
-	 * jump back to the beginning.
-	 */
-	while (1)
-		lkl_ops->jmp_buf_set(&cpu.idle_jb, do_idle);
-}
-
-void lkl_cpu_wakeup_idle(void)
-{
-	lkl_ops->sem_up(cpu.idle_sem);
+	/* switch to idle_host_task */
+	wakeup_idle_host_task();
 }
 
 int lkl_cpu_init(void)
 {
 	cpu.lock = lkl_ops->mutex_alloc(0);
 	cpu.sem = lkl_ops->sem_alloc(0);
-	cpu.idle_sem = lkl_ops->sem_alloc(0);
 	cpu.shutdown_sem = lkl_ops->sem_alloc(0);
 
-	if (!cpu.lock || !cpu.sem || !cpu.idle_sem || !cpu.shutdown_sem) {
+	if (!cpu.lock || !cpu.sem || !cpu.shutdown_sem) {
 		lkl_cpu_cleanup(false);
 		return -ENOMEM;
 	}
 
 	return 0;
 }
-
-/*
- * Simulate the exit path of idle loop so that we can schedule when LKL is
- * in idle.
- * It's just a duplication of those in idle.c so a better way is to refactor
- * idle.c to expose such function.
- */
-void lkl_idle_tail_schedule(void)
-{
-
-	if (!cpu.idle_pending ||
-	    !test_bit(TIF_IDLE, &current_thread_info()->flags))
-		lkl_bug("%s: not in idle\n", __func__);
-
-	start_critical_timings();
-	__current_set_polling();
-
-	if (WARN_ON_ONCE(irqs_disabled()))
-		local_irq_enable();
-
-	rcu_idle_exit();
-	arch_cpu_idle_exit();
-	preempt_set_need_resched();
-	tick_nohz_idle_exit();
-	__current_clr_polling();
-
-	/*
-	 * memory barrier copied from idle.c
-	 */
-	smp_mb__after_atomic();
-
-	/*
-	 * Didn't find a way to include kernel/sched/sched.h for
-	 * sched_ttwu_pending().
-	 * Anyway, it's no op when not CONFIG_SMP.
-	 */
-
-	schedule_preempt_disabled();
-}
-
-int lkl_cpu_idle_pending(void)
-{
-	return cpu.idle_pending;
-}
diff --git a/arch/lkl/kernel/syscalls.c b/arch/lkl/kernel/syscalls.c
index b429c4c0eabdc5..94d22f289a17b7 100644
--- a/arch/lkl/kernel/syscalls.c
+++ b/arch/lkl/kernel/syscalls.c
@@ -130,6 +130,34 @@ long lkl_syscall(long no, long *params)
 	return ret;
 }
 
+static struct task_struct *idle_host_task;
+
+/* called from idle, don't fail, don't block */
+void wakeup_idle_host_task(void)
+{
+	if (!need_resched() && idle_host_task)
+		wake_up_process(idle_host_task);
+}
+
+static int idle_host_task_loop(void *unused)
+{
+	struct thread_info *ti = task_thread_info(current);
+
+	snprintf(current->comm, sizeof(current->comm), "idle_host_task");
+	set_thread_flag(TIF_HOST_THREAD);
+	idle_host_task = current;
+
+	for (;;) {
+		lkl_cpu_put();
+		lkl_ops->sem_down(ti->sched_sem);
+		if (idle_host_task == NULL) {
+			lkl_ops->thread_exit();
+			return 0;
+		}
+		schedule_tail(ti->prev_sched);
+	}
+}
+
 int syscalls_init(void)
 {
 	snprintf(current->comm, sizeof(current->comm), "host0");
@@ -142,11 +170,25 @@ int syscalls_init(void)
 		return -1;
 	}
 
+	if (kernel_thread(idle_host_task_loop, NULL, CLONE_FLAGS) < 0) {
+		if (lkl_ops->tls_free)
+			lkl_ops->tls_free(task_key);
+		return -1;
+	}
+
 	return 0;
 }
 
 void syscalls_cleanup(void)
 {
+	if (idle_host_task) {
+		struct thread_info *ti = task_thread_info(idle_host_task);
+
+		idle_host_task = NULL;
+		lkl_ops->sem_up(ti->sched_sem);
+		lkl_ops->thread_join(ti->tid);
+	}
+
 	if (lkl_ops->tls_free)
 		lkl_ops->tls_free(task_key);
 }
diff --git a/arch/lkl/kernel/threads.c b/arch/lkl/kernel/threads.c
index b7d0e0742cfa41..e561a78129a14a 100644
--- a/arch/lkl/kernel/threads.c
+++ b/arch/lkl/kernel/threads.c
@@ -89,8 +89,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	struct thread_info *_prev = task_thread_info(prev);
 	struct thread_info *_next = task_thread_info(next);
 	unsigned long _prev_flags = _prev->flags;
-	bool wakeup_idle = test_bit(TIF_IDLE, &_next->flags) &&
-			   lkl_cpu_idle_pending();
 	struct lkl_jmp_buf _prev_jb;
 
 	_current_thread_info = task_thread_info(next);
@@ -98,22 +96,15 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	abs_prev = prev;
 
 	BUG_ON(!_next->tid);
+	lkl_cpu_change_owner(_next->tid);
 
 	if (test_bit(TIF_SCHED_JB, &_prev_flags)) {
 		/* Atomic. Must be done before wakeup next */
 		clear_ti_thread_flag(_prev, TIF_SCHED_JB);
 		_prev_jb = _prev->sched_jb;
 	}
 
-	if (wakeup_idle)
-		schedule_tail(abs_prev);
-	lkl_cpu_change_owner(_next->tid);
-
-	/* No kernel code is allowed after wakeup next */
-	if (wakeup_idle)
-		lkl_cpu_wakeup_idle();
-	else
-		lkl_ops->sem_up(_next->sched_sem);
+	lkl_ops->sem_up(_next->sched_sem);
 
 	if (test_bit(TIF_SCHED_JB, &_prev_flags)) {
 		lkl_ops->jmp_buf_longjmp(&_prev_jb, 1);
 	} else {
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index a565796485c5af..6a4bae0a649d9a 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -205,7 +205,7 @@ static void cpuidle_idle_call(void)
  *
  * Called with polling cleared.
  */
-void do_idle(void)
+static void do_idle(void)
 {
 	/*
 	 * If the arch has a polling bit, we maintain an invariant:
@@ -265,7 +265,6 @@ void do_idle(void)
 	sched_ttwu_pending();
 	schedule_preempt_disabled();
 }
-EXPORT_SYMBOL(do_idle);
 
 bool cpu_in_idle(unsigned long pc)
 {