switch to idle_host_task when idle #336

Merged: 3 commits, Mar 5, 2017

Changes from 1 commit
arch/lkl/include/asm/cpu.h (4 changes: 0 additions & 4 deletions)

@@ -7,11 +7,7 @@ int lkl_cpu_try_run_irq(int irq);
 int lkl_cpu_init(void);
 void lkl_cpu_shutdown(void);
 void lkl_cpu_wait_shutdown(void);
-void lkl_cpu_wakeup_idle(void);
 void lkl_cpu_change_owner(lkl_thread_t owner);
 void lkl_cpu_set_irqs_pending(void);
-void lkl_idle_tail_schedule(void);
-int lkl_cpu_idle_pending(void);
-extern void do_idle(void);
 
 #endif /* _ASM_LKL_CPU_H */
arch/lkl/include/asm/sched.h (9 changes: 4 additions & 5 deletions)

@@ -2,18 +2,17 @@
 #define _ASM_LKL_SCHED_H
 
 #include <linux/sched.h>
+#include <uapi/asm/host_ops.h>
 
 static inline void thread_sched_jb(void)
 {
-	set_ti_thread_flag(current_thread_info(), TIF_SCHED_JB);
-
 	if (test_ti_thread_flag(current_thread_info(), TIF_HOST_THREAD)) {
+		set_ti_thread_flag(current_thread_info(), TIF_SCHED_JB);
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		lkl_ops->jmp_buf_set(&current_thread_info()->sched_jb,
 				     schedule);
-	} else {
-		lkl_ops->jmp_buf_set(&current_thread_info()->sched_jb,
-				     lkl_idle_tail_schedule);
+	} else {
+		lkl_bug("thread_sched_jb() can be used only for host task");
 	}
 }
 
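For context on the host jump-buffer ops used by thread_sched_jb(): jmp_buf_set() saves the current execution context and then calls the given function, and jmp_buf_longjmp() later unwinds back to the saved point (here, __switch_to() longjmps back once TIF_SCHED_JB has been handled). A minimal sketch of how a POSIX host backend can provide these two ops with setjmp/longjmp; this is illustrative, not part of this diff, and the buffer size is an assumption:

#include <setjmp.h>

struct lkl_jmp_buf {
	unsigned long buf[32];	/* opaque storage, assumed large enough for a jmp_buf */
};

/* save the current context, then run f(); control comes back here when
 * f() returns or when somebody longjmps to the saved context */
static void jmp_buf_set(struct lkl_jmp_buf *jb, void (*f)(void))
{
	if (!setjmp(*((jmp_buf *)jb->buf)))
		f();
}

/* unwind back into the matching jmp_buf_set() call; val must be nonzero */
static void jmp_buf_longjmp(struct lkl_jmp_buf *jb, int val)
{
	longjmp(*((jmp_buf *)jb->buf), val);
}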
arch/lkl/include/asm/syscalls.h (1 change: 1 addition & 0 deletions)

@@ -4,6 +4,7 @@
 int syscalls_init(void);
 void syscalls_cleanup(void);
 long lkl_syscall(long no, long *params);
+void wakeup_idle_host_task(void);
 
 #define sys_mmap sys_mmap_pgoff
 #define sys_mmap2 sys_mmap_pgoff
arch/lkl/include/asm/thread_info.h (1 change: 0 additions & 1 deletion)

@@ -59,7 +59,6 @@ void threads_cleanup(void);
 #define TIF_NOHZ 6
 #define TIF_SCHED_JB 7
 #define TIF_HOST_THREAD 8
-#define TIF_IDLE 9
 
 #define __HAVE_THREAD_FUNCTIONS
 
arch/lkl/kernel/cpu.c (91 changes: 5 additions & 86 deletions)

@@ -1,8 +1,5 @@
 #include <linux/cpu.h>
-#include <linux/cpuidle.h>
-#include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/tick.h>
 #include <asm/host_ops.h>
 #include <asm/cpu.h>
 #include <asm/thread_info.h>
@@ -52,12 +49,6 @@ struct lkl_cpu {
 	lkl_thread_t owner;
 	/* semaphore for threads waiting the CPU */
 	struct lkl_sem *sem;
-	/* semaphore for the idle thread */
-	struct lkl_sem *idle_sem;
-	/* if the idle thread is pending */
-	bool idle_pending;
-	/* jmp_buf used for idle thread to restart */
-	struct lkl_jmp_buf idle_jb;
 	/* semaphore used for shutdown */
 	struct lkl_sem *shutdown_sem;
 } cpu;
@@ -134,7 +125,8 @@ void lkl_cpu_put(void)
 		lkl_ops->mutex_lock(cpu.lock);
 	}
 
-	if (need_resched() && cpu.count == 1) {
+	if (test_ti_thread_flag(current_thread_info(), TIF_HOST_THREAD) &&
+	    need_resched() && cpu.count == 1) {
 		if (in_interrupt())
 			lkl_bug("%s: in interrupt\n", __func__);
 		lkl_ops->mutex_unlock(cpu.lock);
@@ -191,8 +183,6 @@ static void lkl_cpu_cleanup(bool shutdown)
 		lkl_ops->sem_up(cpu.shutdown_sem);
 	else if (cpu.shutdown_sem)
 		lkl_ops->sem_free(cpu.shutdown_sem);
-	if (cpu.idle_sem)
-		lkl_ops->sem_free(cpu.idle_sem);
 	if (cpu.sem)
 		lkl_ops->sem_free(cpu.sem);
 	if (cpu.lock)
@@ -215,91 +205,20 @@ void arch_cpu_idle(void)
 	/* enable irqs now to allow direct irqs to run */
 	local_irq_enable();
 
-	if (need_resched())
-		return;
-
-	cpu.idle_pending = true;
-	lkl_cpu_put();
-
-	lkl_ops->sem_down(cpu.idle_sem);
-
-	cpu.idle_pending = false;
-	/* to match that of schedule_preempt_disabled() */
-	preempt_disable();
-	lkl_ops->jmp_buf_longjmp(&cpu.idle_jb, 1);
-}
-
-void arch_cpu_idle_prepare(void)
-{
-	set_ti_thread_flag(current_thread_info(), TIF_IDLE);
-	/*
-	 * We hijack the idle loop here so that we can let the idle thread
-	 * jump back to the beginning.
-	 */
-	while (1)
-		lkl_ops->jmp_buf_set(&cpu.idle_jb, do_idle);
-}
-
-void lkl_cpu_wakeup_idle(void)
-{
-	lkl_ops->sem_up(cpu.idle_sem);
+	/* switch to idle_host_task */
+	wakeup_idle_host_task();
 }
 
 int lkl_cpu_init(void)
 {
 	cpu.lock = lkl_ops->mutex_alloc(0);
 	cpu.sem = lkl_ops->sem_alloc(0);
-	cpu.idle_sem = lkl_ops->sem_alloc(0);
 	cpu.shutdown_sem = lkl_ops->sem_alloc(0);
 
-	if (!cpu.lock || !cpu.sem || !cpu.idle_sem || !cpu.shutdown_sem) {
+	if (!cpu.lock || !cpu.sem || !cpu.shutdown_sem) {
 		lkl_cpu_cleanup(false);
 		return -ENOMEM;
 	}
 
 	return 0;
 }
-
-/*
- * Simulate the exit path of idle loop so that we can schedule when LKL is
- * in idle.
- * It's just a duplication of those in idle.c so a better way is to refactor
- * idle.c to expose such function.
- */
-void lkl_idle_tail_schedule(void)
-{
-
-	if (!cpu.idle_pending ||
-	    !test_bit(TIF_IDLE, &current_thread_info()->flags))
-		lkl_bug("%s: not in idle\n", __func__);
-
-	start_critical_timings();
-	__current_set_polling();
-
-	if (WARN_ON_ONCE(irqs_disabled()))
-		local_irq_enable();
-
-	rcu_idle_exit();
-	arch_cpu_idle_exit();
-	preempt_set_need_resched();
-	tick_nohz_idle_exit();
-	__current_clr_polling();
-
-	/*
-	 * memory barrier copied from idle.c
-	 */
-	smp_mb__after_atomic();
-
-	/*
-	 * Didn't find a way to include kernel/sched/sched.h for
-	 * sched_ttwu_pending().
-	 * Anyway, it's no op when not CONFIG_SMP.
-	 */
-
-	schedule_preempt_disabled();
-}
-
-int lkl_cpu_idle_pending(void)
-{
-	return cpu.idle_pending;
-}
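The cpu.lock, cpu.sem, and cpu.shutdown_sem objects above (and the idle_sem this commit removes) come from the host operations. As a rough illustration of the semantics the kernel side assumes, here is a hypothetical pthreads-based host semaphore; names and layout are illustrative, and LKL's real POSIX backend differs in detail:

#include <pthread.h>
#include <stdlib.h>

/* counting semaphore built from a mutex and a condition variable */
struct lkl_sem {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int count;
};

static struct lkl_sem *sem_alloc(int count)
{
	struct lkl_sem *sem = malloc(sizeof(*sem));

	if (!sem)
		return NULL;
	pthread_mutex_init(&sem->lock, NULL);
	pthread_cond_init(&sem->cond, NULL);
	sem->count = count;
	return sem;
}

static void sem_up(struct lkl_sem *sem)
{
	pthread_mutex_lock(&sem->lock);
	sem->count++;
	pthread_cond_signal(&sem->cond);
	pthread_mutex_unlock(&sem->lock);
}

static void sem_down(struct lkl_sem *sem)
{
	pthread_mutex_lock(&sem->lock);
	while (sem->count <= 0)
		pthread_cond_wait(&sem->cond, &sem->lock);
	sem->count--;
	pthread_mutex_unlock(&sem->lock);
}

static void sem_free(struct lkl_sem *sem)
{
	pthread_cond_destroy(&sem->cond);
	pthread_mutex_destroy(&sem->lock);
	free(sem);
}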
arch/lkl/kernel/syscalls.c (42 changes: 42 additions & 0 deletions)

@@ -130,6 +130,34 @@ long lkl_syscall(long no, long *params)
 	return ret;
 }
 
+static struct task_struct *idle_host_task;
+
+/* called from idle, don't failed, don't block */
+void wakeup_idle_host_task(void)
+{
+	if (!need_resched() && idle_host_task)
+		wake_up_process(idle_host_task);
+}
+
+static int idle_host_task_loop(void *unused)
+{
+	struct thread_info *ti = task_thread_info(current);
+
+	snprintf(current->comm, sizeof(current->comm), "idle_host_task");
+	set_thread_flag(TIF_HOST_THREAD);
+	idle_host_task = current;
+
+	for (;;) {
+		lkl_cpu_put();
+		lkl_ops->sem_down(ti->sched_sem);
+		if (idle_host_task == NULL) {
+			lkl_ops->thread_exit();
+			return 0;
+		}
+		schedule_tail(ti->prev_sched);
+	}
+}
+
 int syscalls_init(void)
 {
 	snprintf(current->comm, sizeof(current->comm), "host0");

Review thread on the "return 0;" line in idle_host_task_loop:

	Member: Not needed
	Author: the compiler will warn on it if it is removed.

@@ -142,11 +170,25 @@
 		return -1;
 	}
 
+	if (kernel_thread(idle_host_task_loop, NULL, CLONE_FLAGS) < 0) {
+		if (lkl_ops->tls_free)
+			lkl_ops->tls_free(task_key);
+		return -1;
+	}
+
 	return 0;
 }
 
 void syscalls_cleanup(void)
 {
+	if (idle_host_task) {
+		struct thread_info *ti = task_thread_info(idle_host_task);
+
+		idle_host_task = NULL;
+		lkl_ops->sem_up(ti->sched_sem);
+		lkl_ops->thread_join(ti->tid);
+	}
+
 	if (lkl_ops->tls_free)
 		lkl_ops->tls_free(task_key);
 }
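The shutdown handshake added to syscalls_cleanup(), clear idle_host_task, post the thread's sched_sem, then join it, is a classic poison-pill stop protocol; it is why idle_host_task_loop() re-checks idle_host_task after every wakeup. A standalone pthreads sketch of the same protocol, with hypothetical names and no LKL APIs (build with cc -pthread):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t sched_sem;				/* the worker parks here */
static void *volatile alive = (void *)1;	/* cleared to request exit */

static void *worker(void *unused)
{
	(void)unused;
	for (;;) {
		sem_wait(&sched_sem);	/* parked until woken */
		if (!alive)		/* poison pill: exit request */
			return NULL;
		puts("worker: woken up to do work");
	}
}

int main(void)
{
	pthread_t tid;

	sem_init(&sched_sem, 0, 0);
	pthread_create(&tid, NULL, worker, NULL);

	alive = NULL;			/* request exit... */
	sem_post(&sched_sem);		/* ...wake the worker... */
	pthread_join(tid, NULL);	/* ...and reap it */
	sem_destroy(&sched_sem);
	return 0;
}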
arch/lkl/kernel/threads.c (13 changes: 2 additions & 11 deletions)

@@ -89,31 +89,22 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	struct thread_info *_prev = task_thread_info(prev);
 	struct thread_info *_next = task_thread_info(next);
 	unsigned long _prev_flags = _prev->flags;
-	bool wakeup_idle = test_bit(TIF_IDLE, &_next->flags) &&
-			   lkl_cpu_idle_pending();
 	struct lkl_jmp_buf _prev_jb;
 
 	_current_thread_info = task_thread_info(next);
 	_next->prev_sched = prev;
 	abs_prev = prev;
 
 	BUG_ON(!_next->tid);
-	lkl_cpu_change_owner(_next->tid);
 
 	if (test_bit(TIF_SCHED_JB, &_prev_flags)) {
 		/* Atomic. Must be done before wakeup next */
 		clear_ti_thread_flag(_prev, TIF_SCHED_JB);
 		_prev_jb = _prev->sched_jb;
 	}
-	if (wakeup_idle)
-		schedule_tail(abs_prev);
-
+	lkl_cpu_change_owner(_next->tid);
 	/* No kernel code is allowed after wakeup next */
-	if (wakeup_idle)
-		lkl_cpu_wakeup_idle();
-	else
-		lkl_ops->sem_up(_next->sched_sem);
-
+	lkl_ops->sem_up(_next->sched_sem);
 	if (test_bit(TIF_SCHED_JB, &_prev_flags)) {
 		lkl_ops->jmp_buf_longjmp(&_prev_jb, 1);
 	} else {