struct hrtimer_cpu_base {
    raw_spinlock_t            lock;
    unsigned int              cpu;
    unsigned int              active_bases;
    unsigned int              clock_was_set_seq;
    unsigned int              hres_active       : 1,
                              in_hrtirq         : 1,
                              hang_detected     : 1,
                              softirq_activated : 1;
    ktime_t                   expires_next;
    struct hrtimer            *next_timer;
    ktime_t                   softirq_expires_next;
    struct hrtimer            *softirq_next_timer;
    struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
};
enum hrtimer_base_type {
    HRTIMER_BASE_MONOTONIC,
    HRTIMER_BASE_REALTIME,
    HRTIMER_BASE_BOOTTIME,
    HRTIMER_BASE_TAI,
    HRTIMER_BASE_MONOTONIC_SOFT,
    HRTIMER_BASE_REALTIME_SOFT,
    HRTIMER_BASE_BOOTTIME_SOFT,
    HRTIMER_BASE_TAI_SOFT,
    HRTIMER_MAX_CLOCK_BASES,
};
struct hrtimer_clock_base {
    struct hrtimer_cpu_base *cpu_base;
    unsigned int            index;
    clockid_t               clockid;
    seqcount_t              seq;
    struct hrtimer          *running;
    struct timerqueue_head  active;
    ktime_t                 (*get_time)(void);
    ktime_t                 offset;
};
struct timerqueue_head {
    struct rb_root         head;
    struct timerqueue_node *next;
};

struct timerqueue_node {
    struct rb_node node;
    ktime_t        expires;
};
struct hrtimer {
    struct timerqueue_node    node;
    ktime_t                   _softexpires;
    enum hrtimer_restart      (*function)(struct hrtimer *);
    struct hrtimer_clock_base *base;
    u8                        state;
    u8                        is_rel;  /* timer was armed relative */
    u8                        is_soft; /* expired in soft interrupt context */
};
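For orientation, a kernel user normally drives these structures through the hrtimer API: initialize the timer onto one of the clock bases above, point ->function at a callback, and arm it with an expiry. A minimal sketch follows; my_timer, my_timer_cb and my_timer_setup are hypothetical names, not part of the code being analyzed.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;                 /* hypothetical example timer */

static enum hrtimer_restart my_timer_cb(struct hrtimer *t)
{
    /* runs when the timer expires; return HRTIMER_RESTART to re-arm */
    return HRTIMER_NORESTART;
}

static void my_timer_setup(void)
{
    hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    my_timer.function = my_timer_cb;
    /* hrtimer_start() enqueues the timerqueue_node into the per-CPU
     * CLOCK_MONOTONIC clock_base rbtree shown above */
    hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}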
void hrtimer_run_queues(void)
{
    struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
    unsigned long flags;
    ktime_t now;

    if (__hrtimer_hres_active(cpu_base))
        return;

    if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
        /* change event_handler to hrtimer_interrupt */
        hrtimer_switch_to_hres();
        return;
    }

    raw_spin_lock_irqsave(&cpu_base->lock, flags);
    now = hrtimer_update_base(cpu_base);

    if (!ktime_before(now, cpu_base->softirq_expires_next)) {
        cpu_base->softirq_expires_next = KTIME_MAX;
        cpu_base->softirq_activated = 1;
        raise_softirq_irqoff(HRTIMER_SOFTIRQ);
    }

    __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
    raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}
static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
                                 unsigned long flags, unsigned int active_mask)
{
    struct hrtimer_clock_base *base;
    unsigned int active = cpu_base->active_bases & active_mask;

    for_each_active_base(base, cpu_base, active) {
        struct timerqueue_node *node;
        ktime_t basenow;

        basenow = ktime_add(now, base->offset);

        while ((node = timerqueue_getnext(&base->active))) {
            struct hrtimer *timer;

            timer = container_of(node, struct hrtimer, node);

            if (basenow < hrtimer_get_softexpires_tv64(timer))
                break;

            __run_hrtimer(cpu_base, base, timer, &basenow, flags);
        }
    }
}
static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
                          struct hrtimer_clock_base *base,
                          struct hrtimer *timer, ktime_t *now,
                          unsigned long flags)
{
    enum hrtimer_restart (*fn)(struct hrtimer *);
    int restart;

    lockdep_assert_held(&cpu_base->lock);

    debug_deactivate(timer);
    base->running = timer;

    /* Separate the ->running assignment from the ->state assignment.
     * As with a regular write barrier, this ensures the read side in
     * hrtimer_active() cannot observe base->running == NULL &&
     * timer->state == INACTIVE. */
    raw_write_seqcount_barrier(&base->seq);

    __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
    fn = timer->function;

    /* Clear the 'is relative' flag for the TIME_LOW_RES case. If the
     * timer is restarted with a period then it becomes an absolute
     * timer. If its not restarted it does not matter. */
    if (IS_ENABLED(CONFIG_TIME_LOW_RES))
        timer->is_rel = false;

    /* The timer is marked as running in the CPU base, so it is
     * protected against migration to a different CPU even if the lock
     * is dropped. */
    raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
    trace_hrtimer_expire_entry(timer, now);
    restart = fn(timer);
    trace_hrtimer_expire_exit(timer);
    raw_spin_lock_irq(&cpu_base->lock);

    /* Note: We clear the running state after enqueue_hrtimer and
     * we do not reprogram the event hardware. Happens either in
     * hrtimer_start_range_ns() or in hrtimer_interrupt().
     * Note: Because we dropped the cpu_base->lock above,
     * hrtimer_start_range_ns() can have popped in and enqueued the timer
     * for us already. */
    if (restart != HRTIMER_NORESTART &&
        !(timer->state & HRTIMER_STATE_ENQUEUED))
        enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);

    /* Same barrier as above: the read side in hrtimer_active() must not
     * observe base->running.timer == NULL && timer->state == INACTIVE. */
    raw_write_seqcount_barrier(&base->seq);

    WARN_ON_ONCE(base->running != timer);
    base->running = NULL;
}
tick_check_oneshot_change
int tick_check_oneshot_change(int allow_nohz)
{
    struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

    if (!test_and_clear_bit(0, &ts->check_clocks))
        return 0;

    if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
        return 0;

    if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
        return 0;

    if (!allow_nohz)
        return 1;

    tick_nohz_switch_to_nohz();
    return 0;
}
static void tick_nohz_switch_to_nohz(void)
{
    struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
    ktime_t next;

    if (!tick_nohz_enabled)
        return;

    if (tick_switch_to_oneshot(tick_nohz_handler))
        return;

    /* Recycle the hrtimer in ts, so we can share the
     * hrtimer_forward with the highres code. */
    hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);

    /* Get the next period */
    next = tick_init_jiffy_update();

    hrtimer_set_expires(&ts->sched_timer, next);
    hrtimer_forward_now(&ts->sched_timer, tick_period);
    tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
    tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
{
    struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
    struct clock_event_device *dev = td->evtdev;

    td->mode = TICKDEV_MODE_ONESHOT;
    dev->event_handler = handler;
    clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
    tick_broadcast_switch_to_oneshot();
    return 0;
}
int tick_program_event(ktime_t expires, int force)
{
    struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

    if (unlikely(clockevent_state_oneshot_stopped(dev))) {
        clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
    }

    return clockevents_program_event(dev, expires, force);
}
hrtimer_switch_to_hres
static void hrtimer_switch_to_hres(void)
{
    struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

    if (tick_init_highres()) {
        return;
    }

    base->hres_active = 1;
    hrtimer_resolution = HIGH_RES_NSEC;

    tick_setup_sched_timer();

    /* "Retrigger" the interrupt to get things going */
    retrigger_next_event(NULL);
}
int tick_init_highres(void)
{
    return tick_switch_to_oneshot(hrtimer_interrupt);
}
hrtimer tick emulation
/* setup the tick emulation timer */
void tick_setup_sched_timer(void)
{
    struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
    ktime_t now = ktime_get();

    hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    ts->sched_timer.function = tick_sched_timer;

    /* Get the next period (per-CPU) */
    hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

    /* Offset the tick to avert jiffies_lock contention. */
    if (sched_skew_tick) {
        u64 offset = ktime_to_ns(tick_period) >> 1;

        do_div(offset, num_possible_cpus());
        offset *= smp_processor_id();
        hrtimer_add_expires_ns(&ts->sched_timer, offset);
    }

    hrtimer_forward(&ts->sched_timer, now, tick_period);
    hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
    tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
    struct tick_sched *ts = container_of(timer, struct tick_sched, sched_timer);
    struct pt_regs *regs = get_irq_regs();
    ktime_t now = ktime_get();

    tick_sched_do_timer(ts, now);

    /* Do not call, when we are not in irq context and have
     * no valid regs pointer */
    if (regs)
        tick_sched_handle(ts, regs);
    else
        ts->next_tick = 0;

    /* No need to reprogram if we are in idle or full dynticks mode */
    if (unlikely(ts->tick_stopped))
        return HRTIMER_NORESTART;

    hrtimer_forward(timer, now, tick_period);

    return HRTIMER_RESTART;
}
static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
    int cpu = smp_processor_id();

    if (tick_do_timer_cpu == cpu)
        tick_do_update_jiffies64(now);

    if (ts->inidle)
        ts->got_idle_tick = 1;
}
static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
    update_process_times(user_mode(regs));
    profile_tick(CPU_PROFILING);
}
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
                const struct __kernel_itimerspec __user *, new_setting,
                struct __kernel_itimerspec __user *, old_setting)
{
    struct itimerspec64 new_spec, old_spec;
    struct itimerspec64 *rtn = old_setting ? &old_spec : NULL;
    int error = 0;

    get_itimerspec64(&new_spec, new_setting);

    /* do_timer_settime() shown inlined (simplified) to expose the key steps */
    error = do_timer_settime(timer_id, flags, &new_spec, rtn) {
        timr = lock_timer(timer_id, &flag);
        kc = timr->kclock;
        error = kc->timer_set(timr, flags, new_spec64, old_spec64);
    }

    if (!error && old_setting) {
        if (put_itimerspec64(&old_spec, old_setting))
            error = -EFAULT;
    }

    return error;
}
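The user-space entry into this path is the POSIX timer API. As a rough sketch of how the syscall above is reached (error handling omitted; not part of the kernel code being analyzed):

#include <signal.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
    timer_t timerid;
    struct sigevent sev = { 0 };
    struct itimerspec its = { 0 };

    sev.sigev_notify = SIGEV_SIGNAL;
    sev.sigev_signo  = SIGRTMIN;
    its.it_value.tv_sec    = 1;   /* first expiry after 1 second */
    its.it_interval.tv_sec = 1;   /* then periodically every second */

    timer_create(CLOCK_REALTIME, &sev, &timerid);
    /* enters the timer_settime syscall -> do_timer_settime() -> common_timer_set() */
    timer_settime(timerid, 0, &its, NULL);

    for (;;)
        pause();
}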
static const struct k_clock *const posix_clocks[] = {
    [CLOCK_REALTIME]           = &clock_realtime,
    [CLOCK_MONOTONIC]          = &clock_monotonic,
    [CLOCK_PROCESS_CPUTIME_ID] = &clock_process,
    [CLOCK_THREAD_CPUTIME_ID]  = &clock_thread,
    [CLOCK_MONOTONIC_RAW]      = &clock_monotonic_raw,
    [CLOCK_REALTIME_COARSE]    = &clock_realtime_coarse,
    [CLOCK_MONOTONIC_COARSE]   = &clock_monotonic_coarse,
    [CLOCK_BOOTTIME]           = &clock_boottime,
    [CLOCK_REALTIME_ALARM]     = &alarm_clock,
    [CLOCK_BOOTTIME_ALARM]     = &alarm_clock,
    [CLOCK_TAI]                = &clock_tai,
};
static const struct k_clock clock_realtime = {
    .timer_set = common_timer_set,
    .timer_arm = common_hrtimer_arm,
};
int common_timer_set(struct k_itimer *timr, int flags,
                     struct itimerspec64 *new_setting,
                     struct itimerspec64 *old_setting)
{
    const struct k_clock *kc = timr->kclock;
    bool sigev_none;
    ktime_t expires;

    if (old_setting)
        common_timer_get(timr, old_setting);

    /* Prevent rearming by clearing the interval */
    timr->it_interval = 0;

    /* Careful here. On SMP systems the timer expiry function could be
     * active and spinning on timr->it_lock. */
    if (kc->timer_try_to_cancel(timr) < 0)
        return TIMER_RETRY;

    timr->it_active = 0;
    timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
                               ~REQUEUE_PENDING;
    timr->it_overrun_last = 0;

    /* Switch off the timer when it_value is zero */
    if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
        return 0;

    timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
    expires = timespec64_to_ktime(new_setting->it_value);
    sigev_none = timr->it_sigev_notify == SIGEV_NONE;

    kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
    timr->it_active = !sigev_none;
    return 0;
}
static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
                               bool absolute, bool sigev_none)
{
    struct hrtimer *timer = &timr->it.real.timer;
    enum hrtimer_mode mode;

    mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;

    if (timr->it_clock == CLOCK_REALTIME)
        timr->kclock = absolute ? &clock_realtime : &clock_monotonic;

    hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
    timr->it.real.timer.function = posix_timer_fn;

    if (!absolute)
        expires = ktime_add_safe(expires, timer->base->get_time());
    hrtimer_set_expires(timer, expires);

    if (!sigev_none)
        hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
timer cancel
static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
{
    return hrtimer_try_to_cancel(&timr->it.real.timer);
}
/* Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
    struct hrtimer_clock_base *base;
    unsigned long flags;
    int ret = -1;

    if (!hrtimer_active(timer))
        return 0;

    base = lock_hrtimer_base(timer, &flags);

    if (!hrtimer_callback_running(timer)) /* timer->base->running == timer */
        ret = remove_hrtimer(timer, base, false);

    unlock_hrtimer_base(timer, &flags);
    return ret;
}
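When the caller must additionally guarantee that the callback has finished (the -1 case above), the kernel wraps this in a retry loop; hrtimer_cancel() is roughly the following, spinning until the running callback completes:

int hrtimer_cancel(struct hrtimer *timer)
{
    for (;;) {
        int ret = hrtimer_try_to_cancel(timer);

        if (ret >= 0)
            return ret;       /* 0: was not queued, 1: dequeued */
        cpu_relax();          /* callback still running, retry */
    }
}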
/* A timer is active, when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 * It is important for this function to not return a false negative. */
bool hrtimer_active(const struct hrtimer *timer)
{
    struct hrtimer_clock_base *base;
    unsigned int seq;

    do {
        base = READ_ONCE(timer->base);
        seq = raw_read_seqcount_begin(&base->seq);

        if (timer->state != HRTIMER_STATE_INACTIVE ||
            base->running == timer)
            return true;

    } while (read_seqcount_retry(&base->seq, seq) ||
             base != READ_ONCE(timer->base));

    return false;
}
int remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
    u8 state = timer->state;

    if (state & HRTIMER_STATE_ENQUEUED) {
        int reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);

        if (!restart)
            state = HRTIMER_STATE_INACTIVE;

        __remove_hrtimer(timer, base, state, reprogram);
        return 1;
    }
    return 0;
}
static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
                             u8 newstate, int reprogram)
{
    struct hrtimer_cpu_base *cpu_base = base->cpu_base;
    u8 state = timer->state;

    /* Pairs with the lockless read in hrtimer_is_queued() */
    WRITE_ONCE(timer->state, newstate);
    if (!(state & HRTIMER_STATE_ENQUEUED))
        return;

    if (!timerqueue_del(&base->active, &timer->node))
        cpu_base->active_bases &= ~(1 << base->index);

    if (reprogram && timer == cpu_base->next_timer)
        hrtimer_force_reprogram(cpu_base, 1);
}
posix_timer_fn
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
    struct k_itimer *timr;
    unsigned long flags;
    int si_private = 0;
    enum hrtimer_restart ret = HRTIMER_NORESTART;

    timr = container_of(timer, struct k_itimer, it.real.timer);
    spin_lock_irqsave(&timr->it_lock, flags);

    timr->it_active = 0;
    if (timr->it_interval != 0)
        si_private = ++timr->it_requeue_pending;

    if (posix_timer_event(timr, si_private)) {
        /* signal was not sent because of sig_ignor,
         * we will not get a call back to restart it AND
         * it should be restarted. */
        if (timr->it_interval != 0) {
            ktime_t now = hrtimer_cb_get_time(timer);

#ifdef CONFIG_HIGH_RES_TIMERS
            {
                ktime_t kj = NSEC_PER_SEC / HZ;

                if (timr->it_interval < kj)
                    now = ktime_add(now, kj);
            }
#endif
            timr->it_overrun += hrtimer_forward(timer, now,
                                                timr->it_interval);
            ret = HRTIMER_RESTART;
            ++timr->it_requeue_pending;
            timr->it_active = 1;
        }
    }

    unlock_timer(timr, flags);
    return ret;
}
int posix_timer_event(struct k_itimer *timr, int si_private)
{
    enum pid_type type;
    int ret = -1;

    timr->sigq->info.si_sys_private = si_private;

    type = !(timr->it_sigev_notify & SIGEV_THREAD_ID) ? PIDTYPE_TGID : PIDTYPE_PID;
    ret = send_sigqueue(timr->sigq, timr->it_pid, type);

    return ret > 0;
}
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
    int sig = q->info.si_signo;
    struct sigpending *pending;
    struct task_struct *t;
    unsigned long flags;
    int ret, result;

    ret = -1;
    rcu_read_lock();
    t = pid_task(pid, type);
    if (!t || !likely(lock_task_sighand(t, &flags)))
        goto ret;

    ret = 1; /* the signal is ignored */
    result = TRACE_SIGNAL_IGNORED;
    if (!prepare_signal(sig, t, false))
        goto out;

    ret = 0;
    if (unlikely(!list_empty(&q->list))) {
        /* If an SI_TIMER entry is already queued, just increment
         * the overrun count. */
        BUG_ON(q->info.si_code != SI_TIMER);
        q->info.si_overrun++;
        result = TRACE_SIGNAL_ALREADY_PENDING;
        goto out;
    }
    q->info.si_overrun = 0;

    signalfd_notify(t, sig);
    pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
    list_add_tail(&q->list, &pending->list);
    sigaddset(&pending->signal, sig);
    complete_signal(sig, t, type);
    result = TRACE_SIGNAL_DELIVERED;
out:
    trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
    unlock_task_sighand(t, &flags);
ret:
    rcu_read_unlock();
    return ret;
}