diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 677c2eb30179..cc2abcc009e2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -89,8 +89,8 @@ walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {}
  *
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_latency = 6000000ULL;
-unsigned int normalized_sysctl_sched_latency = 6000000ULL;
+unsigned int sysctl_sched_latency = 5000000ULL;
+unsigned int normalized_sysctl_sched_latency = 5000000ULL;
 
 /*
  * Enable/disable honoring sync flag in energy-aware wakeups.
@@ -131,7 +131,7 @@ static unsigned int sched_nr_latency = 8;
  * After fork, child runs first. If set to 0 (default) then
  * parent will (try to) run first.
  */
-unsigned int sysctl_sched_child_runs_first __read_mostly;
+unsigned int sysctl_sched_child_runs_first __read_mostly = 1;
 
 /*
  * SCHED_OTHER wake-up granularity.
@@ -145,7 +145,7 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 
-const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+const_debug unsigned int sysctl_sched_migration_cost = 1000000UL;
 
 DEFINE_PER_CPU_READ_MOSTLY(int, sched_load_boost);
 #ifdef CONFIG_SCHED_WALT
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 50079fa6cde9..b5322ef14154 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -17,7 +17,7 @@ SCHED_FEAT(START_DEBIT, true)
  * wakeup-preemption), since its likely going to consume data we
  * touched, increases cache locality.
  */
-SCHED_FEAT(NEXT_BUDDY, false)
+SCHED_FEAT(NEXT_BUDDY, true)
 
 /*
  * Prefer to schedule the task that ran last (when we did
@@ -44,13 +44,13 @@ SCHED_FEAT(LB_BIAS, true)
 /*
  * Decrement CPU capacity based on time not spent running tasks
  */
-SCHED_FEAT(NONTASK_CAPACITY, true)
+SCHED_FEAT(NONTASK_CAPACITY, false)
 
 /*
  * Queue remote wakeups on the target CPU and process them
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
-SCHED_FEAT(TTWU_QUEUE, false)
+SCHED_FEAT(TTWU_QUEUE, true)
 
 /*
  * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
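
Note: the 5000000ULL figure above is the normalized default; the kernel scales it by a CPU-count factor at boot, per the "6ms * (1 + ilog(ncpus))" formula in the comment (mainline caps ncpus at 8 under the default SCHED_TUNABLESCALING_LOG policy). A minimal userspace sketch of that scaling follows, assuming the mainline get_update_sysctl_factor() behaviour carries over to this tree; it is illustrative only, not kernel code.

/*
 * Illustrative sketch: effective sched_latency from the normalized default,
 * assuming log2 scaling: factor = 1 + ilog2(min(ncpus, 8)).
 */
#include <stdio.h>

static unsigned int ilog2_u(unsigned int n)
{
	unsigned int r = 0;

	while (n >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int normalized_latency_ns = 5000000U;	/* new default from this patch */
	unsigned int ncpus = 8;				/* example core count (assumed) */
	unsigned int capped = ncpus < 8 ? ncpus : 8;	/* mainline caps the factor input at 8 */
	unsigned int factor = 1 + ilog2_u(capped);	/* log2 scaling, as in the comment */

	/* 5 ms * 4 = 20 ms effective latency on an 8-core system */
	printf("effective sched_latency: %u ns\n", normalized_latency_ns * factor);
	return 0;
}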