From 01b904b446bc9822e83b3fbc8155c2bb33776ea0 Mon Sep 17 00:00:00 2001
From: Alfred Chen
Date: Tue, 9 Apr 2024 10:14:52 +0000
Subject: [PATCH] sched/bmq: BMQ boost priority adjustment

Adjust the BMQ boost priority handling, following the recent fix for #89.

Drop the rq->last_ts_switch based boost threshold. boost_task() now takes
the number of boost steps to apply and clamps boost_prio at its per-policy
limit. On wakeup, a task is boosted by one step per ~4ms it has slept
(delta >> 22, with delta in nanoseconds since it last ran); on deactivation
it is boosted by a single step.
---
 kernel/sched/alt_core.c  |  3 ---
 kernel/sched/alt_sched.h |  3 ---
 kernel/sched/bmq.h       | 17 +++++++----------
 3 files changed, 7 insertions(+), 16 deletions(-)

diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
index 4ef5e29e48154f..6fd9663c1d8032 100644
--- a/kernel/sched/alt_core.c
+++ b/kernel/sched/alt_core.c
@@ -4875,9 +4875,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
 #endif
 
 	if (likely(prev != next)) {
-#ifdef CONFIG_SCHED_BMQ
-		rq->last_ts_switch = rq->clock;
-#endif
 		next->last_ran = rq->clock_task;
 
 		/*printk(KERN_INFO "sched: %px -> %px\n", prev, next);*/
diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
index ba1b7b805f1a56..81c3ed9ebb7d77 100644
--- a/kernel/sched/alt_sched.h
+++ b/kernel/sched/alt_sched.h
@@ -235,9 +235,6 @@ struct rq {
 	/* Ensure that all clocks are in the same cache line */
 	u64			clock ____cacheline_aligned;
 	u64			clock_task;
-#ifdef CONFIG_SCHED_BMQ
-	u64			last_ts_switch;
-#endif
 
 	unsigned int  nr_running;
 	unsigned long nr_uninterruptible;
diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
index b7c9813f6fa713..87a6d304f174fb 100644
--- a/kernel/sched/bmq.h
+++ b/kernel/sched/bmq.h
@@ -3,10 +3,7 @@
 /*
  * BMQ only routines
  */
-#define rq_switch_time(rq)	((rq)->clock - (rq)->last_ts_switch)
-#define boost_threshold(p)	(sysctl_sched_base_slice >> ((20 - (p)->boost_prio) / 2))
-
-static inline void boost_task(struct task_struct *p)
+static inline void boost_task(struct task_struct *p, int n)
 {
 	int limit;
 
@@ -21,8 +18,7 @@ static inline void boost_task(struct task_struct *p)
 		return;
 	}
 
-	if (p->boost_prio > limit)
-		p->boost_prio--;
+	p->boost_prio = max(limit, p->boost_prio - n);
 }
 
 static inline void deboost_task(struct task_struct *p)
@@ -90,12 +86,13 @@ static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
 
 static inline void sched_task_ttwu(struct task_struct *p)
 {
-	if(this_rq()->clock_task - p->last_ran > sysctl_sched_base_slice)
-		boost_task(p);
+	s64 delta = this_rq()->clock_task - p->last_ran;
+
+	if (likely(delta > 0))
+		boost_task(p, delta >> 22);
 }
 
 static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
 {
-	if (rq_switch_time(rq) < boost_threshold(p))
-		boost_task(p);
+	boost_task(p, 1);
 }
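
For reference, below is a minimal sketch of the new wakeup boost math. It is
plain user-space C, not part of the patch; the helper name, the start value
and the limit are assumptions made for illustration only.

/*
 * Standalone user-space sketch, not kernel code: it only illustrates the
 * arithmetic of the new wakeup boost.  boost_prio_after_wakeup() and the
 * example start value/limit below are hypothetical stand-ins for
 * p->boost_prio and the per-policy limit picked by boost_task().
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Mirrors boost_task(p, delta >> 22): one step per 2^22 ns (~4.2 ms) slept,
 * clamped at the limit; a lower boost_prio means a stronger boost. */
static int boost_prio_after_wakeup(int boost_prio, int limit, int64_t delta_ns)
{
	if (delta_ns <= 0)		/* same effect as the likely(delta > 0) check */
		return boost_prio;

	return MAX(limit, boost_prio - (int)(delta_ns >> 22));
}

int main(void)
{
	const int64_t sleeps_ns[] = { 1000000LL, 5000000LL, 20000000LL, 100000000LL };
	size_t i;

	for (i = 0; i < sizeof(sleeps_ns) / sizeof(sleeps_ns[0]); i++)
		printf("slept %9lld ns -> boost_prio %d\n",
		       (long long)sleeps_ns[i],
		       boost_prio_after_wakeup(4, -4, sleeps_ns[i]));
	return 0;
}

With the example start value 4 and limit -4, a 1 ms sleep changes nothing, a
5 ms sleep moves boost_prio to 3, and a 100 ms sleep clamps it at the limit,
so longer sleeps earn a proportionally larger boost up to the clamp.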