From f145a0637324f2edf863d3938718608fe15e70fd Mon Sep 17 00:00:00 2001
From: Viresh Kumar <viresh.kumar@linaro.org>
Date: Wed, 26 Jun 2019 10:36:29 +0530
Subject: [PATCH] sched/fair: Start tracking SCHED_IDLE tasks count in cfs_rq

commit 43e9f7f231e40e4534fc3a735da152911a085c16 upstream.

Track how many tasks are present with SCHED_IDLE policy in each cfs_rq.
This will be used by later commits.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: chris.redpath@arm.com
Cc: quentin.perret@linaro.org
Cc: songliubraving@fb.com
Cc: steven.sistare@oracle.com
Cc: subhra.mazumdar@oracle.com
Link: https://lkml.kernel.org/r/0d3cdc427fc68808ad5bccc40e86ed0bf9da8bb4.1561523542.git.viresh.kumar@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
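---

For context: the enqueue/dequeue hunks below detect SCHED_IDLE tasks with
task_has_idle_policy(p). This is assumed to be the pre-existing helper from
kernel/sched/sched.h (not added by this patch), which reduces to roughly the
following sketch:

	static inline int idle_policy(int policy)
	{
		return policy == SCHED_IDLE;
	}

	static inline int task_has_idle_policy(struct task_struct *p)
	{
		return idle_policy(p->policy);
	}

Later commits in this series can then test whether a runqueue holds only
SCHED_IDLE tasks with a cheap comparison along the lines of
rq->nr_running == rq->cfs.idle_h_nr_running, instead of walking the tasks.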

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index db8d7b097c79..a49fddb29d07 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4491,7 +4491,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, dequeue = 1;
+	long task_delta, idle_task_delta, dequeue = 1;
 	bool empty;
 
 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
@@ -4502,6 +4502,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	rcu_read_unlock();
 
 	task_delta = cfs_rq->h_nr_running;
+	idle_task_delta = cfs_rq->idle_h_nr_running;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		/* throttled entity or throttle-on-deactivate */
@@ -4511,6 +4512,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		if (dequeue)
 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
 		qcfs_rq->h_nr_running -= task_delta;
+		qcfs_rq->idle_h_nr_running -= idle_task_delta;
 
 		if (qcfs_rq->load.weight)
 			dequeue = 0;
@@ -4550,7 +4552,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
 	int enqueue = 1;
-	long task_delta;
+	long task_delta, idle_task_delta;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
 
@@ -4570,6 +4572,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		return;
 
 	task_delta = cfs_rq->h_nr_running;
+	idle_task_delta = cfs_rq->idle_h_nr_running;
 	for_each_sched_entity(se) {
 		if (se->on_rq)
 			enqueue = 0;
@@ -4578,6 +4581,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		if (enqueue)
 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
 		cfs_rq->h_nr_running += task_delta;
+		cfs_rq->idle_h_nr_running += idle_task_delta;
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -5191,6 +5195,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
+	int idle_h_nr_running = task_has_idle_policy(p);
 
 	/*
 	 * The code below (indirectly) updates schedutil which looks at
@@ -5223,6 +5228,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 		cfs_rq->h_nr_running++;
+		cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
 		flags = ENQUEUE_WAKEUP;
 	}
@@ -5230,6 +5236,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running++;
+		cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -5291,6 +5298,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int task_sleep = flags & DEQUEUE_SLEEP;
+	int idle_h_nr_running = task_has_idle_policy(p);
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
@@ -5305,6 +5313,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 		cfs_rq->h_nr_running--;
+		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
@@ -5324,6 +5333,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running--;
+		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 2a63f48194c0..85b5c6974c3e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -493,7 +493,8 @@ struct cfs_rq {
 	struct load_weight	load;
 	unsigned long		runnable_weight;
 	unsigned int		nr_running;
-	unsigned int		h_nr_running;
+	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
+	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
 
 	u64			exec_clock;
 	u64			min_vruntime;
-- 
2.7.4
