trivial refresh of kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch

Rebase housekeeping only: the embedded upstream commit id, file index
lines, hunk offsets and surrounding context are updated so the patch
applies cleanly to the new baseline; the changes the patch itself
carries are unmodified.

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
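
For reference, a minimal illustrative sketch (not part of the diff below)
of the conversion the refreshed patch carries throughout the scheduler:
readers of the per-task affinity mask go through the p->cpus_ptr pointer
rather than taking the address of p->cpus_allowed. The helper name
task_allowed_on_cpu() is made up for illustration and does not appear in
the patch.

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Hypothetical helper, for illustration only. */
static bool task_allowed_on_cpu(struct task_struct *p, int cpu)
{
	/* pre-patch form: cpumask_test_cpu(cpu, &p->cpus_allowed) */
	return cpumask_test_cpu(cpu, p->cpus_ptr);
}
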
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index 5cd5301..ef9ba26 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -1,4 +1,4 @@
-From 778830b0173c4a9e6f649a0976b2ec0b4a97c067 Mon Sep 17 00:00:00 2001
+From 35e17e9fd784fa53f6d84a77008e1bedba027227 Mon Sep 17 00:00:00 2001
 From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 Date: Tue, 4 Apr 2017 12:50:16 +0200
 Subject: [PATCH] kernel: sched: Provide a pointer to the valid CPU mask
@@ -106,10 +106,10 @@
  
  out_unlock:
 diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
-index 9681b5877140..a22a6f347e00 100644
+index 38dfa27730ff..0d6cffcd7987 100644
 --- a/arch/mips/kernel/traps.c
 +++ b/arch/mips/kernel/traps.c
-@@ -1191,12 +1191,12 @@ static void mt_ase_fp_affinity(void)
+@@ -1193,12 +1193,12 @@ static void mt_ase_fp_affinity(void)
  		 * restricted the allowed set to exclude any CPUs with FPUs,
  		 * we'll skip the procedure.
  		 */
@@ -321,7 +321,7 @@
  #define PF_MUTEX_TESTER		0x20000000	/* Thread belongs to the rt mutex tester */
  #define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
 diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
-index f6501f4f6040..cc7a3e699f87 100644
+index ae643412948a..67dcc046c9dd 100644
 --- a/kernel/cgroup/cpuset.c
 +++ b/kernel/cgroup/cpuset.c
 @@ -2092,7 +2092,7 @@ static void cpuset_fork(struct task_struct *task)
@@ -334,7 +334,7 @@
  }
  
 diff --git a/kernel/fork.c b/kernel/fork.c
-index 06d759ab4c62..340633f09d43 100644
+index e53770d2bf95..2791d7d23caa 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
 @@ -563,6 +563,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
@@ -347,7 +347,7 @@
  	 * One for us, one for whoever does the "release_task()" (usually
  	 * parent)
 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index acfd9f8778a0..3ea8c777663f 100644
+index f59eeaa4f86a..63fc634dbdb9 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
 @@ -984,7 +984,7 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
@@ -479,7 +479,7 @@
  	 *  - any previously selected CPU might disappear through hotplug
  	 *
  	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
-@@ -4316,7 +4316,7 @@ static int __sched_setscheduler(struct task_struct *p,
+@@ -4341,7 +4341,7 @@ static int __sched_setscheduler(struct task_struct *p,
  			 * the entire root_domain to become SCHED_DEADLINE. We
  			 * will also fail if there's no bandwidth available.
  			 */
@@ -488,7 +488,7 @@
  			    rq->rd->dl_bw.bw == 0) {
  				task_rq_unlock(rq, p, &rf);
  				return -EPERM;
-@@ -4910,7 +4910,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
+@@ -4935,7 +4935,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
  		goto out_unlock;
  
  	raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -497,7 +497,7 @@
  	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  
  out_unlock:
-@@ -5474,7 +5474,7 @@ int task_can_attach(struct task_struct *p,
+@@ -5499,7 +5499,7 @@ int task_can_attach(struct task_struct *p,
  	 * allowed nodes is unnecessary.  Thus, cpusets are not
  	 * applicable for such threads.  This prevents checking for
  	 * success of set_cpus_allowed_ptr() on all attached tasks
@@ -506,7 +506,7 @@
  	 */
  	if (p->flags & PF_NO_SETAFFINITY) {
  		ret = -EINVAL;
-@@ -5530,7 +5530,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
+@@ -5555,7 +5555,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
  	if (curr_cpu == target_cpu)
  		return 0;
  
@@ -515,7 +515,7 @@
  		return -EINVAL;
  
  	/* TODO: This is not properly updating schedstats */
-@@ -5667,7 +5667,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
+@@ -5692,7 +5692,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
  		next->sched_class->put_prev_task(rq, next);
  
  		/*
@@ -591,10 +591,10 @@
  				     !dl_task(task) ||
  				     !task_on_rq_queued(task))) {
 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index d71109321841..92e0bbbf2b3d 100644
+index bafa4e04b850..bfbfcfa5d942 100644
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -1547,7 +1547,7 @@ static void task_numa_compare(struct task_numa_env *env,
+@@ -1546,7 +1546,7 @@ static void task_numa_compare(struct task_numa_env *env,
  	 */
  	if (cur) {
  		/* Skip this swap candidate if cannot move to the source cpu */
@@ -603,7 +603,7 @@
  			goto unlock;
  
  		/*
-@@ -1657,7 +1657,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
+@@ -1656,7 +1656,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
  
  	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
  		/* Skip this CPU if the source task cannot migrate */
@@ -612,7 +612,7 @@
  			continue;
  
  		env->dst_cpu = cpu;
-@@ -5485,7 +5485,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
+@@ -5385,7 +5385,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
  
  		/* Skip over this group if it has no CPUs allowed */
  		if (!cpumask_intersects(sched_group_cpus(group),
@@ -621,7 +621,7 @@
  			continue;
  
  		local_group = cpumask_test_cpu(this_cpu,
-@@ -5605,7 +5605,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+@@ -5505,7 +5505,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  		return cpumask_first(sched_group_cpus(group));
  
  	/* Traverse only the allowed CPUs */
@@ -630,16 +630,16 @@
  		if (idle_cpu(i)) {
  			struct rq *rq = cpu_rq(i);
  			struct cpuidle_state *idle = idle_get_state(rq);
-@@ -5744,7 +5744,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
+@@ -5607,7 +5607,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
  	if (!test_idle_cores(target, false))
  		return -1;
  
 -	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
 +	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
  
- 	for_each_cpu_wrap(core, cpus, target, wrap) {
+ 	for_each_cpu_wrap(core, cpus, target) {
  		bool idle = true;
-@@ -5778,7 +5778,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
+@@ -5641,7 +5641,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
  		return -1;
  
  	for_each_cpu(cpu, cpu_smt_mask(target)) {
@@ -648,16 +648,16 @@
  			continue;
  		if (idle_cpu(cpu))
  			return cpu;
-@@ -5830,7 +5830,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
- 	time = local_clock();
- 
- 	for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+@@ -5704,7 +5704,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
+ 	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
+ 		if (!--nr)
+ 			return -1;
 -		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
 +		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
  			continue;
  		if (idle_cpu(cpu))
  			break;
-@@ -5985,7 +5985,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
+@@ -5859,7 +5859,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
  	if (sd_flag & SD_BALANCE_WAKE) {
  		record_wakee(p);
  		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
@@ -666,7 +666,7 @@
  	}
  
  	rcu_read_lock();
-@@ -6718,14 +6718,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+@@ -6600,14 +6600,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
  	/*
  	 * We do not migrate tasks that are:
  	 * 1) throttled_lb_pair, or
@@ -683,7 +683,7 @@
  		int cpu;
  
  		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
-@@ -6745,7 +6745,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+@@ -6627,7 +6627,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
  
  		/* Prevent to re-select dst_cpu via env's cpus */
  		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
@@ -692,7 +692,7 @@
  				env->flags |= LBF_DST_PINNED;
  				env->new_dst_cpu = cpu;
  				break;
-@@ -7287,7 +7287,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
+@@ -7169,7 +7169,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
  
  /*
   * Group imbalance indicates (and tries to solve) the problem where balancing
@@ -701,7 +701,7 @@
   *
   * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
   * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
-@@ -7862,7 +7862,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+@@ -7744,7 +7744,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
  	/*
  	 * If the busiest group is imbalanced the below checks don't
  	 * work because they assume all things are equal, which typically
@@ -710,7 +710,7 @@
  	 */
  	if (busiest->group_type == group_imbalanced)
  		goto force_balance;
-@@ -8249,7 +8249,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+@@ -8131,7 +8131,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
  			 * if the curr task on busiest cpu can't be
  			 * moved to this_cpu
  			 */