refresh irqwork-push_most_work_into_softirq_context.patch

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/patches/irqwork-push_most_work_into_softirq_context.patch b/patches/irqwork-push_most_work_into_softirq_context.patch
index f3f9b28..1489acb 100644
--- a/patches/irqwork-push_most_work_into_softirq_context.patch
+++ b/patches/irqwork-push_most_work_into_softirq_context.patch
@@ -1,6 +1,7 @@
-Subject: irqwork: push most work into softirq context
+From 690a3e5ec24ce435a1ab38836c18b653b4c3ee2a Mon Sep 17 00:00:00 2001
 From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 Date: Tue, 23 Jun 2015 15:32:51 +0200
+Subject: [PATCH] irqwork: push most work into softirq context
 
 Initially we deferred all irqwork into softirq because we didn't want the
 latency spikes if perf or another user was busy and delayed the RT task.
@@ -18,14 +19,9 @@
 Mike Galbraith,
 
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/irq_work.h |    1 +
- kernel/irq_work.c        |   47 ++++++++++++++++++++++++++++++++++-------------
- kernel/sched/rt.c        |    1 +
- kernel/time/tick-sched.c |    1 +
- kernel/time/timer.c      |    6 +++++-
- 5 files changed, 42 insertions(+), 14 deletions(-)
 
+diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
+index 47b9ebd4a74f..0e427a9997f3 100644
 --- a/include/linux/irq_work.h
 +++ b/include/linux/irq_work.h
 @@ -16,6 +16,7 @@
@@ -36,6 +32,8 @@
  
  struct irq_work {
  	unsigned long flags;
+diff --git a/kernel/irq_work.c b/kernel/irq_work.c
+index bcf107ce0854..0ddaf1e66d8c 100644
 --- a/kernel/irq_work.c
 +++ b/kernel/irq_work.c
 @@ -17,6 +17,7 @@
@@ -55,7 +53,7 @@
  	/* All work should have been flushed before going offline */
  	WARN_ON_ONCE(cpu_is_offline(cpu));
  
-@@ -75,7 +78,12 @@ bool irq_work_queue_on(struct irq_work *
+@@ -75,7 +78,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
  	if (!irq_work_claim(work))
  		return false;
  
@@ -79,7 +77,7 @@
  	/* Only queue if not already pending */
  	if (!irq_work_claim(work))
  		return false;
-@@ -93,13 +104,15 @@ bool irq_work_queue(struct irq_work *wor
+@@ -93,13 +104,15 @@ bool irq_work_queue(struct irq_work *work)
  	/* Queue the entry and raise the IPI if needed. */
  	preempt_disable();
  
@@ -114,7 +112,7 @@
  
  	/* All work should have been flushed before going offline */
  	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
-@@ -132,7 +144,7 @@ static void irq_work_run_list(struct lli
+@@ -132,7 +144,7 @@ static void irq_work_run_list(struct llist_head *list)
  	struct irq_work *work;
  	struct llist_node *llnode;
  
@@ -123,7 +121,7 @@
  
  	if (llist_empty(list))
  		return;
-@@ -169,7 +181,16 @@ static void irq_work_run_list(struct lli
+@@ -169,7 +181,16 @@ static void irq_work_run_list(struct llist_head *list)
  void irq_work_run(void)
  {
  	irq_work_run_list(this_cpu_ptr(&raised_list));
@@ -141,6 +139,8 @@
  }
  EXPORT_SYMBOL_GPL(irq_work_run);
  
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 4dae7ff39ba3..162268dd9960 100644
 --- a/kernel/sched/rt.c
 +++ b/kernel/sched/rt.c
 @@ -102,6 +102,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
@@ -151,9 +151,11 @@
  #endif
  #endif /* CONFIG_SMP */
  	/* We start is dequeued state, because no RT tasks are queued */
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index dde2281324e4..ed2aa06fdbbc 100644
 --- a/kernel/time/tick-sched.c
 +++ b/kernel/time/tick-sched.c
-@@ -224,6 +224,7 @@ static void nohz_full_kick_func(struct i
+@@ -224,6 +224,7 @@ static void nohz_full_kick_func(struct irq_work *work)
  
  static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
  	.func = nohz_full_kick_func,
@@ -161,9 +163,11 @@
  };
  
  /*
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index bccc39f4d0a1..dc70267717f6 100644
 --- a/kernel/time/timer.c
 +++ b/kernel/time/timer.c
-@@ -1604,7 +1604,7 @@ void update_process_times(int user_tick)
+@@ -1623,7 +1623,7 @@ void update_process_times(int user_tick)
  	scheduler_tick();
  	run_local_timers();
  	rcu_check_callbacks(user_tick);
@@ -172,9 +176,9 @@
  	if (in_irq())
  		irq_work_tick();
  #endif
-@@ -1645,6 +1645,10 @@ static __latent_entropy void run_timer_s
- {
- 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+@@ -1677,6 +1677,10 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
+ 	 */
+ 	base->must_forward_clk = false;
  
 +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
 +	irq_work_tick();
@@ -183,3 +187,6 @@
  	__run_timers(base);
  	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
  		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
+-- 
+2.1.4
+