From 845aed2213b8e6d8b82564793c1394a68168b83b Mon Sep 17 00:00:00 2001
From: Nicholas Piggin <npiggin@gmail.com>
Date: Tue, 9 Apr 2019 19:34:03 +1000
Subject: irq_work: Do not raise an IPI when queueing work on the local CPU

[ Upstream commit 471ba0e686cb13752bc1ff3216c54b69a2d250ea ]

The QEMU PowerPC/PSeries machine model was not expecting a self-IPI,
and it may be a somewhat surprising thing to do, so have irq_work_queue_on
do local queueing when the target is the current CPU.
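
For illustration only (not part of the patch), a minimal sketch of a
caller, with hypothetical names my_work, my_work_fn, my_setup and
my_queue: after this change, passing the current CPU to
irq_work_queue_on() takes the same local queueing path as
irq_work_queue() instead of sending an IPI to ourselves.

  #include <linux/irq_work.h>
  #include <linux/smp.h>

  static void my_work_fn(struct irq_work *work)
  {
  	/* Runs later from irq_work interrupt context on the target CPU. */
  }

  static struct irq_work my_work;

  static void my_setup(void)
  {
  	init_irq_work(&my_work, my_work_fn);
  }

  static void my_queue(int cpu)
  {
  	/*
  	 * With this patch, cpu == smp_processor_id() queues on the
  	 * local lists directly instead of raising a self-IPI.
  	 */
  	irq_work_queue_on(&my_work, cpu);
  }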

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Cédric Le Goater <clg@kaod.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190409093403.20994-1-npiggin@gmail.com
[ Simplified the preprocessor comments.
  Fixed unbalanced curly brackets pointed out by Thomas. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/irq_work.c | 75 ++++++++++++++++++++++++++---------------------
 1 file changed, 42 insertions(+), 33 deletions(-)

diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 6b7cdf17ccf89..73288914ed5e7 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -56,61 +56,70 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
-/*
- * Enqueue the irq_work @work on @cpu unless it's already pending
- * somewhere.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue_on(struct irq_work *work, int cpu)
+/* Enqueue on current CPU, work must already be claimed and preempt disabled */
+static void __irq_work_queue_local(struct irq_work *work)
 {
-	/* All work should have been flushed before going offline */
-	WARN_ON_ONCE(cpu_is_offline(cpu));
-
-#ifdef CONFIG_SMP
-
-	/* Arch remote IPI send/receive backend aren't NMI safe */
-	WARN_ON_ONCE(in_nmi());
+	/* If the work is "lazy", handle it from next tick if any */
+	if (work->flags & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+			arch_irq_work_raise();
+	}
+}
 
+/* Enqueue the irq work @work on the current CPU */
+bool irq_work_queue(struct irq_work *work)
+{
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
-	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-		arch_send_call_function_single_ipi(cpu);
-
-#else /* #ifdef CONFIG_SMP */
-	irq_work_queue(work);
-#endif /* #else #ifdef CONFIG_SMP */
+	/* Queue the entry and raise the IPI if needed. */
+	preempt_disable();
+	__irq_work_queue_local(work);
+	preempt_enable();
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/* Enqueue the irq work @work on the current CPU */
-bool irq_work_queue(struct irq_work *work)
+/*
+ * Enqueue the irq_work @work on @cpu unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue_on(struct irq_work *work, int cpu)
 {
+#ifndef CONFIG_SMP
+	return irq_work_queue(work);
+
+#else /* CONFIG_SMP: */
+	/* All work should have been flushed before going offline */
+	WARN_ON_ONCE(cpu_is_offline(cpu));
+
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
-	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
-
-	/* If the work is "lazy", handle it from next tick if any */
-	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
+	if (cpu != smp_processor_id()) {
+		/* Arch remote IPI send/receive backend aren't NMI safe */
+		WARN_ON_ONCE(in_nmi());
+		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+			arch_send_call_function_single_ipi(cpu);
 	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-			arch_irq_work_raise();
+		__irq_work_queue_local(work);
 	}
-
 	preempt_enable();
 
 	return true;
+#endif /* CONFIG_SMP */
 }
-EXPORT_SYMBOL_GPL(irq_work_queue);
+
 
 bool irq_work_needs_cpu(void)
 {
-- 
2.20.1
