| From e454faf4e4af08bb6ef945c2041b8708d36e6462 Mon Sep 17 00:00:00 2001 |
| From: Sasha Levin <sashal@kernel.org> |
| Date: Wed, 12 May 2021 23:43:24 +0200 |
| Subject: net: Treat __napi_schedule_irqoff() as __napi_schedule() on |
| PREEMPT_RT |
| |
| From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| |
| [ Upstream commit 8380c81d5c4fced6f4397795a5ae65758272bbfd ] |
| |
| __napi_schedule_irqoff() is an optimized version of __napi_schedule() |
| which can be used where it is known that interrupts are disabled, |
| e.g. in interrupt-handlers, spin_lock_irq() sections or hrtimer |
| callbacks. |
| |
| On PREEMPT_RT enabled kernels this assumption is not true. Force- |
| threaded interrupt handlers and spinlocks are not disabling interrupts |
| and the NAPI hrtimer callback is forced into softirq context which runs |
| with interrupts enabled as well. |
| |
| Chasing all usage sites of __napi_schedule_irqoff() is a whack-a-mole |
| game so make __napi_schedule_irqoff() invoke __napi_schedule() for |
| PREEMPT_RT kernels. |
| |
| The callers of ____napi_schedule() in the networking core have been |
| audited and are correct on PREEMPT_RT kernels as well. |
| |
| Reported-by: Juri Lelli <juri.lelli@redhat.com> |
| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| Reviewed-by: Thomas Gleixner <tglx@linutronix.de> |
| Reviewed-by: Juri Lelli <juri.lelli@redhat.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| Signed-off-by: Sasha Levin <sashal@kernel.org> |
| --- |
| net/core/dev.c | 11 +++++++++-- |
| 1 file changed, 9 insertions(+), 2 deletions(-) |
| |
| diff --git a/net/core/dev.c b/net/core/dev.c |
| index 7803bd9628dc..722ae0b57f3f 100644 |
| --- a/net/core/dev.c |
| +++ b/net/core/dev.c |
| @@ -5931,11 +5931,18 @@ EXPORT_SYMBOL(napi_schedule_prep); |
| * __napi_schedule_irqoff - schedule for receive |
| * @n: entry to schedule |
| * |
| - * Variant of __napi_schedule() assuming hard irqs are masked |
| + * Variant of __napi_schedule() assuming hard irqs are masked. |
| + * |
| + * On PREEMPT_RT enabled kernels this maps to __napi_schedule() |
| + * because the interrupt disabled assumption might not be true |
| + * due to force-threaded interrupts and spinlock substitution. |
| */ |
| void __napi_schedule_irqoff(struct napi_struct *n) |
| { |
| - ____napi_schedule(this_cpu_ptr(&softnet_data), n); |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) |
| + ____napi_schedule(this_cpu_ptr(&softnet_data), n); |
| + else |
| + __napi_schedule(n); |
| } |
| EXPORT_SYMBOL(__napi_schedule_irqoff); |
| |
| -- |
| 2.30.2 |
| |