From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 21 Feb 2018 10:39:54 +0100
Subject: [PATCH] net: use task_struct instead of CPU number as the queue
 owner on -RT

In commit ("net: move xmit_recursion to per-task variable on -RT") the
recursion level was changed to be per-task, since we can get preempted
in BH on -RT. The lock owner should consequently be recorded as the task
that holds the lock, not as the CPU it happens to run on. Otherwise we
spuriously trigger the "Dead loop on virtual device" warning on SMP
systems.

Cc: stable-rt@vger.kernel.org
Reported-by: Kurt Kanzenbach <kurt.kanzenbach@linutronix.de>
Tested-by: Kurt Kanzenbach <kurt.kanzenbach@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
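To see why a CPU-based owner check misfires, here is a minimal
user-space sketch (plain C, not kernel code; struct task,
dead_loop_by_cpu() and dead_loop_by_task() are illustration-only names).
On -RT, spinlocks are sleeping locks, so the task holding _xmit_lock can
be preempted in BH and a second sender can then run on the same CPU; a
CPU-number comparison mistakes that ordinary contention for recursion,
while a task comparison does not:

#include <stdbool.h>
#include <stdio.h>

struct task { const char *comm; };

/* !RT model: assume recursion whenever the recorded owner is the CPU
 * the current sender is running on. */
static bool dead_loop_by_cpu(int owner_cpu, int this_cpu)
{
	return owner_cpu == this_cpu;
}

/* -RT model (this patch): assume recursion only if the very task that
 * holds the lock tries to take it again. */
static bool dead_loop_by_task(const struct task *owner,
			      const struct task *cur)
{
	return owner == cur;
}

int main(void)
{
	struct task holder = { .comm = "ksoftirqd/0" }; /* preempted lock holder */
	struct task sender = { .comm = "iperf" };       /* second sender, same CPU */
	int this_cpu = 0;

	/* "holder" recorded CPU 0 as owner, was preempted while holding
	 * _xmit_lock, and "sender" now transmits on CPU 0 as well. */
	printf("CPU check : %s\n", dead_loop_by_cpu(0, this_cpu) ?
	       "false \"Dead loop on virtual device\"" : "ok");
	printf("task check: %s\n", dead_loop_by_task(&holder, &sender) ?
	       "dead loop" : "ok, just wait for the lock");
	return 0;
}

The diff below makes the kernel-side check match the task-based variant
on -RT while keeping the CPU-based variant for !RT builds.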
 include/linux/netdevice.h |   54 ++++++++++++++++++++++++++++++++++++++++------
 net/core/dev.c            |    6 ++++-
 2 files changed, 53 insertions(+), 7 deletions(-)

--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -572,7 +572,11 @@ struct netdev_queue {
 	 * write-mostly part
 	 */
 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct task_struct	*xmit_lock_owner;
+#else
 	int			xmit_lock_owner;
+#endif
 	/*
 	 * Time (in jiffies) of last Tx
 	 */
@@ -3596,10 +3600,48 @@ static inline u32 netif_msg_init(int deb
 	return (1 << debug_value) - 1;
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+	txq->xmit_lock_owner = current;
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = NULL;
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+	if (txq->xmit_lock_owner != NULL)
+		return true;
+	return false;
+}
+
+#else
+
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+	txq->xmit_lock_owner = cpu;
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+	if (txq->xmit_lock_owner != -1)
+		return true;
+	return false;
+}
+#endif
+
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
-	txq->xmit_lock_owner = cpu;
+	netdev_queue_set_owner(txq, cpu);
 }
 
 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -3616,32 +3658,32 @@ static inline void __netif_tx_release(st
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
 	spin_lock_bh(&txq->_xmit_lock);
-	txq->xmit_lock_owner = smp_processor_id();
+	netdev_queue_set_owner(txq, smp_processor_id());
 }
 
 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 {
 	bool ok = spin_trylock(&txq->_xmit_lock);
 	if (likely(ok))
-		txq->xmit_lock_owner = smp_processor_id();
+		netdev_queue_set_owner(txq, smp_processor_id());
 	return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(txq);
 	spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(txq);
 	spin_unlock_bh(&txq->_xmit_lock);
 }
 
 static inline void txq_trans_update(struct netdev_queue *txq)
 {
-	if (txq->xmit_lock_owner != -1)
+	if (netdev_queue_has_owner(txq))
 		txq->trans_start = jiffies;
 }
 
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3545,7 +3545,11 @@ static int __dev_queue_xmit(struct sk_bu
 	if (dev->flags & IFF_UP) {
 		int cpu = smp_processor_id(); /* ok because BHs are off */
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+		if (txq->xmit_lock_owner != current) {
+#else
 		if (txq->xmit_lock_owner != cpu) {
+#endif
 			if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
 				goto recursion_alert;
 
@@ -7759,7 +7763,7 @@ static void netdev_init_one_queue(struct
 	/* Initialize queue lock */
 	spin_lock_init(&queue->_xmit_lock);
 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
-	queue->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(queue);
 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
 	queue->dev = dev;
 #ifdef CONFIG_BQL
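
A possible follow-up, not part of this patch: the open-coded #ifdef left
in __dev_queue_xmit() could be hidden behind a fourth accessor next to
the three introduced above. A sketch, assuming a hypothetical
netdev_queue_owned_by() helper with the same CONFIG_PREEMPT_RT_FULL
split:

/* Hypothetical helper, not in this patch. */
#ifdef CONFIG_PREEMPT_RT_FULL
/* -RT: the owner is a task pointer; the cpu argument exists only so
 * both configurations share one call-site signature. */
static inline bool netdev_queue_owned_by(struct netdev_queue *txq, int cpu)
{
	return txq->xmit_lock_owner == current;
}
#else
/* !RT: the owner is the CPU number recorded at lock time. */
static inline bool netdev_queue_owned_by(struct netdev_queue *txq, int cpu)
{
	return txq->xmit_lock_owner == cpu;
}
#endif

The test in __dev_queue_xmit() would then read
"if (!netdev_queue_owned_by(txq, cpu)) {" with no #ifdef at the call
site.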