From 0da0c26b3a7ee5ab582a117a1b7bc80cdccc98a5 Mon Sep 17 00:00:00 2001
From: mbeauch <mbeauch@cox.net>
Date: Fri, 3 Jul 2009 08:30:10 -0500
Subject: [PATCH] net: detect recursive calls to dev_queue_xmit() on RT
commit ea481113da8a28b622288918e90ef1977ef55dbc in tip.
Changed the real-time patch code to detect recursive calls
to dev_queue_xmit() and drop the packet when such recursion is detected.
Signed-off-by: Mark Beauchemin <mark.beauchemin@sycamorenet.com>
[ ported to latest upstream ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
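The sketch below is illustrative only and not part of the applied diff
(the exact dev_queue_xmit() error path in this kernel version may differ
slightly): the xmit lock owner is recorded as a task pointer rather than
a CPU id, so a recursive call from the same task is caught even though
RT tasks are preemptible and may migrate between CPUs while transmitting.

	/* Ownership is tracked per task instead of per CPU. */
	static inline void __netif_tx_lock(struct netdev_queue *txq, void *curr)
	{
		spin_lock(&txq->_xmit_lock);
		txq->xmit_lock_owner = curr;
	}

	/* In dev_queue_xmit(): a second call from the task that already
	 * holds the lock means hard_start_xmit() recursed, so the packet
	 * is dropped instead of deadlocking (roughly along these lines):
	 */
	if (txq->xmit_lock_owner != (void *)current) {
		HARD_TX_LOCK(dev, txq, (void *)current);
		/* ... hand the skb to the driver ... */
	} else {
		/* Recursion detected: free the skb and report an error. */
		kfree_skb(skb);
		rc = -ENETDOWN;
	}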
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 65df1de..ef2b781 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2863,7 +2863,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
if (unlikely(netif_tx_queue_stopped(txq)) &&
(bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
- __netif_tx_lock(txq, smp_processor_id());
+ __netif_tx_lock(txq, (void *)current);
if ((netif_tx_queue_stopped(txq)) &&
(bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
netif_tx_wake_queue(txq);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index af67af5..28937f8 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -508,7 +508,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
if (netif_tx_queue_stopped(nq)) {
- __netif_tx_lock(nq, smp_processor_id());
+ __netif_tx_lock(nq, (void *)current);
if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
netif_tx_wake_queue(nq);
__netif_tx_unlock(nq);
@@ -901,7 +901,7 @@ static void txq_kick(struct tx_queue *txq)
u32 hw_desc_ptr;
u32 expected_ptr;
- __netif_tx_lock(nq, smp_processor_id());
+ __netif_tx_lock(nq, (void *)current);
if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
goto out;
@@ -925,7 +925,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
int reclaimed;
- __netif_tx_lock(nq, smp_processor_id());
+ __netif_tx_lock(nq, (void *)current);
reclaimed = 0;
while (reclaimed < budget && txq->tx_desc_count > 0) {
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2aed2b3..324d3ec 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3677,7 +3677,7 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
out:
if (unlikely(netif_tx_queue_stopped(txq) &&
(niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
- __netif_tx_lock(txq, smp_processor_id());
+ __netif_tx_lock(txq, (void *)current);
if (netif_tx_queue_stopped(txq) &&
(niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
netif_tx_wake_queue(txq);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c77360e..787be1e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -508,7 +508,7 @@ struct netdev_queue {
* write mostly part
*/
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
- int xmit_lock_owner;
+ void *xmit_lock_owner;
/*
* please use this field instead of dev->trans_start
*/
@@ -1745,41 +1745,41 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
return (1 << debug_value) - 1;
}
-static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
+static inline void __netif_tx_lock(struct netdev_queue *txq, void *curr)
{
spin_lock(&txq->_xmit_lock);
- txq->xmit_lock_owner = cpu;
+ txq->xmit_lock_owner = curr;
}
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
- txq->xmit_lock_owner = raw_smp_processor_id();
+ txq->xmit_lock_owner = (void *)current;
}
static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
int ok = spin_trylock(&txq->_xmit_lock);
if (likely(ok))
- txq->xmit_lock_owner = raw_smp_processor_id();
+ txq->xmit_lock_owner = (void *)current;
return ok;
}
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ txq->xmit_lock_owner = (void *)-1;
spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ txq->xmit_lock_owner = (void *)-1;
spin_unlock_bh(&txq->_xmit_lock);
}
static inline void txq_trans_update(struct netdev_queue *txq)
{
- if (txq->xmit_lock_owner != -1)
+ if (txq->xmit_lock_owner != (void *)-1)
txq->trans_start = jiffies;
}
@@ -1792,10 +1792,10 @@ static inline void txq_trans_update(struct netdev_queue *txq)
static inline void netif_tx_lock(struct net_device *dev)
{
unsigned int i;
- int cpu;
+ void *curr;
spin_lock(&dev->tx_global_lock);
- cpu = raw_smp_processor_id();
+ curr = (void *)current;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -1805,7 +1805,7 @@ static inline void netif_tx_lock(struct net_device *dev)
* the ->hard_start_xmit() handler and already
* checked the frozen bit.
*/
- __netif_tx_lock(txq, cpu);
+ __netif_tx_lock(txq, curr);
set_bit(__QUEUE_STATE_FROZEN, &txq->state);
__netif_tx_unlock(txq);
}
@@ -1840,9 +1840,9 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
local_bh_enable();
}
-#define HARD_TX_LOCK(dev, txq, cpu) { \
+#define HARD_TX_LOCK(dev, txq, curr) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \
- __netif_tx_lock(txq, cpu); \
+ __netif_tx_lock(txq, curr); \
} \
}
@@ -1855,14 +1855,14 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
static inline void netif_tx_disable(struct net_device *dev)
{
unsigned int i;
- int cpu;
+ void *curr;
local_bh_disable();
- cpu = raw_smp_processor_id();
+ curr = (void *)current;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- __netif_tx_lock(txq, cpu);
+ __netif_tx_lock(txq, curr);
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index e96dccf..ae8ca82 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2067,13 +2067,9 @@ gso:
/*
* No need to check for recursion with threaded interrupts:
*/
-#ifdef CONFIG_PREEMPT_RT
- if (1) {
-#else
- if (txq->xmit_lock_owner != cpu) {
-#endif
+ if (txq->xmit_lock_owner != (void *)current) {
- HARD_TX_LOCK(dev, txq, cpu);
+ HARD_TX_LOCK(dev, txq, (void *)current);
if (!netif_tx_queue_stopped(txq)) {
rc = dev_hard_start_xmit(skb, dev, txq);
@@ -4882,7 +4878,7 @@ static void __netdev_init_queue_locks_one(struct net_device *dev,
{
spin_lock_init(&dev_queue->_xmit_lock);
netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
- dev_queue->xmit_lock_owner = -1;
+ dev_queue->xmit_lock_owner = (void *)-1;
}
static void netdev_init_queue_locks(struct net_device *dev)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9f6d1fc..4f9c6d5 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -74,7 +74,7 @@ static void queue_process(struct work_struct *work)
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
local_irq_save_nort(flags);
- __netif_tx_lock(txq, smp_processor_id());
+ __netif_tx_lock(txq, (void *)current);
if (netif_tx_queue_stopped(txq) ||
netif_tx_queue_frozen(txq) ||
ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 7fe7c55..420335b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -78,7 +78,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
{
int ret;
- if (unlikely(dev_queue->xmit_lock_owner == raw_smp_processor_id())) {
+ if (unlikely(dev_queue->xmit_lock_owner == (void *)current)) {
/*
* Same CPU holding the lock. It may be a transient
* configuration error, when hard_start_xmit() recurses. We
@@ -120,7 +120,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
/* And release qdisc */
spin_unlock(root_lock);
- HARD_TX_LOCK(dev, txq, raw_smp_processor_id());
+ HARD_TX_LOCK(dev, txq, (void *)current);
if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
ret = dev_hard_start_xmit(skb, dev, txq);
--
1.7.1.1