From 356d32bec945c68aa111fa01ef40b098de658a05 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:11 -0500
Subject: [PATCH] net: xmit lock owner cleanup
commit 543a589a00e831de55e5f67802b73033dc2dd964 in tip.
- callers of __netif_tx_lock() always pass in 'current' as the lock
  owner, so eliminate this parameter and let the function record the
  owner itself.
- likewise for HARD_TX_LOCK().
- add a netif_tx_lock_recursion() helper so callers no longer
  open-code the owner comparison.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
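---

A minimal userspace sketch of the resulting API shape, for illustration
only; this is not kernel code. The struct and function names below are
hypothetical, and pthread_self() stands in for the kernel's 'current'
task pointer. The point is the shape of the change: the lock helper
records the owner itself, and callers test for recursion through a
predicate instead of open-coding the owner comparison.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for struct netdev_queue, illustration only. */
struct txq_model {
	pthread_mutex_t _xmit_lock;
	pthread_t       xmit_lock_owner;	/* task holding _xmit_lock */
	int             owner_valid;		/* owner field is meaningful */
};

/* As after the patch: no owner argument, the helper records it itself. */
static void model_tx_lock(struct txq_model *txq)
{
	pthread_mutex_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = pthread_self();
	txq->owner_valid = 1;
}

static void model_tx_unlock(struct txq_model *txq)
{
	txq->owner_valid = 0;
	pthread_mutex_unlock(&txq->_xmit_lock);
}

/* Userspace counterpart of the new netif_tx_lock_recursion() helper. */
static int model_tx_lock_recursion(struct txq_model *txq)
{
	return txq->owner_valid &&
	       pthread_equal(txq->xmit_lock_owner, pthread_self());
}

int main(void)
{
	struct txq_model txq = { ._xmit_lock = PTHREAD_MUTEX_INITIALIZER };

	printf("recursion before lock: %d\n", model_tx_lock_recursion(&txq));
	model_tx_lock(&txq);
	printf("recursion while held:  %d\n", model_tx_lock_recursion(&txq));
	model_tx_unlock(&txq);
	return 0;
}

Build with "gcc -pthread"; it prints 0, then 1. The kernel variant needs
no owner_valid flag, since the unlock path can reset xmit_lock_owner and
the raw task-pointer comparison then suffices.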
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ef2b781..d0bf40d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2863,7 +2863,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
if (unlikely(netif_tx_queue_stopped(txq)) &&
(bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
- __netif_tx_lock(txq, (void *)current);
+ __netif_tx_lock(txq);
if ((netif_tx_queue_stopped(txq)) &&
(bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
netif_tx_wake_queue(txq);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 28937f8..3e01ec6 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -508,7 +508,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
if (netif_tx_queue_stopped(nq)) {
- __netif_tx_lock(nq, (void *)current);
+ __netif_tx_lock(nq);
if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
netif_tx_wake_queue(nq);
__netif_tx_unlock(nq);
@@ -901,7 +901,7 @@ static void txq_kick(struct tx_queue *txq)
u32 hw_desc_ptr;
u32 expected_ptr;
- __netif_tx_lock(nq, (void *)current);
+ __netif_tx_lock(nq);
if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
goto out;
@@ -925,7 +925,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
int reclaimed;
- __netif_tx_lock(nq, (void *)current);
+ __netif_tx_lock(nq);
reclaimed = 0;
while (reclaimed < budget && txq->tx_desc_count > 0) {
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 64cff68..c4e38ca 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1625,7 +1625,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
smp_mb();
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
- __netif_tx_lock(tx_ring->txq, smp_processor_id());
+ __netif_tx_lock(tx_ring->txq);
if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_wake_queue(netdev);
adapter->tx_timeo_cnt = 0;
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 324d3ec..1cd2408 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3677,7 +3677,7 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
out:
if (unlikely(netif_tx_queue_stopped(txq) &&
(niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
- __netif_tx_lock(txq, (void *)current);
+ __netif_tx_lock(txq);
if (netif_tx_queue_stopped(txq) &&
(niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
netif_tx_wake_queue(txq);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 787be1e..a3cbe54 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1745,10 +1745,18 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
return (1 << debug_value) - 1;
}
-static inline void __netif_tx_lock(struct netdev_queue *txq, void *curr)
+static inline void __netif_tx_lock(struct netdev_queue *txq)
{
spin_lock(&txq->_xmit_lock);
- txq->xmit_lock_owner = curr;
+ txq->xmit_lock_owner = (void *)current;
+}
+
+/*
+ * Do we hold the xmit_lock already?
+ */
+static inline int netif_tx_lock_recursion(struct netdev_queue *txq)
+{
+ return txq->xmit_lock_owner == (void *)current;
}
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
@@ -1792,10 +1800,8 @@ static inline void txq_trans_update(struct netdev_queue *txq)
static inline void netif_tx_lock(struct net_device *dev)
{
unsigned int i;
- void *curr;
spin_lock(&dev->tx_global_lock);
- curr = (void *)current;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -1805,7 +1811,7 @@ static inline void netif_tx_lock(struct net_device *dev)
* the ->hard_start_xmit() handler and already
* checked the frozen bit.
*/
- __netif_tx_lock(txq, curr);
+ __netif_tx_lock(txq);
set_bit(__QUEUE_STATE_FROZEN, &txq->state);
__netif_tx_unlock(txq);
}
@@ -1840,9 +1846,9 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
local_bh_enable();
}
-#define HARD_TX_LOCK(dev, txq, curr) { \
+#define HARD_TX_LOCK(dev, txq) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \
- __netif_tx_lock(txq, curr); \
+ __netif_tx_lock(txq); \
} \
}
@@ -1855,14 +1861,12 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
static inline void netif_tx_disable(struct net_device *dev)
{
unsigned int i;
- void *curr;
local_bh_disable();
- curr = (void *)current;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- __netif_tx_lock(txq, curr);
+ __netif_tx_lock(txq);
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index ae8ca82..6a867ce 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2067,9 +2067,9 @@ gso:
/*
* No need to check for recursion with threaded interrupts:
*/
- if (txq->xmit_lock_owner != (void *)current) {
+ if (!netif_tx_lock_recursion(txq)) {
- HARD_TX_LOCK(dev, txq, (void *)current);
+ HARD_TX_LOCK(dev, txq);
if (!netif_tx_queue_stopped(txq)) {
rc = dev_hard_start_xmit(skb, dev, txq);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 4f9c6d5..ffd4ecb 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -74,7 +74,7 @@ static void queue_process(struct work_struct *work)
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
local_irq_save_nort(flags);
- __netif_tx_lock(txq, (void *)current);
+ __netif_tx_lock(txq);
if (netif_tx_queue_stopped(txq) ||
netif_tx_queue_frozen(txq) ||
ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 420335b..0a18bbe 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -78,7 +78,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
{
int ret;
- if (unlikely(dev_queue->xmit_lock_owner == (void *)current)) {
+ if (unlikely(netif_tx_lock_recursion(dev_queue))) {
/*
* Same CPU holding the lock. It may be a transient
* configuration error, when hard_start_xmit() recurses. We
@@ -120,7 +120,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
/* And release qdisc */
spin_unlock(root_lock);
- HARD_TX_LOCK(dev, txq, (void *)current);
+ HARD_TX_LOCK(dev, txq);
if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
ret = dev_hard_start_xmit(skb, dev, txq);
--
1.7.1.1
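
Why the owner tracking matters, in one more illustration-only userspace
model (again with hypothetical names, and pthread_self() standing in for
'current'): if the transmit path re-enters itself while the same task
already holds the queue lock, locking again would self-deadlock, so the
packet is dropped instead. This is the decision handle_dev_cpu_collision()
makes in the sch_generic.c hunk above.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t xmit_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t xmit_lock_owner;
static int xmit_lock_held;

/* Drop on re-entry by the owning task instead of deadlocking. */
static int model_xmit(const char *pkt, int reenter)
{
	/*
	 * Racy read is fine here, as in the kernel: only a match with
	 * our own thread id matters, and only we could have written it.
	 */
	if (xmit_lock_held &&
	    pthread_equal(xmit_lock_owner, pthread_self())) {
		fprintf(stderr, "drop %s: tx recursion detected\n", pkt);
		return -1;
	}

	pthread_mutex_lock(&xmit_lock);
	xmit_lock_owner = pthread_self();
	xmit_lock_held = 1;

	printf("xmit %s\n", pkt);
	if (reenter)
		model_xmit("nested-skb", 0);	/* detected and dropped */

	xmit_lock_held = 0;
	pthread_mutex_unlock(&xmit_lock);
	return 0;
}

int main(void)
{
	return model_xmit("skb-0", 1);
}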