Subject: skbufhead-raw-lock.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 12 Jul 2011 15:38:34 +0200

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/netdevice.h |    1 +
 include/linux/skbuff.h    |    7 +++++++
 net/core/dev.c            |   26 ++++++++++++++++++++------
 3 files changed, 28 insertions(+), 6 deletions(-)

--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1783,6 +1783,7 @@ struct softnet_data {
 	unsigned int		dropped;
 	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
+	struct sk_buff_head	tofree_queue;
 };
 
 static inline void input_queue_head_incr(struct softnet_data *sd)
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -132,6 +132,7 @@ struct sk_buff_head {
 
 	__u32		qlen;
 	spinlock_t	lock;
+	raw_spinlock_t	raw_lock;
 };
 
 struct sk_buff;
@@ -1008,6 +1009,12 @@ static inline void skb_queue_head_init(s
 	__skb_queue_head_init(list);
 }
 
+static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
+{
+	raw_spin_lock_init(&list->raw_lock);
+	__skb_queue_head_init(list);
+}
+
 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
 		struct lock_class_key *class)
 {
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -225,14 +225,14 @@ static inline struct hlist_head *dev_ind
 static inline void rps_lock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	spin_lock(&sd->input_pkt_queue.lock);
+	raw_spin_lock(&sd->input_pkt_queue.raw_lock);
 #endif
 }
 
 static inline void rps_unlock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	spin_unlock(&sd->input_pkt_queue.lock);
+	raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
 #endif
 }
 
@@ -3537,7 +3537,7 @@ static void flush_backlog(void *arg)
 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
 		if (skb->dev == dev) {
 			__skb_unlink(skb, &sd->input_pkt_queue);
-			kfree_skb(skb);
+			__skb_queue_tail(&sd->tofree_queue, skb);
 			input_queue_head_incr(sd);
 		}
 	}
@@ -3546,10 +3546,13 @@ static void flush_backlog(void *arg)
 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
 		if (skb->dev == dev) {
 			__skb_unlink(skb, &sd->process_queue);
-			kfree_skb(skb);
+			__skb_queue_tail(&sd->tofree_queue, skb);
 			input_queue_head_incr(sd);
 		}
 	}
+
+	if (!skb_queue_empty(&sd->tofree_queue))
+		raise_softirq_irqoff(NET_RX_SOFTIRQ);
 }
 
 static int napi_gro_complete(struct sk_buff *skb)
@@ -4054,10 +4057,17 @@ static void net_rx_action(struct softirq
 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
 	unsigned long time_limit = jiffies + 2;
 	int budget = netdev_budget;
+	struct sk_buff *skb;
 	void *have;
 
 	local_irq_disable();
 
+	while ((skb = __skb_dequeue(&sd->tofree_queue))) {
+		local_irq_enable();
+		kfree_skb(skb);
+		local_irq_disable();
+	}
+
 	while (!list_empty(&sd->poll_list)) {
 		struct napi_struct *n;
 		int work, weight;
@@ -6539,6 +6549,9 @@ static int dev_cpu_callback(struct notif
 		netif_rx(skb);
 		input_queue_head_incr(oldsd);
 	}
+	while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
+		kfree_skb(skb);
+	}
 
 	return NOTIFY_OK;
 }
@@ -6811,8 +6824,9 @@ static int __init net_dev_init(void)
 		struct softnet_data *sd = &per_cpu(softnet_data, i);
 
 		memset(sd, 0, sizeof(*sd));
-		skb_queue_head_init(&sd->input_pkt_queue);
-		skb_queue_head_init(&sd->process_queue);
+		skb_queue_head_init_raw(&sd->input_pkt_queue);
+		skb_queue_head_init_raw(&sd->process_queue);
+		skb_queue_head_init_raw(&sd->tofree_queue);
 		sd->completion_queue = NULL;
 		INIT_LIST_HEAD(&sd->poll_list);
 		sd->output_queue = NULL;