From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 15 Jan 2016 16:33:34 +0100
Subject: net/core: protect users of napi_alloc_cache against reentrance

On -RT, code running in BH cannot be moved to another CPU, so CPU-local
variables remain local. The code can be preempted, however, and another
task may then enter BH on the same CPU and access the same
napi_alloc_cache variable while the first user is still in the middle of
an update.
This patch ensures that each user of napi_alloc_cache takes a local
lock, serializing such accesses.
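
To illustrate the pattern (a minimal sketch with made-up names --
frag_cache, frag_alloc(), alloc_from() are hypothetical; only
DEFINE_LOCAL_IRQ_LOCK(), get_locked_var() and put_locked_var() are the
actual -RT local-lock primitives, as used in the diff below):

	static DEFINE_PER_CPU(struct frag_cache, frag_cache);
	static DEFINE_LOCAL_IRQ_LOCK(frag_cache_lock);

	static void *frag_alloc(unsigned int sz, gfp_t gfp)
	{
		struct frag_cache *fc;
		void *data;

		/*
		 * On a !RT build these accessors reduce to
		 * get_cpu_var()/put_cpu_var(). On -RT, get_locked_var()
		 * takes a per-CPU lock, so a task that preempts us and
		 * enters BH on the same CPU blocks here instead of
		 * operating on a half-updated cache.
		 */
		fc = &get_locked_var(frag_cache_lock, frag_cache);
		data = alloc_from(fc, sz, gfp);
		put_locked_var(frag_cache_lock, frag_cache);
		return data;
	}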

Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 net/core/skbuff.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -361,6 +361,7 @@ struct napi_alloc_cache {
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
+static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
@@ -390,9 +391,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
 
 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
+	void *data;
 
-	return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	data = __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	return data;
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -486,9 +491,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 				 gfp_t gfp_mask)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 	struct sk_buff *skb;
 	void *data;
+	bool pfmemalloc;
 
 	len += NET_SKB_PAD + NET_IP_ALIGN;
 
@@ -506,7 +512,10 @@ struct sk_buff *__napi_alloc_skb(struct
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	data = __alloc_page_frag(&nc->page, len, gfp_mask);
+	pfmemalloc = nc->page.pfmemalloc;
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	if (unlikely(!data))
 		return NULL;
 
@@ -517,7 +526,7 @@ struct sk_buff *__napi_alloc_skb(struct
 	}
 
 	/* use OR instead of assignment to avoid clearing of bits in mask */
-	if (nc->page.pfmemalloc)
+	if (pfmemalloc)
 		skb->pfmemalloc = 1;
 	skb->head_frag = 1;
 
@@ -761,23 +770,26 @@ EXPORT_SYMBOL(consume_skb);
 
 void __kfree_skb_flush(void)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* flush skb_cache if containing objects */
 	if (nc->skb_count) {
 		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
 				     nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 
 static inline void _kfree_skb_defer(struct sk_buff *skb)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
 	/* drop skb->head and call any destructors for packet */
 	skb_release_all(skb);
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* record skb to CPU local list */
 	nc->skb_cache[nc->skb_count++] = skb;
 
@@ -792,6 +804,7 @@ static inline void _kfree_skb_defer(stru
 				     nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 void __kfree_skb_defer(struct sk_buff *skb)
 {