From 6cf8f87baddba1e9fc5d04d1ada5e2c89550e03a Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 15 Jan 2016 16:33:34 +0100
Subject: [PATCH 368/450] net/core: protect users of napi_alloc_cache against
 reentrance

On -RT, code running in BH cannot be moved to another CPU, so CPU-local
variables remain local. However, the code can be preempted, and another
task may then enter BH on the same CPU and access the same
napi_alloc_cache variable.
This patch ensures that each user of napi_alloc_cache takes a local
lock.

Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 net/core/skbuff.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

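For readers without the -RT tree at hand: get_locked_var()/put_locked_var()
and DEFINE_LOCAL_IRQ_LOCK() are the RT patch set's local-lock API from
include/linux/locallock.h, not mainline interfaces. The sketch below is
only a rough approximation of that primitive -- the "_sketch" names are
invented for illustration, and it glosses over task migration between
looking up the per-CPU lock and acquiring it, which the real code must
handle. The idea is to pair per-CPU data with a per-CPU lock, so that
preemptible BH code on PREEMPT_RT is serialized against other tasks
scheduled onto the same CPU:

#include <linux/percpu.h>
#include <linux/spinlock.h>

/*
 * Rough approximation of the -RT local-lock primitive, for
 * illustration only (include/linux/locallock.h differs in detail):
 * each CPU gets a lock guarding that CPU's instance of the associated
 * per-CPU data. On PREEMPT_RT, spin_lock() is a sleeping lock that
 * also disables migration, so the holder stays on the locked CPU.
 */
struct local_irq_lock_sketch {
	spinlock_t lock;
};

#define DEFINE_LOCAL_IRQ_LOCK_SKETCH(name)				\
	static DEFINE_PER_CPU(struct local_irq_lock_sketch, name) = {	\
		.lock = __SPIN_LOCK_UNLOCKED(name),			\
	}

/*
 * Lock this CPU's @lvar, then evaluate to this CPU's @var.
 * NB: this naive version ignores migration between the this_cpu_ptr()
 * lookup and the lock acquisition; the real primitive handles that.
 */
#define get_locked_var_sketch(lvar, var)				\
	(*({								\
		spin_lock(&this_cpu_ptr(&(lvar))->lock);		\
		this_cpu_ptr(&(var));					\
	}))

/* Release the lock taken by get_locked_var_sketch(). */
#define put_locked_var_sketch(lvar, var)				\
	spin_unlock(&this_cpu_ptr(&(lvar))->lock)

With that shape in mind, the hunks below are mostly one mechanical
transformation: this_cpu_ptr(&napi_alloc_cache) becomes
&get_locked_var(napi_alloc_cache_lock, napi_alloc_cache), paired with
put_locked_var() once the cache is no longer touched.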
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1bb67588ca5a..8ec36697002b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -332,6 +332,7 @@ struct napi_alloc_cache {
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
+static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
@@ -361,9 +362,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
 
 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
+	void *data;
 
-	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	data =  page_frag_alloc(&nc->page, fragsz, gfp_mask);
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	return data;
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -457,9 +462,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 				 gfp_t gfp_mask)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 	struct sk_buff *skb;
 	void *data;
+	bool pfmemalloc;
 
 	len += NET_SKB_PAD + NET_IP_ALIGN;
 
@@ -477,7 +483,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	data = page_frag_alloc(&nc->page, len, gfp_mask);
+	pfmemalloc = nc->page.pfmemalloc;
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	if (unlikely(!data))
 		return NULL;
 
@@ -488,7 +497,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 	}
 
 	/* use OR instead of assignment to avoid clearing of bits in mask */
-	if (nc->page.pfmemalloc)
+	if (pfmemalloc)
 		skb->pfmemalloc = 1;
 	skb->head_frag = 1;
 
@@ -720,23 +729,26 @@ void __consume_stateless_skb(struct sk_buff *skb)
 
 void __kfree_skb_flush(void)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* flush skb_cache if containing objects */
 	if (nc->skb_count) {
 		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
 				     nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 
 static inline void _kfree_skb_defer(struct sk_buff *skb)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
 	/* drop skb->head and call any destructors for packet */
 	skb_release_all(skb);
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* record skb to CPU local list */
 	nc->skb_cache[nc->skb_count++] = skb;
 
@@ -751,6 +763,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb)
 				     nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 void __kfree_skb_defer(struct sk_buff *skb)
 {
-- 
2.19.2
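The one hunk that is more than a mechanical lock/unlock wrap is in
__napi_alloc_skb(). Before the change, nc->page.pfmemalloc was read
well after the allocation; that was safe while BH-disabled execution
kept the task pinned and the cache private. Once put_locked_var() can
run in between, another task on the same CPU may refill nc->page, so
the flag has to be snapshotted while the lock is still held. A
condensed, hypothetical sketch of the resulting shape (the length
fixups and the free-@data-on-error path of the real function are
elided):

static struct sk_buff *napi_alloc_skb_sketch(unsigned int len,
					     gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
	data = page_frag_alloc(&nc->page, len, gfp_mask);
	/*
	 * Snapshot under the lock: once put_locked_var() runs, another
	 * task on this CPU may replace nc->page, and the flag would
	 * then describe a page this allocation never came from.
	 */
	pfmemalloc = nc->page.pfmemalloc;
	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb))
		return NULL;		/* real code also frees @data */

	if (pfmemalloc)			/* not nc->page.pfmemalloc */
		skb->pfmemalloc = 1;
	skb->head_frag = 1;
	return skb;
}

The __kfree_skb_flush() and _kfree_skb_defer() hunks need no such
snapshot: everything they read from the cache (skb_count, skb_cache[])
is consumed before the lock is dropped, so the plain lock/unlock
bracket suffices.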