| From f3e6acf1a00c6f8a87cd7d470e8b92bcdffe6cee Mon Sep 17 00:00:00 2001 |
| From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| Date: Wed, 25 Jan 2017 16:34:27 +0100 |
| Subject: [PATCH] radix-tree: use local locks |
| |
| The preload functionality uses per-CPU variables and preempt-disable to |
| ensure that it does not switch CPUs during its usage. This patch adds |
| local_lock() instead of preempt_disable() for the same purpose and to |
| remain preemptible on -RT. |
| |
| Cc: stable-rt@vger.kernel.org |
| Reported-and-debugged-by: Mike Galbraith <efault@gmx.de> |
| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| |
| diff --git a/include/linux/idr.h b/include/linux/idr.h |
| index bf70b3ef0a07..413c4dbbafb2 100644 |
| --- a/include/linux/idr.h |
| +++ b/include/linux/idr.h |
| @@ -111,10 +111,7 @@ static inline bool idr_is_empty(const struct idr *idr) |
| * Each idr_preload() should be matched with an invocation of this |
| * function. See idr_preload() for details. |
| */ |
| -static inline void idr_preload_end(void) |
| -{ |
| - preempt_enable(); |
| -} |
| +void idr_preload_end(void); |
| |
| /** |
| * idr_find - return pointer for given id |
| diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h |
| index 3e5735064b71..83177c8dfb02 100644 |
| --- a/include/linux/radix-tree.h |
| +++ b/include/linux/radix-tree.h |
| @@ -328,6 +328,8 @@ unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *, |
| int radix_tree_preload(gfp_t gfp_mask); |
| int radix_tree_maybe_preload(gfp_t gfp_mask); |
| int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); |
| +void radix_tree_preload_end(void); |
| + |
| void radix_tree_init(void); |
| void *radix_tree_tag_set(struct radix_tree_root *, |
| unsigned long index, unsigned int tag); |
| @@ -347,11 +349,6 @@ unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *, |
| unsigned int max_items, unsigned int tag); |
| int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); |
| |
| -static inline void radix_tree_preload_end(void) |
| -{ |
| - preempt_enable(); |
| -} |
| - |
| int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t); |
| int radix_tree_split(struct radix_tree_root *, unsigned long index, |
| unsigned new_order); |
| diff --git a/lib/radix-tree.c b/lib/radix-tree.c |
| index 79a804f1aab9..f691478665f7 100644 |
| --- a/lib/radix-tree.c |
| +++ b/lib/radix-tree.c |
| @@ -37,7 +37,7 @@ |
| #include <linux/rcupdate.h> |
| #include <linux/slab.h> |
| #include <linux/string.h> |
| - |
| +#include <linux/locallock.h> |
| |
| /* Number of nodes in fully populated tree of given height */ |
| static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly; |
| @@ -86,6 +86,7 @@ struct radix_tree_preload { |
| struct radix_tree_node *nodes; |
| }; |
| static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; |
| +static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock); |
| |
| static inline struct radix_tree_node *entry_to_node(void *ptr) |
| { |
| @@ -404,12 +405,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, |
| * succeed in getting a node here (and never reach |
| * kmem_cache_alloc) |
| */ |
| - rtp = this_cpu_ptr(&radix_tree_preloads); |
| + rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads); |
| if (rtp->nr) { |
| ret = rtp->nodes; |
| rtp->nodes = ret->parent; |
| rtp->nr--; |
| } |
| + put_locked_var(radix_tree_preloads_lock, radix_tree_preloads); |
| /* |
| * Update the allocation stack trace as this is more useful |
| * for debugging. |
| @@ -475,14 +477,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) |
| */ |
| gfp_mask &= ~__GFP_ACCOUNT; |
| |
| - preempt_disable(); |
| + local_lock(radix_tree_preloads_lock); |
| rtp = this_cpu_ptr(&radix_tree_preloads); |
| while (rtp->nr < nr) { |
| - preempt_enable(); |
| + local_unlock(radix_tree_preloads_lock); |
| node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); |
| if (node == NULL) |
| goto out; |
| - preempt_disable(); |
| + local_lock(radix_tree_preloads_lock); |
| rtp = this_cpu_ptr(&radix_tree_preloads); |
| if (rtp->nr < nr) { |
| node->parent = rtp->nodes; |
| @@ -524,7 +526,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask) |
| if (gfpflags_allow_blocking(gfp_mask)) |
| return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); |
| /* Preloading doesn't help anything with this gfp mask, skip it */ |
| - preempt_disable(); |
| + local_lock(radix_tree_preloads_lock); |
| return 0; |
| } |
| EXPORT_SYMBOL(radix_tree_maybe_preload); |
| @@ -562,7 +564,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) |
| |
| /* Preloading doesn't help anything with this gfp mask, skip it */ |
| if (!gfpflags_allow_blocking(gfp_mask)) { |
| - preempt_disable(); |
| + local_lock(radix_tree_preloads_lock); |
| return 0; |
| } |
| |
| @@ -596,6 +598,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) |
| return __radix_tree_preload(gfp_mask, nr_nodes); |
| } |
| |
| +void radix_tree_preload_end(void) |
| +{ |
| + local_unlock(radix_tree_preloads_lock); |
| +} |
| +EXPORT_SYMBOL(radix_tree_preload_end); |
| + |
| static unsigned radix_tree_load_root(const struct radix_tree_root *root, |
| struct radix_tree_node **nodep, unsigned long *maxindex) |
| { |
| @@ -2108,6 +2116,12 @@ void idr_preload(gfp_t gfp_mask) |
| } |
| EXPORT_SYMBOL(idr_preload); |
| |
| +void idr_preload_end(void) |
| +{ |
| + local_unlock(radix_tree_preloads_lock); |
| +} |
| +EXPORT_SYMBOL(idr_preload_end); |
| + |
| /** |
| * ida_pre_get - reserve resources for ida allocation |
| * @ida: ida handle |
| @@ -2124,7 +2138,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) |
| * to return to the ida_pre_get() step. |
| */ |
| if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE)) |
| - preempt_enable(); |
| + local_unlock(radix_tree_preloads_lock); |
| |
| if (!this_cpu_read(ida_bitmap)) { |
| struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp); |
| -- |
| 2.1.4 |
| |