| From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| Date: Wed, 25 Jan 2017 16:34:27 +0100 |
| Subject: [PATCH] radix-tree: use local locks |
| |
| The preload functionality uses per-CPU variables and preempt-disable to |
| ensure that it does not switch CPUs during its usage. This patch adds |
| local_locks() instead of preempt_disable() for the same purpose and to |
| remain preemptible on -RT. |
| |
| Cc: stable-rt@vger.kernel.org |
| Reported-and-debugged-by: Mike Galbraith <efault@gmx.de> |
| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| --- |
| include/linux/idr.h | 5 +---- |
| include/linux/radix-tree.h | 7 ++----- |
| lib/radix-tree.c | 30 ++++++++++++++++++++++-------- |
| 3 files changed, 25 insertions(+), 17 deletions(-) |
| |
| --- a/include/linux/idr.h |
| +++ b/include/linux/idr.h |
| @@ -111,10 +111,7 @@ static inline bool idr_is_empty(const st |
| * Each idr_preload() should be matched with an invocation of this |
| * function. See idr_preload() for details. |
| */ |
| -static inline void idr_preload_end(void) |
| -{ |
| - preempt_enable(); |
| -} |
| +void idr_preload_end(void); |
| |
| /** |
| * idr_find - return pointer for given id |
| --- a/include/linux/radix-tree.h |
| +++ b/include/linux/radix-tree.h |
| @@ -328,6 +328,8 @@ unsigned int radix_tree_gang_lookup_slot |
| int radix_tree_preload(gfp_t gfp_mask); |
| int radix_tree_maybe_preload(gfp_t gfp_mask); |
| int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); |
| +void radix_tree_preload_end(void); |
| + |
| void radix_tree_init(void); |
| void *radix_tree_tag_set(struct radix_tree_root *, |
| unsigned long index, unsigned int tag); |
| @@ -347,11 +349,6 @@ unsigned int radix_tree_gang_lookup_tag_ |
| unsigned int max_items, unsigned int tag); |
| int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); |
| |
| -static inline void radix_tree_preload_end(void) |
| -{ |
| - preempt_enable(); |
| -} |
| - |
| int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t); |
| int radix_tree_split(struct radix_tree_root *, unsigned long index, |
| unsigned new_order); |
| --- a/lib/radix-tree.c |
| +++ b/lib/radix-tree.c |
| @@ -37,7 +37,7 @@ |
| #include <linux/rcupdate.h> |
| #include <linux/slab.h> |
| #include <linux/string.h> |
| - |
| +#include <linux/locallock.h> |
| |
| /* Number of nodes in fully populated tree of given height */ |
| static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly; |
| @@ -86,6 +86,7 @@ struct radix_tree_preload { |
| struct radix_tree_node *nodes; |
| }; |
| static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; |
| +static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock); |
| |
| static inline struct radix_tree_node *entry_to_node(void *ptr) |
| { |
| @@ -404,12 +405,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, st |
| * succeed in getting a node here (and never reach |
| * kmem_cache_alloc) |
| */ |
| - rtp = this_cpu_ptr(&radix_tree_preloads); |
| + rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads); |
| if (rtp->nr) { |
| ret = rtp->nodes; |
| rtp->nodes = ret->parent; |
| rtp->nr--; |
| } |
| + put_locked_var(radix_tree_preloads_lock, radix_tree_preloads); |
| /* |
| * Update the allocation stack trace as this is more useful |
| * for debugging. |
| @@ -475,14 +477,14 @@ static int __radix_tree_preload(gfp_t gf |
| */ |
| gfp_mask &= ~__GFP_ACCOUNT; |
| |
| - preempt_disable(); |
| + local_lock(radix_tree_preloads_lock); |
| rtp = this_cpu_ptr(&radix_tree_preloads); |
| while (rtp->nr < nr) { |
| - preempt_enable(); |
| + local_unlock(radix_tree_preloads_lock); |
| node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); |
| if (node == NULL) |
| goto out; |
| - preempt_disable(); |
| + local_lock(radix_tree_preloads_lock); |
| rtp = this_cpu_ptr(&radix_tree_preloads); |
| if (rtp->nr < nr) { |
| node->parent = rtp->nodes; |
| @@ -524,7 +526,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m |
| if (gfpflags_allow_blocking(gfp_mask)) |
| return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); |
| /* Preloading doesn't help anything with this gfp mask, skip it */ |
| - preempt_disable(); |
| + local_lock(radix_tree_preloads_lock); |
| return 0; |
| } |
| EXPORT_SYMBOL(radix_tree_maybe_preload); |
| @@ -562,7 +564,7 @@ int radix_tree_maybe_preload_order(gfp_t |
| |
| /* Preloading doesn't help anything with this gfp mask, skip it */ |
| if (!gfpflags_allow_blocking(gfp_mask)) { |
| - preempt_disable(); |
| + local_lock(radix_tree_preloads_lock); |
| return 0; |
| } |
| |
| @@ -596,6 +598,12 @@ int radix_tree_maybe_preload_order(gfp_t |
| return __radix_tree_preload(gfp_mask, nr_nodes); |
| } |
| |
| +void radix_tree_preload_end(void) |
| +{ |
| + local_unlock(radix_tree_preloads_lock); |
| +} |
| +EXPORT_SYMBOL(radix_tree_preload_end); |
| + |
| static unsigned radix_tree_load_root(const struct radix_tree_root *root, |
| struct radix_tree_node **nodep, unsigned long *maxindex) |
| { |
| @@ -2107,6 +2115,12 @@ void idr_preload(gfp_t gfp_mask) |
| } |
| EXPORT_SYMBOL(idr_preload); |
| |
| +void idr_preload_end(void) |
| +{ |
| + local_unlock(radix_tree_preloads_lock); |
| +} |
| +EXPORT_SYMBOL(idr_preload_end); |
| + |
| /** |
| * ida_pre_get - reserve resources for ida allocation |
| * @ida: ida handle |
| @@ -2123,7 +2137,7 @@ int ida_pre_get(struct ida *ida, gfp_t g |
| * ida_get_new() can return -EAGAIN, prompting the caller |
| * to return to the ida_pre_get() step. |
| */ |
| - preempt_enable(); |
| + local_unlock(radix_tree_preloads_lock); |
| |
| if (!this_cpu_read(ida_bitmap)) { |
| struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp); |