radix-tree: re-indent an extra level

Refresh radix-tree-use-local-locks.patch.  Upstream made the
preempt_enable() in ida_pre_get() conditional on the return value of
__radix_tree_preload(), so the RT replacement line,
local_unlock(radix_tree_preloads_lock), needs one extra level of
indentation to match.  The refresh also records the originating commit
ID, adds per-file diff/index lines, expands the truncated function
names in the hunk headers, drops the stale diffstat, and picks up the
small line-offset changes.

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
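
For context, the affected lines of ida_pre_get() in lib/radix-tree.c,
as seen in the refreshed hunk, read roughly as follows (surrounding
code omitted; all identifiers are taken from the hunk itself):

Before the RT substitution:

	if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
		preempt_enable();

After the RT substitution:

	/* RT: release the per-CPU preload local lock instead of re-enabling preemption */
	if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
		local_unlock(radix_tree_preloads_lock);
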
diff --git a/patches/radix-tree-use-local-locks.patch b/patches/radix-tree-use-local-locks.patch
index 9450534..8759a71 100644
--- a/patches/radix-tree-use-local-locks.patch
+++ b/patches/radix-tree-use-local-locks.patch
@@ -1,3 +1,4 @@
+From f3e6acf1a00c6f8a87cd7d470e8b92bcdffe6cee Mon Sep 17 00:00:00 2001
 From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 Date: Wed, 25 Jan 2017 16:34:27 +0100
 Subject: [PATCH] radix-tree: use local locks
@@ -10,15 +11,12 @@
 Cc: stable-rt@vger.kernel.org
 Reported-and-debugged-by: Mike Galbraith <efault@gmx.de>
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/idr.h        |    5 +----
- include/linux/radix-tree.h |    7 ++-----
- lib/radix-tree.c           |   30 ++++++++++++++++++++++--------
- 3 files changed, 25 insertions(+), 17 deletions(-)
 
+diff --git a/include/linux/idr.h b/include/linux/idr.h
+index bf70b3ef0a07..413c4dbbafb2 100644
 --- a/include/linux/idr.h
 +++ b/include/linux/idr.h
-@@ -111,10 +111,7 @@ static inline bool idr_is_empty(const st
+@@ -111,10 +111,7 @@ static inline bool idr_is_empty(const struct idr *idr)
   * Each idr_preload() should be matched with an invocation of this
   * function.  See idr_preload() for details.
   */
@@ -30,9 +28,11 @@
  
  /**
   * idr_find - return pointer for given id
+diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
+index 3e5735064b71..83177c8dfb02 100644
 --- a/include/linux/radix-tree.h
 +++ b/include/linux/radix-tree.h
-@@ -328,6 +328,8 @@ unsigned int radix_tree_gang_lookup_slot
+@@ -328,6 +328,8 @@ unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
  int radix_tree_preload(gfp_t gfp_mask);
  int radix_tree_maybe_preload(gfp_t gfp_mask);
  int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
@@ -41,7 +41,7 @@
  void radix_tree_init(void);
  void *radix_tree_tag_set(struct radix_tree_root *,
  			unsigned long index, unsigned int tag);
-@@ -347,11 +349,6 @@ unsigned int radix_tree_gang_lookup_tag_
+@@ -347,11 +349,6 @@ unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
  		unsigned int max_items, unsigned int tag);
  int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
  
@@ -53,6 +53,8 @@
  int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
  int radix_tree_split(struct radix_tree_root *, unsigned long index,
  			unsigned new_order);
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 79a804f1aab9..f691478665f7 100644
 --- a/lib/radix-tree.c
 +++ b/lib/radix-tree.c
 @@ -37,7 +37,7 @@
@@ -72,7 +74,7 @@
  
  static inline struct radix_tree_node *entry_to_node(void *ptr)
  {
-@@ -404,12 +405,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, st
+@@ -404,12 +405,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
  		 * succeed in getting a node here (and never reach
  		 * kmem_cache_alloc)
  		 */
@@ -87,7 +89,7 @@
  		/*
  		 * Update the allocation stack trace as this is more useful
  		 * for debugging.
-@@ -475,14 +477,14 @@ static int __radix_tree_preload(gfp_t gf
+@@ -475,14 +477,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
  	 */
  	gfp_mask &= ~__GFP_ACCOUNT;
  
@@ -105,7 +107,7 @@
  		rtp = this_cpu_ptr(&radix_tree_preloads);
  		if (rtp->nr < nr) {
  			node->parent = rtp->nodes;
-@@ -524,7 +526,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
+@@ -524,7 +526,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
  	if (gfpflags_allow_blocking(gfp_mask))
  		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
  	/* Preloading doesn't help anything with this gfp mask, skip it */
@@ -114,7 +116,7 @@
  	return 0;
  }
  EXPORT_SYMBOL(radix_tree_maybe_preload);
-@@ -562,7 +564,7 @@ int radix_tree_maybe_preload_order(gfp_t
+@@ -562,7 +564,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
  
  	/* Preloading doesn't help anything with this gfp mask, skip it */
  	if (!gfpflags_allow_blocking(gfp_mask)) {
@@ -123,7 +125,7 @@
  		return 0;
  	}
  
-@@ -596,6 +598,12 @@ int radix_tree_maybe_preload_order(gfp_t
+@@ -596,6 +598,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
  	return __radix_tree_preload(gfp_mask, nr_nodes);
  }
  
@@ -136,7 +138,7 @@
  static unsigned radix_tree_load_root(const struct radix_tree_root *root,
  		struct radix_tree_node **nodep, unsigned long *maxindex)
  {
-@@ -2107,6 +2115,12 @@ void idr_preload(gfp_t gfp_mask)
+@@ -2108,6 +2116,12 @@ void idr_preload(gfp_t gfp_mask)
  }
  EXPORT_SYMBOL(idr_preload);
  
@@ -149,12 +151,15 @@
  /**
   * ida_pre_get - reserve resources for ida allocation
   * @ida: ida handle
-@@ -2123,7 +2137,7 @@ int ida_pre_get(struct ida *ida, gfp_t g
- 	 * ida_get_new() can return -EAGAIN, prompting the caller
+@@ -2124,7 +2138,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
  	 * to return to the ida_pre_get() step.
  	 */
--	preempt_enable();
-+	local_unlock(radix_tree_preloads_lock);
+ 	if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
+-		preempt_enable();
++		local_unlock(radix_tree_preloads_lock);
  
  	if (!this_cpu_read(ida_bitmap)) {
  		struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
+-- 
+2.1.4
+