| Subject: OF: Convert devtree lock from rw_lock to raw spinlock |
| From: Paul Gortmaker <paul.gortmaker@windriver.com> |
| Date: Wed, 6 Feb 2013 15:30:56 -0500 |
| |
| From: Thomas Gleixner <tglx@linutronix.de> |
| |
| With the locking cleanup in place (from "OF: Fixup recursive |
| locking code paths"), we can now do the conversion from the |
| rw_lock to a raw spinlock as required for preempt-rt. |
| |
| The previous cleanup and this conversion were originally |
| separate since they predated when mainline got raw spinlock (in |
| commit c2f21ce2e31286a "locking: Implement new raw_spinlock"). |
| |
| So, at that point in time, the cleanup was considered plausible |
| for mainline, but not this conversion. In any case, we've kept |
| them separate as it makes for easier review and better bisection. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Cc: Grant Likely <grant.likely@secretlab.ca> |
| Cc: Sam Ravnborg <sam@ravnborg.org> |
| Cc: <devicetree-discuss@lists.ozlabs.org> |
| Cc: Rob Herring <rob.herring@calxeda.com> |
| Link: http://lkml.kernel.org/r/1360182656-15898-1-git-send-email-paul.gortmaker@windriver.com |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| [PG: taken from preempt-rt, update subject & add a commit log] |
| Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> |
| --- |
| |
| [v2: recent commit e81b329 ("powerpc+of: Add /proc device tree |
| updating to of node add/remove") added two more instances of |
| write_unlock that also needed converting to raw_spin_unlock. |
| Retested (boot) on sbc8548, defconfig builds on arm/sparc; no |
| new warnings observed.] |
| |
| arch/sparc/kernel/prom_common.c | 4 - |
| drivers/of/base.c | 100 ++++++++++++++++++++++------------------ |
| include/linux/of.h | 2 |
| 3 files changed, 59 insertions(+), 47 deletions(-) |
| |
| --- a/arch/sparc/kernel/prom_common.c |
| +++ b/arch/sparc/kernel/prom_common.c |
| @@ -64,7 +64,7 @@ int of_set_property(struct device_node * |
| err = -ENODEV; |
| |
| mutex_lock(&of_set_property_mutex); |
| - write_lock(&devtree_lock); |
| + raw_spin_lock(&devtree_lock); |
| prevp = &dp->properties; |
| while (*prevp) { |
| struct property *prop = *prevp; |
| @@ -91,7 +91,7 @@ int of_set_property(struct device_node * |
| } |
| prevp = &(*prevp)->next; |
| } |
| - write_unlock(&devtree_lock); |
| + raw_spin_unlock(&devtree_lock); |
| mutex_unlock(&of_set_property_mutex); |
| |
| /* XXX Upate procfs if necessary... */ |
| --- a/drivers/of/base.c |
| +++ b/drivers/of/base.c |
| @@ -55,7 +55,7 @@ static DEFINE_MUTEX(of_aliases_mutex); |
| /* use when traversing tree through the allnext, child, sibling, |
| * or parent members of struct device_node. |
| */ |
| -DEFINE_RWLOCK(devtree_lock); |
| +DEFINE_RAW_SPINLOCK(devtree_lock); |
| |
| int of_n_addr_cells(struct device_node *np) |
| { |
| @@ -188,10 +188,11 @@ struct property *of_find_property(const |
| int *lenp) |
| { |
| struct property *pp; |
| + unsigned long flags; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| pp = __of_find_property(np, name, lenp); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| |
| return pp; |
| } |
| @@ -209,13 +210,13 @@ struct device_node *of_find_all_nodes(st |
| { |
| struct device_node *np; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock(&devtree_lock); |
| np = prev ? prev->allnext : of_allnodes; |
| for (; np != NULL; np = np->allnext) |
| if (of_node_get(np)) |
| break; |
| of_node_put(prev); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock(&devtree_lock); |
| return np; |
| } |
| EXPORT_SYMBOL(of_find_all_nodes); |
| @@ -274,11 +275,12 @@ static int __of_device_is_compatible(con |
| int of_device_is_compatible(const struct device_node *device, |
| const char *compat) |
| { |
| + unsigned long flags; |
| int res; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| res = __of_device_is_compatible(device, compat); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return res; |
| } |
| EXPORT_SYMBOL(of_device_is_compatible); |
| @@ -340,13 +342,14 @@ EXPORT_SYMBOL(of_device_is_available); |
| struct device_node *of_get_parent(const struct device_node *node) |
| { |
| struct device_node *np; |
| + unsigned long flags; |
| |
| if (!node) |
| return NULL; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| np = of_node_get(node->parent); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return np; |
| } |
| EXPORT_SYMBOL(of_get_parent); |
| @@ -365,14 +368,15 @@ EXPORT_SYMBOL(of_get_parent); |
| struct device_node *of_get_next_parent(struct device_node *node) |
| { |
| struct device_node *parent; |
| + unsigned long flags; |
| |
| if (!node) |
| return NULL; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| parent = of_node_get(node->parent); |
| of_node_put(node); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return parent; |
| } |
| |
| @@ -388,14 +392,15 @@ struct device_node *of_get_next_child(co |
| struct device_node *prev) |
| { |
| struct device_node *next; |
| + unsigned long flags; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| next = prev ? prev->sibling : node->child; |
| for (; next; next = next->sibling) |
| if (of_node_get(next)) |
| break; |
| of_node_put(prev); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return next; |
| } |
| EXPORT_SYMBOL(of_get_next_child); |
| @@ -413,7 +418,7 @@ struct device_node *of_get_next_availabl |
| { |
| struct device_node *next; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock(&devtree_lock); |
| next = prev ? prev->sibling : node->child; |
| for (; next; next = next->sibling) { |
| if (!of_device_is_available(next)) |
| @@ -422,7 +427,7 @@ struct device_node *of_get_next_availabl |
| break; |
| } |
| of_node_put(prev); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock(&devtree_lock); |
| return next; |
| } |
| EXPORT_SYMBOL(of_get_next_available_child); |
| @@ -460,14 +465,15 @@ EXPORT_SYMBOL(of_get_child_by_name); |
| struct device_node *of_find_node_by_path(const char *path) |
| { |
| struct device_node *np = of_allnodes; |
| + unsigned long flags; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| for (; np; np = np->allnext) { |
| if (np->full_name && (of_node_cmp(np->full_name, path) == 0) |
| && of_node_get(np)) |
| break; |
| } |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return np; |
| } |
| EXPORT_SYMBOL(of_find_node_by_path); |
| @@ -487,15 +493,16 @@ struct device_node *of_find_node_by_name |
| const char *name) |
| { |
| struct device_node *np; |
| + unsigned long flags; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| np = from ? from->allnext : of_allnodes; |
| for (; np; np = np->allnext) |
| if (np->name && (of_node_cmp(np->name, name) == 0) |
| && of_node_get(np)) |
| break; |
| of_node_put(from); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return np; |
| } |
| EXPORT_SYMBOL(of_find_node_by_name); |
| @@ -516,15 +523,16 @@ struct device_node *of_find_node_by_type |
| const char *type) |
| { |
| struct device_node *np; |
| + unsigned long flags; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| np = from ? from->allnext : of_allnodes; |
| for (; np; np = np->allnext) |
| if (np->type && (of_node_cmp(np->type, type) == 0) |
| && of_node_get(np)) |
| break; |
| of_node_put(from); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return np; |
| } |
| EXPORT_SYMBOL(of_find_node_by_type); |
| @@ -547,8 +555,9 @@ struct device_node *of_find_compatible_n |
| const char *type, const char *compatible) |
| { |
| struct device_node *np; |
| + unsigned long flags; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| np = from ? from->allnext : of_allnodes; |
| for (; np; np = np->allnext) { |
| if (type |
| @@ -559,7 +568,7 @@ struct device_node *of_find_compatible_n |
| break; |
| } |
| of_node_put(from); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return np; |
| } |
| EXPORT_SYMBOL(of_find_compatible_node); |
| @@ -581,8 +590,9 @@ struct device_node *of_find_node_with_pr |
| { |
| struct device_node *np; |
| struct property *pp; |
| + unsigned long flags; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| np = from ? from->allnext : of_allnodes; |
| for (; np; np = np->allnext) { |
| for (pp = np->properties; pp; pp = pp->next) { |
| @@ -594,7 +604,7 @@ struct device_node *of_find_node_with_pr |
| } |
| out: |
| of_node_put(from); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return np; |
| } |
| EXPORT_SYMBOL(of_find_node_with_property); |
| @@ -635,10 +645,11 @@ const struct of_device_id *of_match_node |
| const struct device_node *node) |
| { |
| const struct of_device_id *match; |
| + unsigned long flags; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| match = __of_match_node(matches, node); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return match; |
| } |
| EXPORT_SYMBOL(of_match_node); |
| @@ -661,11 +672,12 @@ struct device_node *of_find_matching_nod |
| const struct of_device_id **match) |
| { |
| struct device_node *np; |
| + unsigned long flags; |
| |
| if (match) |
| *match = NULL; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| np = from ? from->allnext : of_allnodes; |
| for (; np; np = np->allnext) { |
| if (__of_match_node(matches, np) && of_node_get(np)) { |
| @@ -675,7 +687,7 @@ struct device_node *of_find_matching_nod |
| } |
| } |
| of_node_put(from); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return np; |
| } |
| EXPORT_SYMBOL(of_find_matching_node_and_match); |
| @@ -718,12 +730,12 @@ struct device_node *of_find_node_by_phan |
| { |
| struct device_node *np; |
| |
| - read_lock(&devtree_lock); |
| + raw_spin_lock(&devtree_lock); |
| for (np = of_allnodes; np; np = np->allnext) |
| if (np->phandle == handle) |
| break; |
| of_node_get(np); |
| - read_unlock(&devtree_lock); |
| + raw_spin_unlock(&devtree_lock); |
| return np; |
| } |
| EXPORT_SYMBOL(of_find_node_by_phandle); |
| @@ -1195,18 +1207,18 @@ int of_add_property(struct device_node * |
| return rc; |
| |
| prop->next = NULL; |
| - write_lock_irqsave(&devtree_lock, flags); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| next = &np->properties; |
| while (*next) { |
| if (strcmp(prop->name, (*next)->name) == 0) { |
| /* duplicate ! don't insert it */ |
| - write_unlock_irqrestore(&devtree_lock, flags); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return -1; |
| } |
| next = &(*next)->next; |
| } |
| *next = prop; |
| - write_unlock_irqrestore(&devtree_lock, flags); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| |
| #ifdef CONFIG_PROC_DEVICETREE |
| /* try to add to proc as well if it was initialized */ |
| @@ -1236,7 +1248,7 @@ int of_remove_property(struct device_nod |
| if (rc) |
| return rc; |
| |
| - write_lock_irqsave(&devtree_lock, flags); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| next = &np->properties; |
| while (*next) { |
| if (*next == prop) { |
| @@ -1249,7 +1261,7 @@ int of_remove_property(struct device_nod |
| } |
| next = &(*next)->next; |
| } |
| - write_unlock_irqrestore(&devtree_lock, flags); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| |
| if (!found) |
| return -ENODEV; |
| @@ -1289,7 +1301,7 @@ int of_update_property(struct device_nod |
| if (!oldprop) |
| return of_add_property(np, newprop); |
| |
| - write_lock_irqsave(&devtree_lock, flags); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| next = &np->properties; |
| while (*next) { |
| if (*next == oldprop) { |
| @@ -1303,7 +1315,7 @@ int of_update_property(struct device_nod |
| } |
| next = &(*next)->next; |
| } |
| - write_unlock_irqrestore(&devtree_lock, flags); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| |
| if (!found) |
| return -ENODEV; |
| @@ -1376,12 +1388,12 @@ int of_attach_node(struct device_node *n |
| if (rc) |
| return rc; |
| |
| - write_lock_irqsave(&devtree_lock, flags); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| np->sibling = np->parent->child; |
| np->allnext = of_allnodes; |
| np->parent->child = np; |
| of_allnodes = np; |
| - write_unlock_irqrestore(&devtree_lock, flags); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| |
| of_add_proc_dt_entry(np); |
| return 0; |
| @@ -1424,17 +1436,17 @@ int of_detach_node(struct device_node *n |
| if (rc) |
| return rc; |
| |
| - write_lock_irqsave(&devtree_lock, flags); |
| + raw_spin_lock_irqsave(&devtree_lock, flags); |
| |
| if (of_node_check_flag(np, OF_DETACHED)) { |
| /* someone already detached it */ |
| - write_unlock_irqrestore(&devtree_lock, flags); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return rc; |
| } |
| |
| parent = np->parent; |
| if (!parent) { |
| - write_unlock_irqrestore(&devtree_lock, flags); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| return rc; |
| } |
| |
| @@ -1461,7 +1473,7 @@ int of_detach_node(struct device_node *n |
| } |
| |
| of_node_set_flag(np, OF_DETACHED); |
| - write_unlock_irqrestore(&devtree_lock, flags); |
| + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| |
| of_remove_proc_dt_entry(np); |
| return rc; |
| --- a/include/linux/of.h |
| +++ b/include/linux/of.h |
| @@ -92,7 +92,7 @@ static inline void of_node_put(struct de |
| extern struct device_node *of_allnodes; |
| extern struct device_node *of_chosen; |
| extern struct device_node *of_aliases; |
| -extern rwlock_t devtree_lock; |
| +extern raw_spinlock_t devtree_lock; |
| |
| static inline bool of_have_populated_dt(void) |
| { |