From dfcce6fe107bcf034351226b0a3c557f62310c16 Mon Sep 17 00:00:00 2001
From: Yong Zhang <yong.zhang@windriver.com>
Date: Tue, 4 May 2010 14:16:48 +0800
Subject: [PATCH] lockdep: Reduce stack_trace usage

commit 77cb7c1cad611fa43172f5229de62512dab21824 in tip.

When check_prevs_add() is called and all validations pass,
add_lock_to_list() adds the new lock to the dependency tree and
allocates a stack_trace for each list_entry.

But at that point we are always on the same stack, so the
stack_trace saved for each list_entry has the same value. This is
redundant and eats up lots of memory, which can trigger the
warning when MAX_STACK_TRACE_ENTRIES runs low.

Use one copy of the stack_trace instead.

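To make the saving concrete, here is a minimal standalone sketch of
the idea (an editor's illustration, not kernel code: the pool, the
fixed three-frame capture and the struct layout are simplified
stand-ins for the lockdep internals). Each new dependency creates
two list entries, and previously each entry captured its own
identical trace into the fixed-size pool:

#include <stdio.h>

#define MAX_STACK_TRACE_ENTRIES 64

struct stack_trace {
	unsigned int nr_entries;
	unsigned long *entries;		/* points into the shared pool */
};

struct lock_list {
	struct stack_trace trace;
};

static unsigned long pool[MAX_STACK_TRACE_ENTRIES];
static unsigned int nr_stack_trace_entries;

/* Pretend to capture a three-frame backtrace into the pool. */
static int save_trace(struct stack_trace *trace)
{
	if (nr_stack_trace_entries + 3 > MAX_STACK_TRACE_ENTRIES)
		return 0;		/* the low-pool warning case */
	trace->entries = pool + nr_stack_trace_entries;
	trace->nr_entries = 3;
	nr_stack_trace_entries += 3;
	return 1;
}

int main(void)
{
	static struct stack_trace trace;	/* one shared copy */
	struct lock_list forward, backward;

	if (!save_trace(&trace))		/* captured once ... */
		return 1;

	/* ... then copied; both entries alias the same pool slice. */
	forward.trace = trace;
	backward.trace = trace;

	printf("pool entries used: %u (per-entry capture would use %u)\n",
	       nr_stack_trace_entries,
	       forward.trace.nr_entries + backward.trace.nr_entries);
	return 0;
}

Because stack_trace carries only a count and a pointer into the
pool, the struct assignment shares the captured frames instead of
consuming fresh pool slots for every entry.
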
V2: As suggested by Peter Zijlstra, move save_trace() from
    check_prevs_add() to check_prev_add().
    Also add tracking for the trylock loop, whose repeated traces
    are equally redundant.

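The V2 refinement can be sketched the same way (again illustrative,
with simplified stand-ins for the real signatures): check_prevs_add()
may invoke check_prev_add() once per earlier trylock-held lock, and
the trylock_loop flag lets only the first invocation pay for
save_trace():

#include <stdio.h>

struct stack_trace {
	unsigned int nr_entries;
};

static unsigned int captures;	/* counts expensive backtrace captures */

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 3;
	captures++;
	return 1;
}

/* Stand-in for check_prev_add(): capture only on the first call. */
static int check_prev_add(int trylock_loop, struct stack_trace *trace)
{
	if (!trylock_loop && !save_trace(trace))
		return 0;
	/* ... validations, then add_lock_to_list(..., trace) ... */
	return 1;
}

int main(void)
{
	static struct stack_trace trace;	/* shared, as in the patch */
	int trylock_loop = 0;
	int depth;

	/* Three earlier trylock-held locks: three calls, one capture. */
	for (depth = 0; depth < 3; depth++) {
		if (!check_prev_add(trylock_loop, &trace))
			return 1;
		trylock_loop = 1;	/* set at the bottom of the loop */
	}
	printf("captures: %u\n", captures);	/* prints 1, not 3 */
	return 0;
}
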
[ upstream commit: 4726f2a617ebd868a4fdeb5679613b897e5f1676 ]

Signed-off-by: Yong Zhang <yong.zhang0@windriver.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100504065711.GC10784@windriver.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index c74c909..3e3c8f4 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -834,7 +834,8 @@ static struct lock_list *alloc_list_entry(void)
  * Add a new dependency to the head of the list:
  */
 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
-			    struct list_head *head, unsigned long ip, int distance)
+			    struct list_head *head, unsigned long ip,
+			    int distance, struct stack_trace *trace)
 {
 	struct lock_list *entry;
 	/*
@@ -845,11 +846,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	if (!entry)
 		return 0;
 
-	if (!save_trace(&entry->trace))
-		return 0;
-
 	entry->class = this;
 	entry->distance = distance;
+	entry->trace = *trace;
 	/*
 	 * Since we never remove from the dependency list, the list can
 	 * be walked lockless by other CPUs, it's only allocation
@@ -1653,12 +1652,20 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance)
+	       struct held_lock *next, int distance, int trylock_loop)
 {
 	struct lock_list *entry;
 	int ret;
 	struct lock_list this;
 	struct lock_list *uninitialized_var(target_entry);
+	/*
+	 * Static variable, serialized by the graph_lock().
+	 *
+	 * We use this static variable to save the stack trace in case
+	 * we call into this function multiple times due to encountering
+	 * trylocks in the held lock stack.
+	 */
+	static struct stack_trace trace;
 
 	/*
 	 * Prove that the new <prev> -> <next> dependency would not
@@ -1706,20 +1713,23 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		}
 	}
 
+	if (!trylock_loop && !save_trace(&trace))
+		return 0;
+
 	/*
 	 * Ok, all validations passed, add the new lock
 	 * to the previous lock's dependency list:
 	 */
 	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
 			       &hlock_class(prev)->locks_after,
-			       next->acquire_ip, distance);
+			       next->acquire_ip, distance, &trace);
 
 	if (!ret)
 		return 0;
 
 	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
 			       &hlock_class(next)->locks_before,
-			       next->acquire_ip, distance);
+			       next->acquire_ip, distance, &trace);
 	if (!ret)
 		return 0;
 
@@ -1749,6 +1759,7 @@ static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
 	int depth = curr->lockdep_depth;
+	int trylock_loop = 0;
 	struct held_lock *hlock;
 
 	/*
@@ -1774,7 +1785,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 * added:
 		 */
 		if (hlock->read != 2) {
-			if (!check_prev_add(curr, hlock, next, distance))
+			if (!check_prev_add(curr, hlock, next,
+						distance, trylock_loop))
 				return 0;
 			/*
 			 * Stop after the first non-trylock entry,
@@ -1797,6 +1809,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		if (curr->held_locks[depth].irq_context !=
 		    curr->held_locks[depth-1].irq_context)
 			break;
+		trylock_loop = 1;
 	}
 	return 1;
 out_bug:
-- 
1.7.1.1