| From d91f3057263ceb691ef527e71b41a56b17f6c869 Mon Sep 17 00:00:00 2001 |
| From: Waiman Long <longman@redhat.com> |
| Date: Fri, 20 Dec 2019 08:51:28 -0500 |
| Subject: locking/lockdep: Fix buffer overrun problem in stack_trace[] |
| |
| From: Waiman Long <longman@redhat.com> |
| |
| commit d91f3057263ceb691ef527e71b41a56b17f6c869 upstream. |
| |
| If the lockdep code actually runs out of stack_trace[] entries, a |
| buffer overrun can happen and the data immediately after |
| stack_trace[] will be corrupted. |
| |
| If there are fewer than LOCK_TRACE_SIZE_IN_LONGS entries left before |
| the call to save_trace(), the max_entries computation will leave it |
| with a very large positive number because of its unsigned nature. The |
| subsequent call to stack_trace_save() will then corrupt the data after |
| stack_trace[]. Fix that by changing max_entries to a signed integer |
| and checking for a negative value before calling stack_trace_save(). |
| |
| Signed-off-by: Waiman Long <longman@redhat.com> |
| Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> |
| Reviewed-by: Bart Van Assche <bvanassche@acm.org> |
| Cc: Linus Torvalds <torvalds@linux-foundation.org> |
| Cc: Peter Zijlstra <peterz@infradead.org> |
| Cc: Thomas Gleixner <tglx@linutronix.de> |
| Fixes: 12593b7467f9 ("locking/lockdep: Reduce space occupied by stack traces") |
| Link: https://lkml.kernel.org/r/20191220135128.14876-1-longman@redhat.com |
| Signed-off-by: Ingo Molnar <mingo@kernel.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| kernel/locking/lockdep.c | 7 +++---- |
| 1 file changed, 3 insertions(+), 4 deletions(-) |
| |
| --- a/kernel/locking/lockdep.c |
| +++ b/kernel/locking/lockdep.c |
| @@ -482,7 +482,7 @@ static struct lock_trace *save_trace(voi |
| struct lock_trace *trace, *t2; |
| struct hlist_head *hash_head; |
| u32 hash; |
| - unsigned int max_entries; |
| + int max_entries; |
| |
| BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE); |
| BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES); |
| @@ -490,10 +490,8 @@ static struct lock_trace *save_trace(voi |
| trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries); |
| max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries - |
| LOCK_TRACE_SIZE_IN_LONGS; |
| - trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3); |
| |
| - if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES - |
| - LOCK_TRACE_SIZE_IN_LONGS - 1) { |
| + if (max_entries <= 0) { |
| if (!debug_locks_off_graph_unlock()) |
| return NULL; |
| |
| @@ -502,6 +500,7 @@ static struct lock_trace *save_trace(voi |
| |
| return NULL; |
| } |
| + trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3); |
| |
| hash = jhash(trace->entries, trace->nr_entries * |
| sizeof(trace->entries[0]), 0); |