| From 7c45512df987c5619db041b5c9b80d281e26d3db Mon Sep 17 00:00:00 2001 |
| From: Linus Torvalds <torvalds@linux-foundation.org> |
| Date: Mon, 18 Feb 2013 09:58:02 -0800 |
| Subject: mm: fix pageblock bitmap allocation |
| |
| From: Linus Torvalds <torvalds@linux-foundation.org> |
| |
| commit 7c45512df987c5619db041b5c9b80d281e26d3db upstream. |
| |
| Commit c060f943d092 ("mm: use aligned zone start for pfn_to_bitidx |
| calculation") fixed our calculation of the index into the pageblock |
| bitmap when a !SPARSEMEM zone was not aligned to pageblock_nr_pages. |
| |
| However, the _allocation_ of that bitmap had never taken this alignment |
| requirement into account, so depending on the exact size and alignment of |
| the zone, the use of that index could then access past the allocation, |
| resulting in some very subtle memory corruption. |
| |
| This was reported (and bisected) by Ingo Molnar: one of his random |
| config builds would hang with certain very specific kernel command line |
| options. |
| |
| In the meantime, commit c060f943d092 has been marked for stable, so this |
| fix needs to be back-ported to the stable kernels that backported the |
| commit to use the right alignment. |
| |
| Bisected-and-tested-by: Ingo Molnar <mingo@kernel.org> |
| Acked-by: Mel Gorman <mgorman@suse.de> |
| Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| mm/page_alloc.c | 15 +++++++++------ |
| 1 file changed, 9 insertions(+), 6 deletions(-) |
| |
| --- a/mm/page_alloc.c |
| +++ b/mm/page_alloc.c |
| @@ -4264,10 +4264,11 @@ static void __meminit calculate_node_tot |
| * round what is now in bits to nearest long in bits, then return it in |
| * bytes. |
| */ |
| -static unsigned long __init usemap_size(unsigned long zonesize) |
| +static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) |
| { |
| unsigned long usemapsize; |
| |
| + zonesize += zone_start_pfn & (pageblock_nr_pages-1); |
| usemapsize = roundup(zonesize, pageblock_nr_pages); |
| usemapsize = usemapsize >> pageblock_order; |
| usemapsize *= NR_PAGEBLOCK_BITS; |
| @@ -4277,17 +4278,19 @@ static unsigned long __init usemap_size( |
| } |
| |
| static void __init setup_usemap(struct pglist_data *pgdat, |
| - struct zone *zone, unsigned long zonesize) |
| + struct zone *zone, |
| + unsigned long zone_start_pfn, |
| + unsigned long zonesize) |
| { |
| - unsigned long usemapsize = usemap_size(zonesize); |
| + unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize); |
| zone->pageblock_flags = NULL; |
| if (usemapsize) |
| zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, |
| usemapsize); |
| } |
| #else |
| -static inline void setup_usemap(struct pglist_data *pgdat, |
| - struct zone *zone, unsigned long zonesize) {} |
| +static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, |
| + unsigned long zone_start_pfn, unsigned long zonesize) {} |
| #endif /* CONFIG_SPARSEMEM */ |
| |
| #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE |
| @@ -4415,7 +4418,7 @@ static void __paginginit free_area_init_ |
| continue; |
| |
| set_pageblock_order(pageblock_default_order()); |
| - setup_usemap(pgdat, zone, size); |
| + setup_usemap(pgdat, zone, zone_start_pfn, size); |
| ret = init_currently_empty_zone(zone, zone_start_pfn, |
| size, MEMMAP_EARLY); |
| BUG_ON(ret); |