| From c8b74c2f6604923de91f8aa6539f8bb934736754 Mon Sep 17 00:00:00 2001 |
| From: Sonny Rao <sonnyrao@chromium.org> |
| Date: Thu, 20 Dec 2012 15:05:07 -0800 |
| Subject: mm: fix calculation of dirtyable memory |
| |
| From: Sonny Rao <sonnyrao@chromium.org> |
| |
| commit c8b74c2f6604923de91f8aa6539f8bb934736754 upstream. |
| |
| The system uses global_dirtyable_memory() to calculate the number of |
| dirtyable pages, i.e. pages that can be allocated to the page cache.  A bug |
| causes an underflow thus making the page count look like a big unsigned |
| number. This in turn confuses the dirty writeback throttling to |
| aggressively write back pages as they become dirty (usually 1 page at a |
| time). This generally only affects systems with highmem because the |
| underflowed count gets subtracted from the global count of dirtyable |
| memory. |
| |
| The problem was introduced with v3.2-4896-gab8fabd |
| |
| The fix is to ensure we don't get an underflowed total of either highmem or |
| global dirtyable memory. |
| |
| Signed-off-by: Sonny Rao <sonnyrao@chromium.org> |
| Signed-off-by: Puneet Kumar <puneetster@chromium.org> |
| Acked-by: Johannes Weiner <hannes@cmpxchg.org> |
| Tested-by: Damien Wyart <damien.wyart@free.fr> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| mm/page-writeback.c | 25 ++++++++++++++++++++----- |
| 1 file changed, 20 insertions(+), 5 deletions(-) |
| |
| --- a/mm/page-writeback.c |
| +++ b/mm/page-writeback.c |
| @@ -201,6 +201,18 @@ static unsigned long highmem_dirtyable_m |
| zone_reclaimable_pages(z) - z->dirty_balance_reserve; |
| } |
| /* |
| + * Unreclaimable memory (kernel memory or anonymous memory |
| + * without swap) can bring down the dirtyable pages below |
| + * the zone's dirty balance reserve and the above calculation |
| + * will underflow. However we still want to add in nodes |
| + * which are below threshold (negative values) to get a more |
| + * accurate calculation but make sure that the total never |
| + * underflows. |
| + */ |
| + if ((long)x < 0) |
| + x = 0; |
| + |
| + /* |
| * Make sure that the number of highmem pages is never larger |
| * than the number of the total dirtyable memory. This can only |
| * occur in very strange VM situations but we want to make sure |
| @@ -222,8 +234,8 @@ static unsigned long global_dirtyable_me |
| { |
| unsigned long x; |
| |
| - x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() - |
| - dirty_balance_reserve; |
| + x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages(); |
| + x -= min(x, dirty_balance_reserve); |
| |
| if (!vm_highmem_is_dirtyable) |
| x -= highmem_dirtyable_memory(x); |
| @@ -290,9 +302,12 @@ static unsigned long zone_dirtyable_memo |
| * highmem zone can hold its share of dirty pages, so we don't |
| * care about vm_highmem_is_dirtyable here. |
| */ |
| - return zone_page_state(zone, NR_FREE_PAGES) + |
| - zone_reclaimable_pages(zone) - |
| - zone->dirty_balance_reserve; |
| + unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) + |
| + zone_reclaimable_pages(zone); |
| + |
| + /* don't allow this to underflow */ |
| + nr_pages -= min(nr_pages, zone->dirty_balance_reserve); |
| + return nr_pages; |
| } |
| |
| /** |