From dhobsong@igel.co.jp Mon Oct 29 00:52:15 2012
From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
Date: Mon, 29 Oct 2012 16:50:29 +0900
Subject: [PATCH v2 12/58] mm: trigger page reclaim in alloc_contig_range() to stabilise watermarks
To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
Message-ID: <1351497075-32717-13-git-send-email-dhobsong@igel.co.jp>


From: Marek Szyprowski <m.szyprowski@samsung.com>

alloc_contig_range() performs memory allocation, so it should also keep
the memory watermarks at the correct level. This commit adds a call to a
*_slowpath-style reclaim to grab enough pages to make sure that the final
collection of contiguous pages from the freelists will not starve the
system.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
CC: Michal Nazarewicz <mina86@mina86.com>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
(cherry picked from commit 49f223a9cd96c7293d7258ff88c2bdf83065f69c)

Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
---
 include/linux/mmzone.h |  9 +++++++
 mm/page_alloc.c        | 60 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+)
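
A minimal standalone sketch of the reclaim idea described in the commit
message, for review purposes only: raising the low watermark by 'count'
makes the usual free-page check fail until reclaim has freed 'count'
extra pages, which is the loop shape used by __reclaim_pages() in this
patch. All names and numbers below are invented for illustration; this
is plain userspace C, not kernel code.

    #include <stdio.h>

    /* Invented zone state, for illustration only. */
    static unsigned long free_pages = 1000;
    static unsigned long low_wmark = 900;

    /* Pretend each reclaim pass frees a fixed batch of pages. */
    static unsigned long reclaim_pass(void)
    {
            free_pages += 128;
            return 128;
    }

    int main(void)
    {
            unsigned long count = 512; /* pages wanted by the allocation */

            low_wmark += count;  /* __update_cma_watermarks(zone, count) */

            /* Same loop shape as __reclaim_pages() in this patch. */
            while (free_pages < low_wmark) {
                    if (!reclaim_pass())
                            break; /* nothing left; the kernel would OOM here */
            }

            low_wmark -= count;  /* __update_cma_watermarks(zone, -count) */

            printf("free=%lu pages, %lu above the low watermark\n",
                   free_pages, free_pages - low_wmark);
            return 0;
    }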

--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -63,8 +63,10 @@ enum {
 
 #ifdef CONFIG_CMA
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define cma_wmark_pages(zone) zone->min_cma_pages
 #else
 # define is_migrate_cma(migratetype) false
+# define cma_wmark_pages(zone) 0
 #endif
 
 #define for_each_migratetype_order(order, type) \
@@ -371,6 +373,13 @@ struct zone {
 	/* see spanned/present_pages for more description */
 	seqlock_t span_seqlock;
 #endif
+#ifdef CONFIG_CMA
+	/*
+	 * CMA needs to increase watermark levels during the allocation
+	 * process to make sure that the system is not starved.
+	 */
+	unsigned long min_cma_pages;
+#endif
 	struct free_area free_area[MAX_ORDER];
 
 #ifndef CONFIG_SPARSEMEM
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5086,6 +5086,11 @@ static void __setup_per_zone_wmarks(void
 
 		zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+		zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
+		zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
+		zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
+
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
@@ -5691,6 +5696,54 @@ static int __alloc_contig_migrate_range(
 	return ret > 0 ? 0 : ret;
 }
 
+/*
+ * Update the zone's CMA page counter used for watermark level calculation.
+ */
+static inline void __update_cma_watermarks(struct zone *zone, int count)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&zone->lock, flags);
+	zone->min_cma_pages += count;
+	spin_unlock_irqrestore(&zone->lock, flags);
+	setup_per_zone_wmarks();
+}
+
+/*
+ * Trigger a memory pressure bump to reclaim some pages in order to be able
+ * to allocate 'count' pages in single page units. Does similar work to the
+ * __alloc_pages_slowpath() function.
+ */
+static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
+{
+	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+	struct zonelist *zonelist = node_zonelist(0, gfp_mask);
+	int did_some_progress = 0;
+	int order = 1;
+
+	/*
+	 * Increase the watermark levels to force kswapd to do its job and
+	 * stabilise at the new watermark level.
+	 */
+	__update_cma_watermarks(zone, count);
+
+	/* Obey watermarks as if the page was being allocated */
+	while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
+		wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
+
+		did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+						      NULL);
+		if (!did_some_progress) {
+			/* Exhausted what can be done so it's blamo time */
+			out_of_memory(zonelist, gfp_mask, order, NULL, false);
+		}
+	}
+
+	/* Restore original watermark levels. */
+	__update_cma_watermarks(zone, -count);
+
+	return count;
+}
+
 /**
  * alloc_contig_range() -- tries to allocate given range of pages
  * @start: start PFN to allocate
@@ -5789,6 +5842,13 @@ int alloc_contig_range(unsigned long sta
 		goto done;
 	}
 
+	/*
+	 * Reclaim enough pages to make sure that contiguous allocation
+	 * will not starve the system.
+	 */
+	__reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
+
+	/* Grab isolated pages from freelists. */
 	outer_end = isolate_freepages_range(outer_start, end);
 	if (!outer_end) {
 		ret = -EBUSY;
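
For reviewers, a standalone sketch of the watermark arithmetic in the
__setup_per_zone_wmarks() hunk above: each watermark level is simply
raised by cma_wmark_pages(zone). The page counts below are invented and
'tmp' stands in for the kernel's per-zone scaling term; this is plain
userspace C, not kernel code.

    #include <stdio.h>

    int main(void)
    {
            unsigned long min = 1024; /* hypothetical min_wmark_pages(zone) */
            unsigned long tmp = 1024; /* stand-in for the zone scaling term */
            unsigned long cma = 512;  /* hypothetical cma_wmark_pages(zone) */

            /* Mirrors the patched calculation: every level rises by 'cma'. */
            unsigned long wmark_min = min + cma;
            unsigned long wmark_low = min + (tmp >> 2) + cma;
            unsigned long wmark_high = min + (tmp >> 1) + cma;

            printf("min=%lu low=%lu high=%lu\n",
                   wmark_min, wmark_low, wmark_high);
            return 0;
    }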