| From dhobsong@igel.co.jp Mon Oct 29 00:52:12 2012 |
| From: Damian Hobson-Garcia <dhobsong@igel.co.jp> |
| Date: Mon, 29 Oct 2012 16:50:28 +0900 |
| Subject: [PATCH v2 11/58] mm: extract reclaim code from __alloc_pages_direct_reclaim() |
| To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au |
| Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp |
| Message-ID: <1351497075-32717-12-git-send-email-dhobsong@igel.co.jp> |
| |
| |
| From: Marek Szyprowski <m.szyprowski@samsung.com> |
| |
| This patch extracts the common reclaim code from __alloc_pages_direct_reclaim() |
| into a separate function, __perform_reclaim(), which can later be used |
| by alloc_contig_range(). |
| |
| Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com> |
| Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com> |
| Cc: Michal Nazarewicz <mina86@mina86.com> |
| Acked-by: Mel Gorman <mel@csn.ul.ie> |
| Tested-by: Rob Clark <rob.clark@linaro.org> |
| Tested-by: Ohad Ben-Cohen <ohad@wizery.com> |
| Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org> |
| Tested-by: Robert Nelson <robertcnelson@gmail.com> |
| Tested-by: Barry Song <Baohua.Song@csr.com> |
| (cherry picked from commit bba9071087108d3de70bea274e35064cc480487b) |
| |
| Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp> |
| Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com> |
| --- |
| mm/page_alloc.c | 30 +++++++++++++++++++++--------- |
| 1 file changed, 21 insertions(+), 9 deletions(-) |
| |
| --- a/mm/page_alloc.c |
| +++ b/mm/page_alloc.c |
| @@ -2137,16 +2137,13 @@ __alloc_pages_direct_compact(gfp_t gfp_m |
| } |
| #endif /* CONFIG_COMPACTION */ |
| |
| -/* The really slow allocator path where we enter direct reclaim */ |
| -static inline struct page * |
| -__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, |
| - struct zonelist *zonelist, enum zone_type high_zoneidx, |
| - nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, |
| - int migratetype, unsigned long *did_some_progress) |
| +/* Perform direct synchronous page reclaim */ |
| +static int |
| +__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, |
| + nodemask_t *nodemask) |
| { |
| - struct page *page = NULL; |
| struct reclaim_state reclaim_state; |
| - bool drained = false; |
| + int progress; |
| |
| cond_resched(); |
| |
| @@ -2157,7 +2154,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_m |
| reclaim_state.reclaimed_slab = 0; |
| current->reclaim_state = &reclaim_state; |
| |
| - *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask); |
| + progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask); |
| |
| current->reclaim_state = NULL; |
| lockdep_clear_current_reclaim_state(); |
| @@ -2165,6 +2162,21 @@ __alloc_pages_direct_reclaim(gfp_t gfp_m |
| |
| cond_resched(); |
| |
| + return progress; |
| +} |
| + |
| +/* The really slow allocator path where we enter direct reclaim */ |
| +static inline struct page * |
| +__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, |
| + struct zonelist *zonelist, enum zone_type high_zoneidx, |
| + nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, |
| + int migratetype, unsigned long *did_some_progress) |
| +{ |
| + struct page *page = NULL; |
| + bool drained = false; |
| + |
| + *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist, |
| + nodemask); |
| if (unlikely(!(*did_some_progress))) |
| return NULL; |
| |