From dhobsong@igel.co.jp Mon Oct 29 00:51:58 2012
From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
Date: Mon, 29 Oct 2012 16:50:22 +0900
Subject: [PATCH v2 05/58] mm: compaction: export some of the functions
To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
Message-ID: <1351497075-32717-6-git-send-email-dhobsong@igel.co.jp>


From: Michal Nazarewicz <mina86@mina86.com>

This commit exports some of the functions from compaction.c by adding
their declarations to the internal.h header file, so that other
mm-related code can use them.

This forces compaction.c to always be compiled (as opposed to being
compiled only if CONFIG_COMPACTION is defined), but to avoid
introducing code that the user did not ask for, part of compaction.c
is now wrapped in an #ifdef.

Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
(cherry picked from commit ff9543fd32060917beb080b1eb2d1d41ec7f39e0)

Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
---
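For reviewers, a rough sketch of how mm/compaction.c is laid out after
this patch (function bodies elided; reconstructed from the hunks below,
so treat it as orientation rather than exact code):

    /* mm/compaction.c -- guard structure after this patch (sketch) */

    #include "internal.h"	/* compact_control and the exported declarations */

    #if defined CONFIG_COMPACTION || defined CONFIG_CMA
    /*
     * Code shared by compaction and CMA, built whenever either option
     * is enabled: release_freepages(), map_pages(),
     * isolate_freepages_block(), isolate_freepages_range(),
     * acct_isolated(), too_many_isolated(),
     * isolate_migratepages_range().
     */
    #endif /* CONFIG_COMPACTION || CONFIG_CMA */

    #ifdef CONFIG_COMPACTION
    /*
     * Compaction-only code, compiled out on CONFIG_CMA=y,
     * CONFIG_COMPACTION=n kernels: suitable_migration_target(),
     * isolate_freepages(), isolate_migratepages(), compact_finished(),
     * the sysfs interface, and so on.
     */
    #endif /* CONFIG_COMPACTION */

The net effect is that a kernel with CONFIG_CMA=y but
CONFIG_COMPACTION=n still builds the page isolation helpers that
internal.h now declares, while the compaction driver itself compiles
away.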
 mm/Makefile     |    3 
 mm/compaction.c |  328 ++++++++++++++++++++++++++------------------------------
 mm/internal.h   |   33 +++++
 3 files changed, 191 insertions(+), 173 deletions(-)

--- a/mm/Makefile
+++ b/mm/Makefile
@@ -13,7 +13,7 @@ obj-y := filemap.o mempool.o oom_kill.
 			readahead.o swap.o truncate.o vmscan.o shmem.o \
 			prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
 			page_isolation.o mm_init.o mmu_context.o percpu.o \
-			$(mmu-y)
+			compaction.o $(mmu-y)
 obj-y += init-mm.o

 ifdef CONFIG_NO_BOOTMEM
@@ -32,7 +32,6 @@ obj-$(CONFIG_NUMA) += mempolicy.o
 obj-$(CONFIG_SPARSEMEM) += sparse.o
 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
 obj-$(CONFIG_SLOB) += slob.o
-obj-$(CONFIG_COMPACTION) += compaction.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_KSM) += ksm.o
 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,30 +16,11 @@
 #include <linux/sysfs.h>
 #include "internal.h"

+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/compaction.h>

-/*
- * compact_control is used to track pages being migrated and the free pages
- * they are being migrated to during memory compaction. The free_pfn starts
- * at the end of a zone and migrate_pfn begins at the start. Movable pages
- * are moved to the end of a zone during a compaction run and the run
- * completes when free_pfn <= migrate_pfn
- */
-struct compact_control {
-	struct list_head freepages;	/* List of free pages to migrate to */
-	struct list_head migratepages;	/* List of pages being migrated */
-	unsigned long nr_freepages;	/* Number of isolated free pages */
-	unsigned long nr_migratepages;	/* Number of pages to migrate */
-	unsigned long free_pfn;		/* isolate_freepages search base */
-	unsigned long migrate_pfn;	/* isolate_migratepages search base */
-	bool sync;			/* Synchronous migration */
-
-	int order;			/* order a direct compactor needs */
-	int migratetype;	/* MOVABLE, RECLAIMABLE etc */
-	struct zone *zone;
-};
-
 static unsigned long release_freepages(struct list_head *freelist)
 {
 	struct page *page, *next;
@@ -54,6 +35,16 @@ static unsigned long release_freepages(s
 	return count;
 }

+static void map_pages(struct list_head *list)
+{
+	struct page *page;
+
+	list_for_each_entry(page, list, lru) {
+		arch_alloc_page(page, 0);
+		kernel_map_pages(page, 1, 1);
+	}
+}
+
 /*
  * Isolate free pages onto a private freelist. Caller must hold zone->lock.
  * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
@@ -122,7 +113,7 @@ static unsigned long isolate_freepages_b
  * (which may be greater then end_pfn if end fell in a middle of
  * a free page).
  */
-static unsigned long
+unsigned long
 isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long isolated, pfn, block_end_pfn, flags;
@@ -176,127 +167,6 @@ isolate_freepages_range(unsigned long st
 	return pfn;
 }

-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
-{
-
-	int migratetype = get_pageblock_migratetype(page);
-
-	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
-	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-		return false;
-
-	/* If the page is a large free page, then allow migration */
-	if (PageBuddy(page) && page_order(page) >= pageblock_order)
-		return true;
-
-	/* If the block is MIGRATE_MOVABLE, allow migration */
-	if (migratetype == MIGRATE_MOVABLE)
-		return true;
-
-	/* Otherwise skip the block */
-	return false;
-}
-
-static void map_pages(struct list_head *list)
-{
-	struct page *page;
-
-	list_for_each_entry(page, list, lru) {
-		arch_alloc_page(page, 0);
-		kernel_map_pages(page, 1, 1);
-	}
-}
-
-/*
- * Based on information in the current compact_control, find blocks
- * suitable for isolating free pages from and then isolate them.
- */
-static void isolate_freepages(struct zone *zone,
-				struct compact_control *cc)
-{
-	struct page *page;
-	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
-	unsigned long flags;
-	int nr_freepages = cc->nr_freepages;
-	struct list_head *freelist = &cc->freepages;
-
-	/*
-	 * Initialise the free scanner. The starting point is where we last
-	 * scanned from (or the end of the zone if starting). The low point
-	 * is the end of the pageblock the migration scanner is using.
-	 */
-	pfn = cc->free_pfn;
-	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
-
-	/*
-	 * Take care that if the migration scanner is at the end of the zone
-	 * that the free scanner does not accidentally move to the next zone
-	 * in the next isolation cycle.
-	 */
-	high_pfn = min(low_pfn, pfn);
-
-	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-
-	/*
-	 * Isolate free pages until enough are available to migrate the
-	 * pages on cc->migratepages. We stop searching if the migrate
-	 * and free page scanners meet or enough free pages are isolated.
-	 */
-	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
-					pfn -= pageblock_nr_pages) {
-		unsigned long isolated;
-
-		if (!pfn_valid(pfn))
-			continue;
-
-		/*
-		 * Check for overlapping nodes/zones. It's possible on some
-		 * configurations to have a setup like
-		 * node0 node1 node0
-		 * i.e. it's possible that all pages within a zones range of
-		 * pages do not belong to a single zone.
-		 */
-		page = pfn_to_page(pfn);
-		if (page_zone(page) != zone)
-			continue;
-
-		/* Check the block is suitable for migration */
-		if (!suitable_migration_target(page))
-			continue;
-
-		/*
-		 * Found a block suitable for isolating free pages from. Now
-		 * we disabled interrupts, double check things are ok and
-		 * isolate the pages. This is to minimise the time IRQs
-		 * are disabled
-		 */
-		isolated = 0;
-		spin_lock_irqsave(&zone->lock, flags);
-		if (suitable_migration_target(page)) {
-			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
-			isolated = isolate_freepages_block(pfn, end_pfn,
-							   freelist, false);
-			nr_freepages += isolated;
-		}
-		spin_unlock_irqrestore(&zone->lock, flags);
-
-		/*
-		 * Record the highest PFN we isolated pages from. When next
-		 * looking for free pages, the search will restart here as
-		 * page migration may have returned some pages to the allocator
-		 */
-		if (isolated)
-			high_pfn = max(high_pfn, pfn);
-	}
-
-	/* split_free_page does not map the pages */
-	map_pages(freelist);
-
-	cc->free_pfn = high_pfn;
-	cc->nr_freepages = nr_freepages;
-}
-
 /* Update the number of anon and file isolated pages in the zone */
 static void acct_isolated(struct zone *zone, struct compact_control *cc)
 {
@@ -325,13 +195,6 @@ static bool too_many_isolated(struct zon
 	return isolated > (inactive + active) / 2;
 }

-/* possible outcome of isolate_migratepages */
-typedef enum {
-	ISOLATE_ABORT,		/* Abort compaction now */
-	ISOLATE_NONE,		/* No pages isolated, continue scanning */
-	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
-} isolate_migrate_t;
-
 /**
  * isolate_migratepages_range() - isolate all migrate-able pages in range.
  * @zone: Zone pages are in.
@@ -351,7 +214,7 @@ typedef enum {
  * does not modify any cc's fields, in particular it does not modify
  * (or read for that matter) cc->migrate_pfn.
  */
-static unsigned long
+unsigned long
 isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 			   unsigned long low_pfn, unsigned long end_pfn)
 {
@@ -487,35 +350,118 @@ isolate_migratepages_range(struct zone *
 	return low_pfn;
 }

+#endif /* CONFIG_COMPACTION || CONFIG_CMA */
+#ifdef CONFIG_COMPACTION
+
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
+{
+
+	int migratetype = get_pageblock_migratetype(page);
+
+	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+		return false;
+
+	/* If the page is a large free page, then allow migration */
+	if (PageBuddy(page) && page_order(page) >= pageblock_order)
+		return true;
+
+	/* If the block is MIGRATE_MOVABLE, allow migration */
+	if (migratetype == MIGRATE_MOVABLE)
+		return true;
+
+	/* Otherwise skip the block */
+	return false;
+}
+
 /*
- * Isolate all pages that can be migrated from the block pointed to by
- * the migrate scanner within compact_control.
+ * Based on information in the current compact_control, find blocks
+ * suitable for isolating free pages from and then isolate them.
  */
-static isolate_migrate_t isolate_migratepages(struct zone *zone,
-					struct compact_control *cc)
+static void isolate_freepages(struct zone *zone,
+				struct compact_control *cc)
 {
-	unsigned long low_pfn, end_pfn;
+	struct page *page;
+	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+	unsigned long flags;
+	int nr_freepages = cc->nr_freepages;
+	struct list_head *freelist = &cc->freepages;

-	/* Do not scan outside zone boundaries */
-	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+	/*
+	 * Initialise the free scanner. The starting point is where we last
+	 * scanned from (or the end of the zone if starting). The low point
+	 * is the end of the pageblock the migration scanner is using.
+	 */
+	pfn = cc->free_pfn;
+	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

-	/* Only scan within a pageblock boundary */
-	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
+	/*
+	 * Take care that if the migration scanner is at the end of the zone
+	 * that the free scanner does not accidentally move to the next zone
+	 * in the next isolation cycle.
+	 */
+	high_pfn = min(low_pfn, pfn);

-	/* Do not cross the free scanner or scan within a memory hole */
-	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
-		cc->migrate_pfn = end_pfn;
-		return ISOLATE_NONE;
-	}
+	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

-	/* Perform the isolation */
-	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
-	if (!low_pfn)
-		return ISOLATE_ABORT;
+	/*
+	 * Isolate free pages until enough are available to migrate the
+	 * pages on cc->migratepages. We stop searching if the migrate
+	 * and free page scanners meet or enough free pages are isolated.
+	 */
+	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+					pfn -= pageblock_nr_pages) {
+		unsigned long isolated;

-	cc->migrate_pfn = low_pfn;
+		if (!pfn_valid(pfn))
+			continue;

-	return ISOLATE_SUCCESS;
+		/*
+		 * Check for overlapping nodes/zones. It's possible on some
+		 * configurations to have a setup like
+		 * node0 node1 node0
+		 * i.e. it's possible that all pages within a zones range of
+		 * pages do not belong to a single zone.
+		 */
+		page = pfn_to_page(pfn);
+		if (page_zone(page) != zone)
+			continue;
+
+		/* Check the block is suitable for migration */
+		if (!suitable_migration_target(page))
+			continue;
+
+		/*
+		 * Found a block suitable for isolating free pages from. Now
+		 * we disabled interrupts, double check things are ok and
+		 * isolate the pages. This is to minimise the time IRQs
+		 * are disabled
+		 */
+		isolated = 0;
+		spin_lock_irqsave(&zone->lock, flags);
+		if (suitable_migration_target(page)) {
+			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
+			isolated = isolate_freepages_block(pfn, end_pfn,
+							   freelist, false);
+			nr_freepages += isolated;
+		}
+		spin_unlock_irqrestore(&zone->lock, flags);
+
+		/*
+		 * Record the highest PFN we isolated pages from. When next
+		 * looking for free pages, the search will restart here as
+		 * page migration may have returned some pages to the allocator
+		 */
+		if (isolated)
+			high_pfn = max(high_pfn, pfn);
+	}
+
+	/* split_free_page does not map the pages */
+	map_pages(freelist);
+
+	cc->free_pfn = high_pfn;
+	cc->nr_freepages = nr_freepages;
 }

 /*
@@ -564,6 +510,44 @@ static void update_nr_listpages(struct c
 	cc->nr_freepages = nr_freepages;
 }

+/* possible outcome of isolate_migratepages */
+typedef enum {
+	ISOLATE_ABORT,		/* Abort compaction now */
+	ISOLATE_NONE,		/* No pages isolated, continue scanning */
+	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
+} isolate_migrate_t;
+
+/*
+ * Isolate all pages that can be migrated from the block pointed to by
+ * the migrate scanner within compact_control.
+ */
+static isolate_migrate_t isolate_migratepages(struct zone *zone,
+					struct compact_control *cc)
+{
+	unsigned long low_pfn, end_pfn;
+
+	/* Do not scan outside zone boundaries */
+	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+
+	/* Only scan within a pageblock boundary */
+	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
+
+	/* Do not cross the free scanner or scan within a memory hole */
+	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
+		cc->migrate_pfn = end_pfn;
+		return ISOLATE_NONE;
+	}
+
+	/* Perform the isolation */
+	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
+	if (!low_pfn)
+		return ISOLATE_ABORT;
+
+	cc->migrate_pfn = low_pfn;
+
+	return ISOLATE_SUCCESS;
+}
+
 static int compact_finished(struct zone *zone,
 			    struct compact_control *cc)
 {
@@ -911,3 +895,5 @@ void compaction_unregister_node(struct n
 	return device_remove_file(&node->dev, &dev_attr_compact);
 }
 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
+
+#endif /* CONFIG_COMPACTION */
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -100,6 +100,39 @@ extern void prep_compound_page(struct pa
 extern bool is_free_buddy_page(struct page *page);
 #endif

+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
+/*
+ * in mm/compaction.c
+ */
+/*
+ * compact_control is used to track pages being migrated and the free pages
+ * they are being migrated to during memory compaction. The free_pfn starts
+ * at the end of a zone and migrate_pfn begins at the start. Movable pages
+ * are moved to the end of a zone during a compaction run and the run
+ * completes when free_pfn <= migrate_pfn
+ */
+struct compact_control {
+	struct list_head freepages;	/* List of free pages to migrate to */
+	struct list_head migratepages;	/* List of pages being migrated */
+	unsigned long nr_freepages;	/* Number of isolated free pages */
+	unsigned long nr_migratepages;	/* Number of pages to migrate */
+	unsigned long free_pfn;		/* isolate_freepages search base */
+	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+	bool sync;			/* Synchronous migration */
+
+	int order;			/* order a direct compactor needs */
+	int migratetype;	/* MOVABLE, RECLAIMABLE etc */
+	struct zone *zone;
+};
+
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+			   unsigned long low_pfn, unsigned long end_pfn);
+
+#endif

 /*
  * function for dealing with page's order in buddy system.