| From dhobsong@igel.co.jp Mon Oct 29 00:52:06 2012 |
| From: Damian Hobson-Garcia <dhobsong@igel.co.jp> |
| Date: Mon, 29 Oct 2012 16:50:25 +0900 |
| Subject: [PATCH v2 08/58] mm: mmzone: MIGRATE_CMA migration type added |
| To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au |
| Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp |
| Message-ID: <1351497075-32717-9-git-send-email-dhobsong@igel.co.jp> |
| |
| |
| From: Michal Nazarewicz <mina86@mina86.com> |
| |
| The MIGRATE_CMA migration type has two main characteristics: |
| (i) only movable pages can be allocated from MIGRATE_CMA |
| pageblocks and (ii) page allocator will never change migration |
| type of MIGRATE_CMA pageblocks. |
| |
| This guarantees (to some degree) that a page in a MIGRATE_CMA |
| pageblock can always be migrated somewhere else (unless there's no |
| memory left in the system). |
| |
| It is designed to be used for allocating big chunks (eg. 10MiB) |
| of physically contiguous memory. Once driver requests |
| contiguous memory, pages from MIGRATE_CMA pageblocks may be |
| migrated away to create a contiguous block. |
| |
| To minimise the number of migrations, the MIGRATE_CMA migration |
| type is the last type tried when the page allocator falls back to |
| other migration types when requested. |
| |
| Signed-off-by: Michal Nazarewicz <mina86@mina86.com> |
| Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com> |
| Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com> |
| Acked-by: Mel Gorman <mel@csn.ul.ie> |
| Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> |
| Tested-by: Rob Clark <rob.clark@linaro.org> |
| Tested-by: Ohad Ben-Cohen <ohad@wizery.com> |
| Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org> |
| Tested-by: Robert Nelson <robertcnelson@gmail.com> |
| Tested-by: Barry Song <Baohua.Song@csr.com> |
| (cherry picked from commit 47118af076f64844b4f423bc2f545b2da9dab50d) |
| |
| Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp> |
| Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com> |
| --- |
| include/linux/gfp.h | 3 + |
| include/linux/mmzone.h | 38 +++++++++++++++++++----- |
| mm/Kconfig | 2 - |
| mm/compaction.c | 11 +++++-- |
| mm/page_alloc.c | 76 ++++++++++++++++++++++++++++++++++++++----------- |
| mm/vmstat.c | 3 + |
| 6 files changed, 106 insertions(+), 27 deletions(-) |
| |
| --- a/include/linux/gfp.h |
| +++ b/include/linux/gfp.h |
| @@ -397,6 +397,9 @@ static inline bool pm_suspended_storage( |
| extern int alloc_contig_range(unsigned long start, unsigned long end); |
| extern void free_contig_range(unsigned long pfn, unsigned nr_pages); |
| |
| +/* CMA stuff */ |
| +extern void init_cma_reserved_pageblock(struct page *page); |
| + |
| #endif |
| |
| #endif /* __LINUX_GFP_H */ |
| --- a/include/linux/mmzone.h |
| +++ b/include/linux/mmzone.h |
| @@ -35,13 +35,37 @@ |
| */ |
| #define PAGE_ALLOC_COSTLY_ORDER 3 |
| |
| -#define MIGRATE_UNMOVABLE 0 |
| -#define MIGRATE_RECLAIMABLE 1 |
| -#define MIGRATE_MOVABLE 2 |
| -#define MIGRATE_PCPTYPES 3 /* the number of types on the pcp lists */ |
| -#define MIGRATE_RESERVE 3 |
| -#define MIGRATE_ISOLATE 4 /* can't allocate from here */ |
| -#define MIGRATE_TYPES 5 |
| +enum { |
| + MIGRATE_UNMOVABLE, |
| + MIGRATE_RECLAIMABLE, |
| + MIGRATE_MOVABLE, |
| + MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ |
| + MIGRATE_RESERVE = MIGRATE_PCPTYPES, |
| +#ifdef CONFIG_CMA |
| + /* |
| + * MIGRATE_CMA migration type is designed to mimic the way |
| + * ZONE_MOVABLE works. Only movable pages can be allocated |
| + * from MIGRATE_CMA pageblocks and page allocator never |
| + * implicitly change migration type of MIGRATE_CMA pageblock. |
| + * |
| + * The way to use it is to change migratetype of a range of |
| + * pageblocks to MIGRATE_CMA which can be done by |
| + * __free_pageblock_cma() function. What is important though |
| + * is that a range of pageblocks must be aligned to |
| + * MAX_ORDER_NR_PAGES should biggest page be bigger then |
| + * a single pageblock. |
| + */ |
| + MIGRATE_CMA, |
| +#endif |
| + MIGRATE_ISOLATE, /* can't allocate from here */ |
| + MIGRATE_TYPES |
| +}; |
| + |
| +#ifdef CONFIG_CMA |
| +# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) |
| +#else |
| +# define is_migrate_cma(migratetype) false |
| +#endif |
| |
| #define for_each_migratetype_order(order, type) \ |
| for (order = 0; order < MAX_ORDER; order++) \ |
| --- a/mm/Kconfig |
| +++ b/mm/Kconfig |
| @@ -198,7 +198,7 @@ config COMPACTION |
| config MIGRATION |
| bool "Page migration" |
| def_bool y |
| - depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION |
| + depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA |
| help |
| Allows the migration of the physical location of pages of processes |
| while the virtual addresses are not changed. This is useful in |
| --- a/mm/compaction.c |
| +++ b/mm/compaction.c |
| @@ -45,6 +45,11 @@ static void map_pages(struct list_head * |
| } |
| } |
| |
| +static inline bool migrate_async_suitable(int migratetype) |
| +{ |
| + return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE; |
| +} |
| + |
| /* |
| * Isolate free pages onto a private freelist. Caller must hold zone->lock. |
| * If @strict is true, will abort returning 0 on any invalid PFNs or non-free |
| @@ -299,7 +304,7 @@ isolate_migratepages_range(struct zone * |
| */ |
| pageblock_nr = low_pfn >> pageblock_order; |
| if (!cc->sync && last_pageblock_nr != pageblock_nr && |
| - get_pageblock_migratetype(page) != MIGRATE_MOVABLE) { |
| + !migrate_async_suitable(get_pageblock_migratetype(page))) { |
| low_pfn += pageblock_nr_pages; |
| low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1; |
| last_pageblock_nr = pageblock_nr; |
| @@ -367,8 +372,8 @@ static bool suitable_migration_target(st |
| if (PageBuddy(page) && page_order(page) >= pageblock_order) |
| return true; |
| |
| - /* If the block is MIGRATE_MOVABLE, allow migration */ |
| - if (migratetype == MIGRATE_MOVABLE) |
| + /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ |
| + if (migrate_async_suitable(migratetype)) |
| return true; |
| |
| /* Otherwise skip the block */ |
| --- a/mm/page_alloc.c |
| +++ b/mm/page_alloc.c |
| @@ -750,6 +750,24 @@ void __meminit __free_pages_bootmem(stru |
| __free_pages(page, order); |
| } |
| |
| +#ifdef CONFIG_CMA |
| +/* Free whole pageblock and set it's migration type to MIGRATE_CMA. */ |
| +void __init init_cma_reserved_pageblock(struct page *page) |
| +{ |
| + unsigned i = pageblock_nr_pages; |
| + struct page *p = page; |
| + |
| + do { |
| + __ClearPageReserved(p); |
| + set_page_count(p, 0); |
| + } while (++p, --i); |
| + |
| + set_page_refcounted(page); |
| + set_pageblock_migratetype(page, MIGRATE_CMA); |
| + __free_pages(page, pageblock_order); |
| + totalram_pages += pageblock_nr_pages; |
| +} |
| +#endif |
| |
| /* |
| * The order of subdivision here is critical for the IO subsystem. |
| @@ -875,10 +893,15 @@ struct page *__rmqueue_smallest(struct z |
| * This array describes the order lists are fallen back to when |
| * the free lists for the desirable migrate type are depleted |
| */ |
| -static int fallbacks[MIGRATE_TYPES][3] = { |
| - [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, |
| - [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, |
| - [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE }, |
| +static int fallbacks[MIGRATE_TYPES][4] = { |
| + [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, |
| + [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, |
| +#ifdef CONFIG_CMA |
| + [MIGRATE_MOVABLE] = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE }, |
| + [MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */ |
| +#else |
| + [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE }, |
| +#endif |
| [MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */ |
| [MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */ |
| }; |
| @@ -995,11 +1018,18 @@ __rmqueue_fallback(struct zone *zone, in |
| * pages to the preferred allocation list. If falling |
| * back for a reclaimable kernel allocation, be more |
| * aggressive about taking ownership of free pages |
| + * |
| + * On the other hand, never change migration |
| + * type of MIGRATE_CMA pageblocks nor move CMA |
| + * pages on different free lists. We don't |
| + * want unmovable pages to be allocated from |
| + * MIGRATE_CMA areas. |
| */ |
| - if (unlikely(current_order >= (pageblock_order >> 1)) || |
| - start_migratetype == MIGRATE_RECLAIMABLE || |
| - page_group_by_mobility_disabled) { |
| - unsigned long pages; |
| + if (!is_migrate_cma(migratetype) && |
| + (unlikely(current_order >= pageblock_order / 2) || |
| + start_migratetype == MIGRATE_RECLAIMABLE || |
| + page_group_by_mobility_disabled)) { |
| + int pages; |
| pages = move_freepages_block(zone, page, |
| start_migratetype); |
| |
| @@ -1017,11 +1047,14 @@ __rmqueue_fallback(struct zone *zone, in |
| rmv_page_order(page); |
| |
| /* Take ownership for orders >= pageblock_order */ |
| - if (current_order >= pageblock_order) |
| + if (current_order >= pageblock_order && |
| + !is_migrate_cma(migratetype)) |
| change_pageblock_range(page, current_order, |
| start_migratetype); |
| |
| - expand(zone, page, order, current_order, area, migratetype); |
| + expand(zone, page, order, current_order, area, |
| + is_migrate_cma(migratetype) |
| + ? migratetype : start_migratetype); |
| |
| trace_mm_page_alloc_extfrag(page, order, current_order, |
| start_migratetype, migratetype); |
| @@ -1072,7 +1105,7 @@ static int rmqueue_bulk(struct zone *zon |
| unsigned long count, struct list_head *list, |
| int migratetype, int cold) |
| { |
| - int i; |
| + int mt = migratetype, i; |
| |
| spin_lock(&zone->lock); |
| for (i = 0; i < count; ++i) { |
| @@ -1093,7 +1126,12 @@ static int rmqueue_bulk(struct zone *zon |
| list_add(&page->lru, list); |
| else |
| list_add_tail(&page->lru, list); |
| - set_page_private(page, migratetype); |
| + if (IS_ENABLED(CONFIG_CMA)) { |
| + mt = get_pageblock_migratetype(page); |
| + if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE) |
| + mt = migratetype; |
| + } |
| + set_page_private(page, mt); |
| list = &page->lru; |
| } |
| __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); |
| @@ -1373,8 +1411,12 @@ int split_free_page(struct page *page) |
| |
| if (order >= pageblock_order - 1) { |
| struct page *endpage = page + (1 << order) - 1; |
| - for (; page < endpage; page += pageblock_nr_pages) |
| - set_pageblock_migratetype(page, MIGRATE_MOVABLE); |
| + for (; page < endpage; page += pageblock_nr_pages) { |
| + int mt = get_pageblock_migratetype(page); |
| + if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt)) |
| + set_pageblock_migratetype(page, |
| + MIGRATE_MOVABLE); |
| + } |
| } |
| |
| return 1 << order; |
| @@ -5421,14 +5463,16 @@ static int |
| __count_immobile_pages(struct zone *zone, struct page *page, int count) |
| { |
| unsigned long pfn, iter, found; |
| + int mt; |
| + |
| /* |
| * For avoiding noise data, lru_add_drain_all() should be called |
| * If ZONE_MOVABLE, the zone never contains immobile pages |
| */ |
| if (zone_idx(zone) == ZONE_MOVABLE) |
| return true; |
| - |
| - if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE) |
| + mt = get_pageblock_migratetype(page); |
| + if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt)) |
| return true; |
| |
| pfn = page_to_pfn(page); |
| --- a/mm/vmstat.c |
| +++ b/mm/vmstat.c |
| @@ -613,6 +613,9 @@ static char * const migratetype_names[MI |
| "Reclaimable", |
| "Movable", |
| "Reserve", |
| +#ifdef CONFIG_CMA |
| + "CMA", |
| +#endif |
| "Isolate", |
| }; |
| |