| From: Kefeng Wang <wangkefeng.wang@huawei.com> |
| Subject: mm: add pageblock_align() macro |
| Date: Wed, 7 Sep 2022 14:08:43 +0800 |
| |
| Add a pageblock_align() macro, the pageblock-granularity counterpart of |
| pageblock_start_pfn()/pageblock_end_pfn(), and use it to replace the |
| open-coded ALIGN(pfn, pageblock_nr_pages) call sites. |
| |
| Link: https://lkml.kernel.org/r/20220907060844.126891-2-wangkefeng.wang@huawei.com |
| Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com> |
| Acked-by: Mike Rapoport <rppt@linux.ibm.com> |
| Reviewed-by: David Hildenbrand <david@redhat.com> |
| Cc: Oscar Salvador <osalvador@suse.de> |
| Cc: Vlastimil Babka <vbabka@suse.cz> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| include/linux/pageblock-flags.h | 1 + |
| mm/memblock.c | 4 ++-- |
| mm/page_isolation.c | 4 ++-- |
| 3 files changed, 5 insertions(+), 4 deletions(-) |
| |
| --- a/include/linux/pageblock-flags.h~mm-add-pageblock_align-macro |
| +++ a/include/linux/pageblock-flags.h |
| @@ -53,6 +53,7 @@ extern unsigned int pageblock_order; |
| #endif /* CONFIG_HUGETLB_PAGE */ |
| |
| #define pageblock_nr_pages (1UL << pageblock_order) |
| +#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages) |
| #define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages) |
| #define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages) |
| |
| --- a/mm/memblock.c~mm-add-pageblock_align-macro |
| +++ a/mm/memblock.c |
| @@ -2014,12 +2014,12 @@ static void __init free_unused_memmap(vo |
| * presume that there are no holes in the memory map inside |
| * a pageblock |
| */ |
| - prev_end = ALIGN(end, pageblock_nr_pages); |
| + prev_end = pageblock_align(end); |
| } |
| |
| #ifdef CONFIG_SPARSEMEM |
| if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) { |
| - prev_end = ALIGN(end, pageblock_nr_pages); |
| + prev_end = pageblock_align(end); |
| free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); |
| } |
| #endif |
| --- a/mm/page_isolation.c~mm-add-pageblock_align-macro |
| +++ a/mm/page_isolation.c |
| @@ -533,7 +533,7 @@ int start_isolate_page_range(unsigned lo |
| struct page *page; |
| /* isolation is done at page block granularity */ |
| unsigned long isolate_start = pageblock_start_pfn(start_pfn); |
| - unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages); |
| + unsigned long isolate_end = pageblock_align(end_pfn); |
| int ret; |
| bool skip_isolation = false; |
| |
| @@ -580,7 +580,7 @@ void undo_isolate_page_range(unsigned lo |
| unsigned long pfn; |
| struct page *page; |
| unsigned long isolate_start = pageblock_start_pfn(start_pfn); |
| - unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages); |
| + unsigned long isolate_end = pageblock_align(end_pfn); |
| |
| for (pfn = isolate_start; |
| pfn < isolate_end; |
| _ |