| From 1251de84ec057211d59031a364994e858567c6c4 Mon Sep 17 00:00:00 2001 |
| From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> |
| Date: Mon, 3 Feb 2020 17:36:46 -0800 |
| Subject: [PATCH] powerpc/mmu_gather: enable RCU_TABLE_FREE even for !SMP case |
| |
| commit 12e4d53f3f04e81f9e83d6fc10edc7314ab9f6b9 upstream. |
| |
| Patch series "Fixup page directory freeing", v4. |
| |
| This is a repost of patch series from Peter with the arch specific changes |
| except ppc64 dropped. ppc64 changes are added here because we are redoing |
| the patch series on top of ppc64 changes. This makes it easy to backport |
| these changes. Only the first 2 patches need to be backported to stable. |
| |
| The thing is, on anything SMP, freeing page directories should observe the |
| exact same order as normal page freeing: |
| |
| 1) unhook page/directory |
| 2) TLB invalidate |
| 3) free page/directory |
| |
| Without this, any concurrent page-table walk could end up with a |
| Use-after-Free. This is esp. trivial for anything that has software |
| page-table walkers (HAVE_FAST_GUP / software TLB fill) or the hardware |
| caches partial page-walks (ie. caches page directories). |
| |
| Even on UP this might give issues since mmu_gather is preemptible these |
| days. An interrupt or preempted task accessing user pages might stumble |
| into the free page if the hardware caches page directories. |
| |
| This patch series fixes ppc64 and adds generic MMU_GATHER changes to |
| support the conversion of other architectures. I haven't added patches |
| w.r.t. other architectures because they are yet to be acked. |
| |
| This patch (of 9): |
| |
| A followup patch is going to make sure we correctly invalidate page walk |
| cache before we free page table pages. In order to keep things simple |
| enable RCU_TABLE_FREE even for !SMP so that we don't have to fixup the |
| !SMP case differently in the followup patch. |
| |
| !SMP case is right now broken for radix translation w.r.t page walk |
| cache flush. We can get interrupted in between page table free and |
| that would imply we have page walk cache entries pointing to tables |
| which got freed already. Michael said "both our platforms that run on |
| Power9 force SMP on in Kconfig, so the !SMP case is unlikely to be a |
| problem for anyone in practice, unless they've hacked their kernel to |
| build it !SMP." |
| |
| Link: http://lkml.kernel.org/r/20200116064531.483522-2-aneesh.kumar@linux.ibm.com |
| Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> |
| Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> |
| Acked-by: Michael Ellerman <mpe@ellerman.id.au> |
| Cc: <stable@vger.kernel.org> |
| |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
| Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> |
| |
| diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig |
| index f7a363cbc1bb..c8d228846ab6 100644 |
| --- a/arch/powerpc/Kconfig |
| +++ b/arch/powerpc/Kconfig |
| @@ -214,7 +214,7 @@ config PPC |
| select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH |
| select HAVE_PERF_REGS |
| select HAVE_PERF_USER_STACK_DUMP |
| - select HAVE_RCU_TABLE_FREE if SMP |
| + select HAVE_RCU_TABLE_FREE |
| select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE |
| select HAVE_MMU_GATHER_PAGE_SIZE |
| select HAVE_REGS_AND_STACK_ACCESS_API |
| diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h |
| index 998317702630..dc5c039eb28e 100644 |
| --- a/arch/powerpc/include/asm/book3s/32/pgalloc.h |
| +++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h |
| @@ -49,7 +49,6 @@ static inline void pgtable_free(void *table, unsigned index_size) |
| |
| #define get_hugepd_cache_index(x) (x) |
| |
| -#ifdef CONFIG_SMP |
| static inline void pgtable_free_tlb(struct mmu_gather *tlb, |
| void *table, int shift) |
| { |
| @@ -66,13 +65,6 @@ static inline void __tlb_remove_table(void *_table) |
| |
| pgtable_free(table, shift); |
| } |
| -#else |
| -static inline void pgtable_free_tlb(struct mmu_gather *tlb, |
| - void *table, int shift) |
| -{ |
| - pgtable_free(table, shift); |
| -} |
| -#endif |
| |
| static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, |
| unsigned long address) |
| diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h |
| index d5a44912902f..cae9e814593a 100644 |
| --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h |
| +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h |
| @@ -19,9 +19,7 @@ extern struct vmemmap_backing *vmemmap_list; |
| extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long); |
| extern void pmd_fragment_free(unsigned long *); |
| extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift); |
| -#ifdef CONFIG_SMP |
| extern void __tlb_remove_table(void *_table); |
| -#endif |
| void pte_frag_destroy(void *pte_frag); |
| |
| static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm) |
| diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h |
| index 332b13b4ecdb..29c43665a753 100644 |
| --- a/arch/powerpc/include/asm/nohash/pgalloc.h |
| +++ b/arch/powerpc/include/asm/nohash/pgalloc.h |
| @@ -46,7 +46,6 @@ static inline void pgtable_free(void *table, int shift) |
| |
| #define get_hugepd_cache_index(x) (x) |
| |
| -#ifdef CONFIG_SMP |
| static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) |
| { |
| unsigned long pgf = (unsigned long)table; |
| @@ -64,13 +63,6 @@ static inline void __tlb_remove_table(void *_table) |
| pgtable_free(table, shift); |
| } |
| |
| -#else |
| -static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) |
| -{ |
| - pgtable_free(table, shift); |
| -} |
| -#endif |
| - |
| static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, |
| unsigned long address) |
| { |
| diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c |
| index 01bc9663360d..c7e3cc805cef 100644 |
| --- a/arch/powerpc/mm/book3s64/pgtable.c |
| +++ b/arch/powerpc/mm/book3s64/pgtable.c |
| @@ -354,7 +354,6 @@ static inline void pgtable_free(void *table, int index) |
| } |
| } |
| |
| -#ifdef CONFIG_SMP |
| void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index) |
| { |
| unsigned long pgf = (unsigned long)table; |
| @@ -371,12 +370,6 @@ void __tlb_remove_table(void *_table) |
| |
| return pgtable_free(table, index); |
| } |
| -#else |
| -void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index) |
| -{ |
| - return pgtable_free(table, index); |
| -} |
| -#endif |
| |
| #ifdef CONFIG_PROC_FS |
| atomic_long_t direct_pages_count[MMU_PAGE_COUNT]; |
| -- |
| 2.7.4 |
| |