| From 3a860d165eb5f4d7cf0bf81ef6a5b5c5e1754422 Mon Sep 17 00:00:00 2001 |
| From: Max Filippov <jcmvbkbc@gmail.com> |
| Date: Mon, 16 Nov 2020 01:38:59 -0800 |
| Subject: xtensa: disable preemption around cache alias management calls |
| |
| From: Max Filippov <jcmvbkbc@gmail.com> |
| |
| commit 3a860d165eb5f4d7cf0bf81ef6a5b5c5e1754422 upstream. |
| |
Although cache alias management calls set up and tear down TLB entries,
and fast_second_level_miss is able to restore a TLB entry should it be
evicted, these calls absolutely cannot preempt each other, because they
use the same TLBTEMP area for different purposes.
Disable preemption around all cache alias management calls to enforce
that.
| |
| Cc: stable@vger.kernel.org |
| Signed-off-by: Max Filippov <jcmvbkbc@gmail.com> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| arch/xtensa/mm/cache.c | 14 ++++++++++++++ |
| 1 file changed, 14 insertions(+) |
| |
| --- a/arch/xtensa/mm/cache.c |
| +++ b/arch/xtensa/mm/cache.c |
| @@ -71,8 +71,10 @@ static inline void kmap_invalidate_coher |
| kvaddr = TLBTEMP_BASE_1 + |
| (page_to_phys(page) & DCACHE_ALIAS_MASK); |
| |
| + preempt_disable(); |
| __invalidate_dcache_page_alias(kvaddr, |
| page_to_phys(page)); |
| + preempt_enable(); |
| } |
| } |
| } |
| @@ -157,6 +159,7 @@ void flush_dcache_page(struct page *page |
| if (!alias && !mapping) |
| return; |
| |
| + preempt_disable(); |
| virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); |
| __flush_invalidate_dcache_page_alias(virt, phys); |
| |
| @@ -167,6 +170,7 @@ void flush_dcache_page(struct page *page |
| |
| if (mapping) |
| __invalidate_icache_page_alias(virt, phys); |
| + preempt_enable(); |
| } |
| |
| /* There shouldn't be an entry in the cache for this page anymore. */ |
| @@ -200,8 +204,10 @@ void local_flush_cache_page(struct vm_ar |
| unsigned long phys = page_to_phys(pfn_to_page(pfn)); |
| unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK); |
| |
| + preempt_disable(); |
| __flush_invalidate_dcache_page_alias(virt, phys); |
| __invalidate_icache_page_alias(virt, phys); |
| + preempt_enable(); |
| } |
| EXPORT_SYMBOL(local_flush_cache_page); |
| |
| @@ -228,11 +234,13 @@ update_mmu_cache(struct vm_area_struct * |
| unsigned long phys = page_to_phys(page); |
| unsigned long tmp; |
| |
| + preempt_disable(); |
| tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); |
| __flush_invalidate_dcache_page_alias(tmp, phys); |
| tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK); |
| __flush_invalidate_dcache_page_alias(tmp, phys); |
| __invalidate_icache_page_alias(tmp, phys); |
| + preempt_enable(); |
| |
| clear_bit(PG_arch_1, &page->flags); |
| } |
| @@ -266,7 +274,9 @@ void copy_to_user_page(struct vm_area_st |
| |
| if (alias) { |
| unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); |
| + preempt_disable(); |
| __flush_invalidate_dcache_page_alias(t, phys); |
| + preempt_enable(); |
| } |
| |
| /* Copy data */ |
| @@ -281,9 +291,11 @@ void copy_to_user_page(struct vm_area_st |
| if (alias) { |
| unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); |
| |
| + preempt_disable(); |
| __flush_invalidate_dcache_range((unsigned long) dst, len); |
| if ((vma->vm_flags & VM_EXEC) != 0) |
| __invalidate_icache_page_alias(t, phys); |
| + preempt_enable(); |
| |
| } else if ((vma->vm_flags & VM_EXEC) != 0) { |
| __flush_dcache_range((unsigned long)dst,len); |
| @@ -305,7 +317,9 @@ extern void copy_from_user_page(struct v |
| |
| if (alias) { |
| unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); |
| + preempt_disable(); |
| __flush_invalidate_dcache_page_alias(t, phys); |
| + preempt_enable(); |
| } |
| |
| memcpy(dst, src, len); |