| From c6f3c5ee40c10bb65725047a220570f718507001 Mon Sep 17 00:00:00 2001 |
| From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> |
| Date: Fri, 5 Apr 2019 18:39:10 -0700 |
| Subject: mm/huge_memory.c: fix modifying of page protection by insert_pfn_pmd() |
| |
| From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> |
| |
| commit c6f3c5ee40c10bb65725047a220570f718507001 upstream. |
| |
| On some architectures, such as ppc64, set_pmd_at() cannot cope with a |
| situation where there is already some (different) valid entry present. |
| |
| Use pmdp_set_access_flags() instead to modify the entry, since that |
| helper is built to deal with modifying existing PMD entries. |
| |
| This is similar to commit cae85cb8add3 ("mm/memory.c: fix modifying of |
| page protection by insert_pfn()") |
| |
| We also make a similar update w.r.t. insert_pfn_pud(), even though ppc64 |
| does not support pud pfn entries at the moment. |
| |
| Without this patch we also see the following message in the kernel log: |
| "BUG: non-zero pgtables_bytes on freeing mm:" |
| |
| Link: http://lkml.kernel.org/r/20190402115125.18803-1-aneesh.kumar@linux.ibm.com |
| Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> |
| Reported-by: Chandan Rajendra <chandan@linux.ibm.com> |
| Reviewed-by: Jan Kara <jack@suse.cz> |
| Cc: Dan Williams <dan.j.williams@intel.com> |
| Cc: <stable@vger.kernel.org> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
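| For reference, a minimal sketch (not the literal hunk) of the pattern the |
| first hunk below applies; all identifiers are the insert_pfn_pmd() locals |
| visible in the diff context (mm, vma, addr, pmd, pfn, prot, write, entry, |
| ptl). When the PMD is already populated, the existing entry is upgraded in |
| place with pmdp_set_access_flags() rather than overwritten via set_pmd_at(): |
| |
|     ptl = pmd_lock(mm, pmd); |
|     if (!pmd_none(*pmd)) { |
|         /* Write fault on the pfn we already map: relax protections. */ |
|         if (write && pmd_pfn(*pmd) == pfn_t_to_pfn(pfn)) { |
|             entry = pmd_mkyoung(*pmd); |
|             entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
|             if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) |
|                 update_mmu_cache_pmd(vma, addr, pmd); |
|         } |
|         /* The real hunk also WARNs when a different, non-huge-zero pfn |
|          * is found; either way, never set_pmd_at() over a valid entry. */ |
|         goto out_unlock; |
|     } |
| |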
| mm/huge_memory.c | 36 ++++++++++++++++++++++++++++++++++++ |
| 1 file changed, 36 insertions(+) |
| |
| --- a/mm/huge_memory.c |
| +++ b/mm/huge_memory.c |
| @@ -753,6 +753,21 @@ static void insert_pfn_pmd(struct vm_are |
| spinlock_t *ptl; |
| |
| ptl = pmd_lock(mm, pmd); |
| + if (!pmd_none(*pmd)) { |
| + if (write) { |
| + if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { |
| + WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); |
| + goto out_unlock; |
| + } |
| + entry = pmd_mkyoung(*pmd); |
| + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
| + if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) |
| + update_mmu_cache_pmd(vma, addr, pmd); |
| + } |
| + |
| + goto out_unlock; |
| + } |
| + |
| entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); |
| if (pfn_t_devmap(pfn)) |
| entry = pmd_mkdevmap(entry); |
| @@ -764,11 +779,16 @@ static void insert_pfn_pmd(struct vm_are |
| if (pgtable) { |
| pgtable_trans_huge_deposit(mm, pmd, pgtable); |
| mm_inc_nr_ptes(mm); |
| + pgtable = NULL; |
| } |
| |
| set_pmd_at(mm, addr, pmd, entry); |
| update_mmu_cache_pmd(vma, addr, pmd); |
| + |
| +out_unlock: |
| spin_unlock(ptl); |
| + if (pgtable) |
| + pte_free(mm, pgtable); |
| } |
| |
| vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, |
| @@ -819,6 +839,20 @@ static void insert_pfn_pud(struct vm_are |
| spinlock_t *ptl; |
| |
| ptl = pud_lock(mm, pud); |
| + if (!pud_none(*pud)) { |
| + if (write) { |
| + if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) { |
| + WARN_ON_ONCE(!is_huge_zero_pud(*pud)); |
| + goto out_unlock; |
| + } |
| + entry = pud_mkyoung(*pud); |
| + entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); |
| + if (pudp_set_access_flags(vma, addr, pud, entry, 1)) |
| + update_mmu_cache_pud(vma, addr, pud); |
| + } |
| + goto out_unlock; |
| + } |
| + |
| entry = pud_mkhuge(pfn_t_pud(pfn, prot)); |
| if (pfn_t_devmap(pfn)) |
| entry = pud_mkdevmap(entry); |
| @@ -828,6 +862,8 @@ static void insert_pfn_pud(struct vm_are |
| } |
| set_pud_at(mm, addr, pud, entry); |
| update_mmu_cache_pud(vma, addr, pud); |
| + |
| +out_unlock: |
| spin_unlock(ptl); |
| } |
| |
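| |
| One more note on the first hunk: because the early return can skip the |
| deposit, the preallocated page table has to be freed instead of leaked. |
| The hunk handles this by clearing pgtable once pgtable_trans_huge_deposit() |
| takes ownership and freeing any leftover after the lock is dropped, so |
| mm_inc_nr_ptes() stays balanced with the deposits that actually happen. |
| Roughly (same identifiers as in the hunk): |
| |
|     if (pgtable) { |
|         pgtable_trans_huge_deposit(mm, pmd, pgtable); |
|         mm_inc_nr_ptes(mm);          /* accounted only when deposited */ |
|         pgtable = NULL;              /* ownership passed to the deposit */ |
|     } |
|     ... |
| out_unlock: |
|     spin_unlock(ptl); |
|     if (pgtable)                     /* early-return path: not deposited */ |
|         pte_free(mm, pgtable); |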