| From: Levi Yun <ppbuk5246@gmail.com> |
| Subject: damon: use pmdp_get instead of directly dereferencing pmd |
| Date: Fri, 28 Jul 2023 06:21:57 +0900 |
| |
| As with ptep_get(), use the pmdp_get() wrapper when accessing the pmd value |
| instead of directly dereferencing the pmd pointer. |
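| |
| For illustration only, a minimal sketch of the pattern this patch applies |
| (the helper name below is hypothetical and not part of the patch): |
| |
|     /* needs <linux/pgtable.h>; hypothetical example, not in this patch */ |
|     static bool example_pmd_check(pmd_t *pmd) |
|     { |
|             pmd_t pmde = pmdp_get(pmd);     /* read the entry via the wrapper */ |
| |
|             if (!pmd_present(pmde))         /* test the snapshot, not *pmd */ |
|                     return false; |
|             return pmd_trans_huge(pmde); |
|     } |
| |
| Reading the entry once through pmdp_get() keeps the access behind the |
| architecture-overridable accessor and lets later checks reuse the same |
| snapshot instead of re-dereferencing *pmd. |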
| |
| Link: https://lkml.kernel.org/r/20230727212157.2985025-1-ppbuk5246@gmail.com |
| Signed-off-by: Levi Yun <ppbuk5246@gmail.com> |
| Reviewed-by: SeongJae Park <sj@kernel.org> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| mm/damon/ops-common.c | 2 +- |
| mm/damon/paddr.c | 2 +- |
| mm/damon/vaddr.c | 23 +++++++++++++++-------- |
| 3 files changed, 17 insertions(+), 10 deletions(-) |
| |
| --- a/mm/damon/ops-common.c~damon-use-pmdp_get-instead-of-drectly-dereferencing-pmd |
| +++ a/mm/damon/ops-common.c |
| @@ -54,7 +54,7 @@ void damon_ptep_mkold(pte_t *pte, struct |
| void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr) |
| { |
| #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| - struct folio *folio = damon_get_folio(pmd_pfn(*pmd)); |
| + struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd))); |
| |
| if (!folio) |
| return; |
| --- a/mm/damon/paddr.c~damon-use-pmdp_get-instead-of-drectly-dereferencing-pmd |
| +++ a/mm/damon/paddr.c |
| @@ -94,7 +94,7 @@ static bool __damon_pa_young(struct foli |
| mmu_notifier_test_young(vma->vm_mm, addr); |
| } else { |
| #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| - *accessed = pmd_young(*pvmw.pmd) || |
| + *accessed = pmd_young(pmdp_get(pvmw.pmd)) || |
| !folio_test_idle(folio) || |
| mmu_notifier_test_young(vma->vm_mm, addr); |
| #else |
| --- a/mm/damon/vaddr.c~damon-use-pmdp_get-instead-of-drectly-dereferencing-pmd |
| +++ a/mm/damon/vaddr.c |
| @@ -301,16 +301,19 @@ static int damon_mkold_pmd_entry(pmd_t * |
| unsigned long next, struct mm_walk *walk) |
| { |
| pte_t *pte; |
| + pmd_t pmde; |
| spinlock_t *ptl; |
| |
| - if (pmd_trans_huge(*pmd)) { |
| + if (pmd_trans_huge(pmdp_get(pmd))) { |
| ptl = pmd_lock(walk->mm, pmd); |
| - if (!pmd_present(*pmd)) { |
| + pmde = pmdp_get(pmd); |
| + |
| + if (!pmd_present(pmde)) { |
| spin_unlock(ptl); |
| return 0; |
| } |
| |
| - if (pmd_trans_huge(*pmd)) { |
| + if (pmd_trans_huge(pmde)) { |
| damon_pmdp_mkold(pmd, walk->vma, addr); |
| spin_unlock(ptl); |
| return 0; |
| @@ -439,21 +442,25 @@ static int damon_young_pmd_entry(pmd_t * |
| struct damon_young_walk_private *priv = walk->private; |
| |
| #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| - if (pmd_trans_huge(*pmd)) { |
| + if (pmd_trans_huge(pmdp_get(pmd))) { |
| + pmd_t pmde; |
| + |
| ptl = pmd_lock(walk->mm, pmd); |
| - if (!pmd_present(*pmd)) { |
| + pmde = pmdp_get(pmd); |
| + |
| + if (!pmd_present(pmde)) { |
| spin_unlock(ptl); |
| return 0; |
| } |
| |
| - if (!pmd_trans_huge(*pmd)) { |
| + if (!pmd_trans_huge(pmde)) { |
| spin_unlock(ptl); |
| goto regular_page; |
| } |
| - folio = damon_get_folio(pmd_pfn(*pmd)); |
| + folio = damon_get_folio(pmd_pfn(pmde)); |
| if (!folio) |
| goto huge_out; |
| - if (pmd_young(*pmd) || !folio_test_idle(folio) || |
| + if (pmd_young(pmde) || !folio_test_idle(folio) || |
| mmu_notifier_test_young(walk->mm, |
| addr)) |
| priv->young = true; |
| _ |