From: Dan Williams <dan.j.williams@intel.com>
Subject: fsdax: hold dax lock over mapping insertion
Date: Fri, 14 Oct 2022 16:57:37 -0700

In preparation for dax_insert_entry() to start taking page and pgmap
references, ensure that page->pgmap is valid by holding dax_read_lock()
over both dax_direct_access() and dax_insert_entry().

I.e. the code that wants to elevate the reference count of a pgmap page
from 0 -> 1 must ensure that the pgmap is not exiting and will not start
exiting until the proper references have been taken.
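
As a minimal userspace sketch of that invariant (a pthread rwlock and a
plain atomic stand in for dax_read_lock()/dax_read_unlock() and the page
reference count; all names here are hypothetical and this is not the
kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the dax lock and a pgmap page's state. */
static pthread_rwlock_t pgmap_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int page_refs;	/* 0 == idle, page may be reclaimed */
static bool pgmap_exiting;	/* written only under the write lock */

/* Fault path: the 0 -> 1 elevation happens entirely under the read
 * lock, so teardown cannot begin between "is the pgmap live?" and
 * "take the reference". */
static bool take_page_reference(void)
{
	bool ok = false;

	pthread_rwlock_rdlock(&pgmap_lock);	/* dax_read_lock() */
	if (!pgmap_exiting) {
		atomic_fetch_add(&page_refs, 1);
		ok = true;
	}
	pthread_rwlock_unlock(&pgmap_lock);	/* dax_read_unlock() */
	return ok;
}

/* Teardown path: once the write lock is taken, no new 0 -> 1
 * elevations are possible, so waiting for outstanding references
 * to drain is race-free. */
static void pgmap_exit(void)
{
	pthread_rwlock_wrlock(&pgmap_lock);
	pgmap_exiting = true;
	pthread_rwlock_unlock(&pgmap_lock);
	while (atomic_load(&page_refs))
		;	/* in the kernel this would be a proper wait */
}

int main(void)
{
	if (take_page_reference())
		atomic_fetch_sub(&page_refs, 1);	/* put_page() analog */
	pgmap_exit();
	printf("refs at exit: %d\n", atomic_load(&page_refs));
	return 0;
}

The patch below establishes exactly this ordering in dax_fault_iter():
take the lock, look up the pfn, insert the mapping entry, then drop the
lock.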

Link: https://lkml.kernel.org/r/166579185727.2236710.8711235794537270051.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Jan Kara <jack@suse.cz>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Dave Chinner <david@fromorbit.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Karol Herbst <kherbst@redhat.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Lyude Paul <lyude@redhat.com>
Cc: "Pan, Xinhui" <Xinhui.Pan@amd.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 fs/dax.c |   12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

--- a/fs/dax.c~fsdax-hold-dax-lock-over-mapping-insertion
+++ a/fs/dax.c
@@ -1107,10 +1107,9 @@ static int dax_iomap_direct_access(const
 		size_t size, void **kaddr, pfn_t *pfnp)
 {
 	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
-	int id, rc = 0;
 	long length;
+	int rc = 0;
 
-	id = dax_read_lock();
 	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
 			DAX_ACCESS, kaddr, pfnp);
 	if (length < 0) {
@@ -1135,7 +1134,6 @@ out_check_addr:
 	if (!*kaddr)
 		rc = -EFAULT;
 out:
-	dax_read_unlock(id);
 	return rc;
 }
 
@@ -1591,7 +1589,7 @@ static vm_fault_t dax_fault_iter(struct
 	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
 	bool write = iter->flags & IOMAP_WRITE;
 	unsigned long entry_flags = pmd ? DAX_PMD : 0;
-	int err = 0;
+	int err = 0, id;
 	pfn_t pfn;
 	void *kaddr;
 
@@ -1611,11 +1609,15 @@ static vm_fault_t dax_fault_iter(struct
 		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
 	}
 
+	id = dax_read_lock();
 	err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
-	if (err)
+	if (err) {
+		dax_read_unlock(id);
 		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
+	}
 
 	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
+	dax_read_unlock(id);
 
 	if (write &&
 	    srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) {
_