| From 9ff1b6466a291a33389c4a9c7f3f9b64d62df40a Mon Sep 17 00:00:00 2001 |
| From: Yishai Hadas <yishaih@mellanox.com> |
| Date: Sun, 22 Dec 2019 14:46:49 +0200 |
| Subject: IB/core: Fix ODP with IB_ACCESS_HUGETLB handling |
| |
| From: Yishai Hadas <yishaih@mellanox.com> |
| |
| commit 9ff1b6466a291a33389c4a9c7f3f9b64d62df40a upstream. |
| |
| As VMAs for a given range might not be available as part of the |
| registration phase in ODP, ib_init_umem_odp() considers the expected |
| page shift value that was previously set and initializes its internals |
| accordingly. |
| |
| If memory isn't backed by physical contiguous pages aligned to a hugepage |
| boundary an error will be set as part of the page fault flow and come back |
| to the user as some failed RDMA operation. |
| |
| Fixes: 0008b84ea9af ("IB/umem: Add support to huge ODP") |
| Link: https://lore.kernel.org/r/20191222124649.52300-4-leon@kernel.org |
| Signed-off-by: Yishai Hadas <yishaih@mellanox.com> |
| Reviewed-by: Artemy Kovalyov <artemyko@mellanox.com> |
| Signed-off-by: Leon Romanovsky <leonro@mellanox.com> |
| Reviewed-by: Jason Gunthorpe <jgg@mellanox.com> |
| Signed-off-by: Jason Gunthorpe <jgg@mellanox.com> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| drivers/infiniband/core/umem_odp.c | 21 ++++----------------- |
| 1 file changed, 4 insertions(+), 17 deletions(-) |
| |
| --- a/drivers/infiniband/core/umem_odp.c |
| +++ b/drivers/infiniband/core/umem_odp.c |
| @@ -241,22 +241,10 @@ struct ib_umem_odp *ib_umem_odp_get(stru |
| umem_odp->umem.owning_mm = mm = current->mm; |
| umem_odp->notifier.ops = ops; |
| |
| - umem_odp->page_shift = PAGE_SHIFT; |
| - if (access & IB_ACCESS_HUGETLB) { |
| - struct vm_area_struct *vma; |
| - struct hstate *h; |
| - |
| - down_read(&mm->mmap_sem); |
| - vma = find_vma(mm, ib_umem_start(umem_odp)); |
| - if (!vma || !is_vm_hugetlb_page(vma)) { |
| - up_read(&mm->mmap_sem); |
| - ret = -EINVAL; |
| - goto err_free; |
| - } |
| - h = hstate_vma(vma); |
| - umem_odp->page_shift = huge_page_shift(h); |
| - up_read(&mm->mmap_sem); |
| - } |
| + if (access & IB_ACCESS_HUGETLB) |
| + umem_odp->page_shift = HPAGE_SHIFT; |
| + else |
| + umem_odp->page_shift = PAGE_SHIFT; |
| |
| umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID); |
| ret = ib_init_umem_odp(umem_odp, ops); |
| @@ -266,7 +254,6 @@ struct ib_umem_odp *ib_umem_odp_get(stru |
| |
| err_put_pid: |
| put_pid(umem_odp->tgid); |
| -err_free: |
| kfree(umem_odp); |
| return ERR_PTR(ret); |
| } |