From 697848045218a25e85ed2e7dc4d984dcff9c69b4 Mon Sep 17 00:00:00 2001
From: Danit Goldberg <danitg@mellanox.com>
Date: Mon, 16 Sep 2019 09:48:17 +0300
Subject: [PATCH] IB/mlx5: Use the original address for the page during
 free_pages

commit 130c2c576e75efaea9cd321ec4b171cc93cd0030 upstream.

The removal of 'buffer' in the patch below caused free_page() to use a
value that had been offset since the wqe pointer is adjusted while the
routine runs.

The current implementation of free_pages() rounds down to a pfn,
discarding the adjustment, but this is not the right way to use the
API. Preserve the initial value and use it for free_page().

Fixes: 0f51427bd097 ("RDMA/mlx5: Cleanup WQE page fault handler")
Link: https://lore.kernel.org/r/20190916064818.19823-2-leon@kernel.org
Signed-off-by: Danit Goldberg <danitg@mellanox.com>
Reviewed-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>

diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index fda3dfd6f87b..e0ecb540c67e 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1205,7 +1205,7 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
 {
	bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
	u16 wqe_index = pfault->wqe.wqe_index;
-	void *wqe = NULL, *wqe_end = NULL;
+	void *wqe, *wqe_start = NULL, *wqe_end = NULL;
	u32 bytes_mapped, total_wqe_bytes;
	struct mlx5_core_rsc_common *res;
	int resume_with_error = 1;
@@ -1226,12 +1226,13 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
		goto resolve_page_fault;
	}

-	wqe = (void *)__get_free_page(GFP_KERNEL);
-	if (!wqe) {
+	wqe_start = (void *)__get_free_page(GFP_KERNEL);
+	if (!wqe_start) {
		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
		goto resolve_page_fault;
	}

+	wqe = wqe_start;
	qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
	if (qp && sq) {
		ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
@@ -1286,7 +1287,7 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
		    pfault->wqe.wq_num, resume_with_error,
		    pfault->type);
	mlx5_core_res_put(res);
-	free_page((unsigned long)wqe);
+	free_page((unsigned long)wqe_start);
 }

 static int pages_in_range(u64 address, u32 length)
-- 
2.27.0
