| From 404e5a12691fe797486475fe28cc0b80cb8bef2c Mon Sep 17 00:00:00 2001 |
| From: Shay Drory <shayd@nvidia.com> |
| Date: Thu, 3 Jun 2021 16:19:39 +0300 |
| Subject: RDMA/mlx4: Do not map the core_clock page to user space unless enabled |
| |
| From: Shay Drory <shayd@nvidia.com> |
| |
| commit 404e5a12691fe797486475fe28cc0b80cb8bef2c upstream. |
| |
| Currently when mlx4 maps the hca_core_clock page to the user space there |
| are read-modifiable registers, one of which is a semaphore, on this page |
| as well as the clock counter. If the user reads the wrong offset, they can |
| modify the semaphore and hang the device. |
| the semaphore and hang the device. |
| |
| Do not map the hca_core_clock page to the user space unless the device has |
| been put in a backwards compatibility mode to support this feature. |
| |
| After this patch, mlx4 core_clock won't be mapped to user space on the |
| majority of existing devices and the uverbs device time feature in |
| ibv_query_rt_values_ex() will be disabled. |
| |
| Fixes: 52033cfb5aab ("IB/mlx4: Add mmap call to map the hardware clock") |
| Link: https://lore.kernel.org/r/9632304e0d6790af84b3b706d8c18732bc0d5e27.1622726305.git.leonro@nvidia.com |
| Signed-off-by: Shay Drory <shayd@nvidia.com> |
| Signed-off-by: Leon Romanovsky <leonro@nvidia.com> |
| Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| --- |
| drivers/infiniband/hw/mlx4/main.c | 5 +---- |
| drivers/net/ethernet/mellanox/mlx4/fw.c | 3 +++ |
| drivers/net/ethernet/mellanox/mlx4/fw.h | 1 + |
| drivers/net/ethernet/mellanox/mlx4/main.c | 6 ++++++ |
| include/linux/mlx4/device.h | 1 + |
| 5 files changed, 12 insertions(+), 4 deletions(-) |
| |
| --- a/drivers/infiniband/hw/mlx4/main.c |
| +++ b/drivers/infiniband/hw/mlx4/main.c |
| @@ -580,12 +580,9 @@ static int mlx4_ib_query_device(struct i |
| props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT; |
| props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD; |
| |
| - if (!mlx4_is_slave(dev->dev)) |
| - err = mlx4_get_internal_clock_params(dev->dev, &clock_params); |
| - |
| if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) { |
| resp.response_length += sizeof(resp.hca_core_clock_offset); |
| - if (!err && !mlx4_is_slave(dev->dev)) { |
| + if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) { |
| resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET; |
| resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE; |
| } |
| --- a/drivers/net/ethernet/mellanox/mlx4/fw.c |
| +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c |
| @@ -823,6 +823,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev * |
| #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 |
| #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8 |
| #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac |
| +#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1 |
| #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc |
| #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0 |
| #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2 |
| @@ -841,6 +842,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev * |
| |
| if (mlx4_is_mfunc(dev)) |
| disable_unsupported_roce_caps(outbox); |
| + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER); |
| + dev_cap->map_clock_to_user = field & 0x80; |
| MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET); |
| dev_cap->reserved_qps = 1 << (field & 0xf); |
| MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET); |
| --- a/drivers/net/ethernet/mellanox/mlx4/fw.h |
| +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h |
| @@ -131,6 +131,7 @@ struct mlx4_dev_cap { |
| u32 health_buffer_addrs; |
| struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; |
| bool wol_port[MLX4_MAX_PORTS + 1]; |
| + bool map_clock_to_user; |
| }; |
| |
| struct mlx4_func_cap { |
| --- a/drivers/net/ethernet/mellanox/mlx4/main.c |
| +++ b/drivers/net/ethernet/mellanox/mlx4/main.c |
| @@ -498,6 +498,7 @@ static int mlx4_dev_cap(struct mlx4_dev |
| } |
| } |
| |
| + dev->caps.map_clock_to_user = dev_cap->map_clock_to_user; |
| dev->caps.uar_page_size = PAGE_SIZE; |
| dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; |
| dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; |
| @@ -1948,6 +1949,11 @@ int mlx4_get_internal_clock_params(struc |
| if (mlx4_is_slave(dev)) |
| return -EOPNOTSUPP; |
| |
| + if (!dev->caps.map_clock_to_user) { |
| + mlx4_dbg(dev, "Map clock to user is not supported.\n"); |
| + return -EOPNOTSUPP; |
| + } |
| + |
| if (!params) |
| return -EINVAL; |
| |
| --- a/include/linux/mlx4/device.h |
| +++ b/include/linux/mlx4/device.h |
| @@ -630,6 +630,7 @@ struct mlx4_caps { |
| bool wol_port[MLX4_MAX_PORTS + 1]; |
| struct mlx4_rate_limit_caps rl_caps; |
| u32 health_buffer_addrs; |
| + bool map_clock_to_user; |
| }; |
| |
| struct mlx4_buf_list { |