From 840ed120ff1bcde7cbbe8a7005800e07211492bf Mon Sep 17 00:00:00 2001
From: Sasha Levin <sashal@kernel.org>
Date: Tue, 28 Aug 2018 14:45:29 +0300
Subject: IB/mlx5: Don't hold spin lock while checking device state

From: Parav Pandit <parav@mellanox.com>

[ Upstream commit 6c75520f7e5a6a353f3b332509d205e213d05855 ]

The mdev->state device state is not protected by the spin lock of the
QP whose WRs are being processed. Therefore, there is no need to hold
the spin lock while checking the mdev state.

Given that a device fatal error is an unlikely situation, wrap the
condition check with unlikely().

Additionally, kernel QP1 is also a kernel ULP for which soft CQEs need
to be generated. Therefore, check for a device fatal error before
processing QP1 work requests.
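
As an illustration, here is a minimal userspace analogue of the
resulting pattern (hypothetical names: device_state, qp_lock,
post_work; C11 atomics and a pthread mutex stand in for the driver's
device state and per-QP spinlock, so this is a sketch, not the driver
code): the unprotected fatal-error state is checked, wrapped in
unlikely(), before the per-QP lock is taken.

  #include <errno.h>
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>

  #define unlikely(x) __builtin_expect(!!(x), 0)

  enum dev_state { DEV_STATE_UP, DEV_STATE_INTERNAL_ERROR };

  static _Atomic enum dev_state device_state = DEV_STATE_UP;
  static pthread_mutex_t qp_lock = PTHREAD_MUTEX_INITIALIZER;

  static int post_work(int wr)
  {
      /* Fail fast; the device state is not protected by qp_lock. */
      if (unlikely(atomic_load(&device_state) ==
                   DEV_STATE_INTERNAL_ERROR))
          return -EIO;

      pthread_mutex_lock(&qp_lock);
      printf("posted WR %d\n", wr); /* stand-in for send-queue work */
      pthread_mutex_unlock(&qp_lock);
      return 0;
  }

  int main(void)
  {
      if (post_work(1))
          fprintf(stderr, "post failed\n");
      return 0;
  }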

Fixes: 89ea94a7b6c4 ("IB/mlx5: Reset flow support for IB kernel ULPs")
Signed-off-by: Parav Pandit <parav@mellanox.com>
Reviewed-by: Daniel Jurgens <danielj@mellanox.com>
Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/infiniband/hw/mlx5/qp.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 2e7230392a498..ef0f710587ad8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4407,6 +4407,12 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 	u8 next_fence = 0;
 	u8 fence;
 
+	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+		     !drain)) {
+		*bad_wr = wr;
+		return -EIO;
+	}
+
 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
 		return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
 
@@ -4416,13 +4422,6 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
 
-	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
-		err = -EIO;
-		*bad_wr = wr;
-		nreq = 0;
-		goto out;
-	}
-
 	for (nreq = 0; wr; nreq++, wr = wr->next) {
 		if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
 			mlx5_ib_warn(dev, "\n");
@@ -4737,18 +4736,17 @@ static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	int ind;
 	int i;
 
+	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+		     !drain)) {
+		*bad_wr = wr;
+		return -EIO;
+	}
+
 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
 		return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
 
 	spin_lock_irqsave(&qp->rq.lock, flags);
 
-	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
-		err = -EIO;
-		*bad_wr = wr;
-		nreq = 0;
-		goto out;
-	}
-
 	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
 
 	for (nreq = 0; wr; nreq++, wr = wr->next) {
-- 
2.20.1
