From 83327e24af0bde71b5325006ce4cc22938ed2e3a Mon Sep 17 00:00:00 2001
From: Yuval Basson <ybason@marvell.com>
Date: Wed, 8 Jul 2020 22:55:26 +0300
Subject: [PATCH] RDMA/qedr: SRQ's bug fixes
commit acca72e2b031b9fbb4184511072bd246a0abcebc upstream.
QPs that share the same SRQ but work on different CQs and run in parallel
on different CPUs can race when maintaining the SRQ consumer count, which
leads to the FW running out of SRQs. Update the consumer count
atomically. Also make sure wqe_prod is updated after sge_prod, as the FW
requires.
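
[Editor's illustration, not part of the upstream patch: a minimal
userspace sketch of the lost-update race that the atomic_t conversion
closes. The two threads stand in for CQ processing on two CPUs; all names
here are hypothetical, and C11 atomic_fetch_add() stands in for the
kernel's atomic_inc().]

  /* race_demo.c - hypothetical demo; build with: gcc -O2 -pthread race_demo.c */
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>

  #define INCS 1000000

  static uint32_t plain_cnt;      /* like the old  u32      wr_cons_cnt */
  static atomic_uint atomic_cnt;  /* like the new  atomic_t wr_cons_cnt */

  static void *consumer(void *arg)
  {
          (void)arg;
          for (int i = 0; i < INCS; i++) {
                  plain_cnt++;    /* load+add+store, not atomic: updates get lost */
                  atomic_fetch_add(&atomic_cnt, 1);  /* single atomic RMW: none lost */
          }
          return NULL;
  }

  int main(void)
  {
          pthread_t t1, t2;

          pthread_create(&t1, NULL, consumer, NULL);
          pthread_create(&t2, NULL, consumer, NULL);
          pthread_join(t1, NULL);
          pthread_join(t2, NULL);

          /* plain_cnt usually ends up below 2*INCS; atomic_cnt never does */
          printf("plain=%u atomic=%u expected=%u\n",
                 plain_cnt, atomic_load(&atomic_cnt), 2U * INCS);
          return 0;
  }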
Fixes: 3491c9e799fb ("qedr: Add support for kernel mode SRQ's")
Link: https://lore.kernel.org/r/20200708195526.31040-1-ybason@marvell.com
Signed-off-by: Michal Kalderon <mkalderon@marvell.com>
Signed-off-by: Yuval Basson <ybason@marvell.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
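---

[Editor's note in the format-patch notes area, not applied by git am: in
qedr_srq_elem_left() below, the (u32) cast on atomic_read() (which
returns a plain int) keeps the producer/consumer subtraction explicitly
in unsigned 32-bit arithmetic, so the "used" count stays correct even
after the free-running counters wrap. A minimal sketch of that wrap
behaviour, with hypothetical names:]

  /* wrap_demo.c - illustration only; counters are free-running u32 values */
  #include <assert.h>
  #include <stdint.h>

  static uint32_t elems_used(uint32_t prod_cnt, uint32_t cons_cnt)
  {
          return prod_cnt - cons_cnt;     /* well-defined modulo 2^32 */
  }

  int main(void)
  {
          /* producer wrapped past zero, consumer not yet: 10 in flight */
          assert(elems_used(5, UINT32_MAX - 4) == 10);

          /* ordinary case, no wrap */
          assert(elems_used(100, 90) == 10);
          return 0;
  }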
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 8a9b0ee7dbc5..bbf72944b720 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -347,10 +347,10 @@ struct qedr_srq_hwq_info {
 	u32 wqe_prod;
 	u32 sge_prod;
 	u32 wr_prod_cnt;
-	u32 wr_cons_cnt;
+	atomic_t wr_cons_cnt;
 	u32 num_elems;
 
-	u32 *virt_prod_pair_addr;
+	struct rdma_srq_producers *virt_prod_pair_addr;
 	dma_addr_t phy_prod_pair_addr;
 };
 
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 2b1ec2d43e74..9d3204410ead 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3497,7 +3497,7 @@ static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
 	 * count and consumer count and subtract it from max
 	 * work request supported so that we get elements left.
 	 */
-	used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
+	used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
 
 	return hw_srq->max_wr - used;
 }
@@ -3512,7 +3512,6 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 	unsigned long flags;
 	int status = 0;
 	u32 num_sge;
-	u32 offset;
 
 	spin_lock_irqsave(&srq->lock, flags);
 
@@ -3525,7 +3524,8 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 		if (!qedr_srq_elem_left(hw_srq) ||
 		    wr->num_sge > srq->hw_srq.max_sges) {
 			DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
-			       hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
+			       hw_srq->wr_prod_cnt,
+			       atomic_read(&hw_srq->wr_cons_cnt),
 			       wr->num_sge, srq->hw_srq.max_sges);
 			status = -ENOMEM;
 			*bad_wr = wr;
@@ -3559,22 +3559,20 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 			hw_srq->sge_prod++;
 		}
 
-		/* Flush WQE and SGE information before
+		/* Update WQE and SGE information before
 		 * updating producer.
 		 */
-		wmb();
+		dma_wmb();
 
 		/* SRQ producer is 8 bytes. Need to update SGE producer index
 		 * in first 4 bytes and need to update WQE producer in
 		 * next 4 bytes.
 		 */
-		*srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
-		offset = offsetof(struct rdma_srq_producers, wqe_prod);
-		*((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
-			hw_srq->wqe_prod;
+		srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod;
+		/* Make sure sge producer is updated first */
+		dma_wmb();
+		srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod;
 
-		/* Flush producer after updating it. */
-		wmb();
 		wr = wr->next;
 	}
 
@@ -3993,7 +3991,7 @@ static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
 	} else {
 		__process_resp_one(dev, qp, cq, wc, resp, wr_id);
 	}
-	srq->hw_srq.wr_cons_cnt++;
+	atomic_inc(&srq->hw_srq.wr_cons_cnt);
 
 	return 1;
 }
--
2.27.0
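
[Editor's appendix, not part of the patch: the producer-pair hunk above
publishes in a fixed order because the FW treats a new wqe_prod as the
signal that the whole 8-byte producer pair is valid, so payload writes
must be visible before sge_prod, and sge_prod before wqe_prod. Below is a
loose userspace analogue of that publish order; the C11 release fences
stand in for dma_wmb() only for illustration, and every name is
hypothetical.]

  /* publish_order.c - illustration only */
  #include <stdatomic.h>
  #include <stdint.h>

  struct srq_producers {          /* mirrors the 8-byte producer pair */
          uint32_t sge_prod;      /* first 4 bytes: SGE producer index */
          uint32_t wqe_prod;      /* next 4 bytes:  WQE producer index */
  };

  static void publish(volatile struct srq_producers *p,
                      uint32_t sge, uint32_t wqe)
  {
          /* first barrier: WQE/SGE payload writes must be visible first */
          atomic_thread_fence(memory_order_release);
          p->sge_prod = sge;

          /* second barrier: sge_prod must land before wqe_prod, since
           * the consumer treats a new wqe_prod as "the pair is valid"
           */
          atomic_thread_fence(memory_order_release);
          p->wqe_prod = wqe;
  }

  int main(void)
  {
          static struct srq_producers prods;  /* stands in for the DMA page */

          publish(&prods, 4, 1);  /* e.g. one WQE carrying four SGEs */
          return 0;
  }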