| From 43dacbb1c5257b7a4174a165c3709fd257ef5926 Mon Sep 17 00:00:00 2001 |
| From: Jason Gunthorpe <jgg@mellanox.com> |
| Date: Tue, 10 Mar 2020 11:25:31 +0200 |
| Subject: [PATCH] RDMA/cm: Fix ordering of xa_alloc_cyclic() in |
| ib_create_cm_id() |
| |
| commit e8dc4e885c459343970b25acd9320fe9ee5492e7 upstream. |
| |
| xa_alloc_cyclic() is an SMP release to be paired with some later acquire |
| during xa_load() as part of cm_acquire_id(). |
| |
| As such, xa_alloc_cyclic() must be done after the cm_id is fully |
| initialized, in particular, it absolutely must be after the |
| refcount_set(), otherwise the refcount_inc() in cm_acquire_id() may not |
| see the set. |
| |
| As there are several cases where a reader will be able to use the |
| id.local_id after cm_acquire_id in the IB_CM_IDLE state there needs to be |
| an unfortunate split into a NULL allocate and a finalizing xa_store. |
| |
| Fixes: a977049dacde ("[PATCH] IB: Add the kernel CM implementation") |
| Link: https://lore.kernel.org/r/20200310092545.251365-2-leon@kernel.org |
| Signed-off-by: Leon Romanovsky <leonro@mellanox.com> |
| Signed-off-by: Jason Gunthorpe <jgg@mellanox.com> |
| Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> |
| |
| diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c |
| index 319e4b4ae639..793fa96944b4 100644 |
| --- a/drivers/infiniband/core/cm.c |
| +++ b/drivers/infiniband/core/cm.c |
| @@ -597,18 +597,6 @@ static int cm_init_av_by_path(struct sa_path_rec *path, |
| return 0; |
| } |
| |
| -static int cm_alloc_id(struct cm_id_private *cm_id_priv) |
| -{ |
| - int err; |
| - u32 id; |
| - |
| - err = xa_alloc_cyclic_irq(&cm.local_id_table, &id, cm_id_priv, |
| - xa_limit_32b, &cm.local_id_next, GFP_KERNEL); |
| - |
| - cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; |
| - return err; |
| -} |
| - |
| static u32 cm_local_id(__be32 local_id) |
| { |
| return (__force u32) (local_id ^ cm.random_id_operand); |
| @@ -862,6 +850,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, |
| void *context) |
| { |
| struct cm_id_private *cm_id_priv; |
| + u32 id; |
| int ret; |
| |
| cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL); |
| @@ -873,9 +862,6 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, |
| cm_id_priv->id.cm_handler = cm_handler; |
| cm_id_priv->id.context = context; |
| cm_id_priv->id.remote_cm_qpn = 1; |
| - ret = cm_alloc_id(cm_id_priv); |
| - if (ret) |
| - goto error; |
| |
| spin_lock_init(&cm_id_priv->lock); |
| init_completion(&cm_id_priv->comp); |
| @@ -884,11 +870,20 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, |
| INIT_LIST_HEAD(&cm_id_priv->altr_list); |
| atomic_set(&cm_id_priv->work_count, -1); |
| atomic_set(&cm_id_priv->refcount, 1); |
| + |
| + ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b, |
| + &cm.local_id_next, GFP_KERNEL); |
| + if (ret) |
| + goto error; |
| + cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; |
| + xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id), |
| + cm_id_priv, GFP_KERNEL); |
| + |
| return &cm_id_priv->id; |
| |
| error: |
| kfree(cm_id_priv); |
| - return ERR_PTR(-ENOMEM); |
| + return ERR_PTR(ret); |
| } |
| EXPORT_SYMBOL(ib_create_cm_id); |
| |
| -- |
| 2.7.4 |
| |