IB/mlx5: Flow through a more detailed return code from get_prefetchable_mr()
author	Jason Gunthorpe <jgg@nvidia.com>
Fri, 3 Sep 2021 08:48:15 +0000 (16:48 +0800)
committer	Jason Gunthorpe <jgg@nvidia.com>
Fri, 1 Oct 2021 14:40:07 +0000 (11:40 -0300)
The error returns for the various cases detected by get_prefetchable_mr()
get confused as they flow back to userspace. Properly label each error path
and flow the error code back to the system call.

Link: https://lore.kernel.org/r/20210928170846.GA1721590@nvidia.com
Suggested-by: Li Zhijian <lizhijian@cn.fujitsu.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
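
For readers less familiar with the convention, the sketch below illustrates the
ERR_PTR()/IS_ERR()/PTR_ERR() pattern that get_prefetchable_mr() switches to. The
struct, table and helper names here are invented for illustration; only the
<linux/err.h> helpers are real kernel API.

	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Invented example type; stands in for the mkey/MR lookup. */
	struct lookup_stub {
		u32 key;
		bool writable;
	};

	static struct lookup_stub stub_table[1] = {
		{ .key = 42, .writable = false },
	};

	/* Return the entry or a pointer-encoded errno, never a bare NULL. */
	static struct lookup_stub *stub_get(u32 key, bool need_write)
	{
		struct lookup_stub *s = &stub_table[0];

		if (s->key != key)
			return ERR_PTR(-ENOENT);	/* no such key */
		if (need_write && !s->writable)
			return ERR_PTR(-EPERM);		/* exists, but not allowed */
		return s;
	}

	static int stub_use(u32 key, bool need_write)
	{
		struct lookup_stub *s = stub_get(key, need_write);

		if (IS_ERR(s))
			return PTR_ERR(s);	/* the precise errno flows upward */
		/* ... use s ... */
		return 0;
	}
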
drivers/infiniband/hw/mlx5/odp.c

index d0d98e584ebcc312b6f890e5330f16ccb4d73e7f..77890a85fc2dd32d1b1d9f4f373aadf4b30726ad 100644
@@ -1708,20 +1708,26 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
 
        xa_lock(&dev->odp_mkeys);
        mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
-       if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
+       if (!mmkey || mmkey->key != lkey) {
+               mr = ERR_PTR(-ENOENT);
                goto end;
+       }
+       if (mmkey->type != MLX5_MKEY_MR) {
+               mr = ERR_PTR(-EINVAL);
+               goto end;
+       }
 
        mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
 
        if (mr->ibmr.pd != pd) {
-               mr = NULL;
+               mr = ERR_PTR(-EPERM);
                goto end;
        }
 
        /* prefetch with write-access must be supported by the MR */
        if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
            !mr->umem->writable) {
-               mr = NULL;
+               mr = ERR_PTR(-EPERM);
                goto end;
        }
 
@@ -1753,7 +1759,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
        destroy_prefetch_work(work);
 }
 
-static bool init_prefetch_work(struct ib_pd *pd,
+static int init_prefetch_work(struct ib_pd *pd,
                               enum ib_uverbs_advise_mr_advice advice,
                               u32 pf_flags, struct prefetch_mr_work *work,
                               struct ib_sge *sg_list, u32 num_sge)
@@ -1764,17 +1770,19 @@ static bool init_prefetch_work(struct ib_pd *pd,
        work->pf_flags = pf_flags;
 
        for (i = 0; i < num_sge; ++i) {
-               work->frags[i].io_virt = sg_list[i].addr;
-               work->frags[i].length = sg_list[i].length;
-               work->frags[i].mr =
-                       get_prefetchable_mr(pd, advice, sg_list[i].lkey);
-               if (!work->frags[i].mr) {
+               struct mlx5_ib_mr *mr;
+
+               mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+               if (IS_ERR(mr)) {
                        work->num_sge = i;
-                       return false;
+                       return PTR_ERR(mr);
                }
+               work->frags[i].io_virt = sg_list[i].addr;
+               work->frags[i].length = sg_list[i].length;
+               work->frags[i].mr = mr;
        }
        work->num_sge = num_sge;
-       return true;
+       return 0;
 }
 
 static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
@@ -1790,8 +1798,8 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
                struct mlx5_ib_mr *mr;
 
                mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
-               if (!mr)
-                       return -ENOENT;
+               if (IS_ERR(mr))
+                       return PTR_ERR(mr);
                ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
                                   &bytes_mapped, pf_flags);
                if (ret < 0) {
@@ -1811,6 +1819,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 {
        u32 pf_flags = 0;
        struct prefetch_mr_work *work;
+       int rc;
 
        if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
                pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
@@ -1826,9 +1835,10 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
        if (!work)
                return -ENOMEM;
 
-       if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
+       rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
+       if (rc) {
                destroy_prefetch_work(work);
-               return -EINVAL;
+               return rc;
        }
        queue_work(system_unbound_wq, &work->work);
        return 0;
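
Userspace-visible effect (a sketch, not part of the patch): assuming the
rdma-core ibv_advise_mr() verb is what reaches mlx5_ib_advise_mr_prefetch(), an
application prefetching for write can now distinguish a stale or unknown lkey
(ENOENT) and a non-MR mkey (EINVAL) from a write prefetch on a read-only MR
(EPERM), rather than every failure collapsing into one code. The pd and sge
setup is assumed to exist elsewhere in the application.

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <infiniband/verbs.h>

	/* pd and sge are assumed to be created and registered elsewhere. */
	static void prefetch_for_write(struct ibv_pd *pd, struct ibv_sge *sge)
	{
		/* flags == 0: asynchronous prefetch, no flush */
		int rc = ibv_advise_mr(pd, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE,
				       0, sge, 1);

		if (rc)
			/* rc is expected to be an errno value, e.g. ENOENT,
			 * EINVAL or EPERM after this change. */
			fprintf(stderr, "prefetch failed: %s\n", strerror(rc));
	}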