asedeno.scripts.mit.edu Git - linux.git/commitdiff
RDMA/vmw_pvrdma: Do not re-calculate npages
author Yuval Shaia <yuval.shaia@oracle.com>
Sun, 26 Nov 2017 11:51:35 +0000 (13:51 +0200)
committer Jason Gunthorpe <jgg@mellanox.com>
Mon, 11 Dec 2017 23:19:42 +0000 (16:19 -0700)
There is no need to re-calculate the number of pages since it is already
done in ib_umem_get.

Signed-off-by: Yuval Shaia <yuval.shaia@oracle.com>
Acked-by: Adit Ranadive <aditr@vmware.com>
Tested-by: Adit Ranadive <aditr@vmware.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c

index 8519f3212e52b10b3bb883a2950bec9ed64d48bf..fa96fa4fb82990bc808ecd8390bd2915e8dc534a 100644 (file)
@@ -119,10 +119,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
        struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
-       int nchunks;
        int ret;
-       int entry;
-       struct scatterlist *sg;
 
        if (length == 0 || length > dev->dsr->caps.max_mr_size) {
                dev_warn(&dev->pdev->dev, "invalid mem region length\n");
@@ -137,13 +134,9 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                return ERR_CAST(umem);
        }
 
-       nchunks = 0;
-       for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
-               nchunks += sg_dma_len(sg) >> PAGE_SHIFT;
-
-       if (nchunks < 0 || nchunks > PVRDMA_PAGE_DIR_MAX_PAGES) {
+       if (umem->npages < 0 || umem->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
                dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
-                        nchunks);
+                        umem->npages);
                ret = -EINVAL;
                goto err_umem;
        }
@@ -158,7 +151,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        mr->mmr.size = length;
        mr->umem = umem;
 
-       ret = pvrdma_page_dir_init(dev, &mr->pdir, nchunks, false);
+       ret = pvrdma_page_dir_init(dev, &mr->pdir, umem->npages, false);
        if (ret) {
                dev_warn(&dev->pdev->dev,
                         "could not allocate page directory\n");
@@ -175,7 +168,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        cmd->length = length;
        cmd->pd_handle = to_vpd(pd)->pd_handle;
        cmd->access_flags = access_flags;
-       cmd->nchunks = nchunks;
+       cmd->nchunks = umem->npages;
        cmd->pdir_dma = mr->pdir.dir_dma;
 
        ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);