asedeno.scripts.mit.edu Git - linux.git/commitdiff
RDMA/umem: Minor optimizations
author: Doug Ledford <dledford@redhat.com>
Fri, 21 Sep 2018 15:30:12 +0000 (11:30 -0400)
committer: Jason Gunthorpe <jgg@mellanox.com>
Tue, 25 Sep 2018 21:19:06 +0000 (15:19 -0600)
Noticed while reviewing commit d4b4dd1b9706 ("RDMA/umem: Do not use
current->tgid to track the mm_struct") patch.  Why would we take a lock,
adjust a protected variable, drop the lock, and *then* check the input
into our protected variable adjustment?  Then we have to take the lock
again on our error unwind.  Let's just check the input early and skip
taking the locks needlessly if the input isn't valid.

It was also noticed that we set mm = current->mm, we then never modify
mm, but we still go back and reference current->mm a number of times
needlessly.  Be consistent in using the stored reference in mm.

Signed-off-by: Doug Ledford <dledford@redhat.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/core/umem.c

index fec5d489e311c9e4813b3fe3f000dd26d6145d55..1886d77099117d5d022bf682c60a0073e3c1a846 100644 (file)
@@ -152,6 +152,10 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                umem->hugetlb = 0;
 
        npages = ib_umem_num_pages(umem);
+       if (npages == 0 || npages > UINT_MAX) {
+               ret = -EINVAL;
+               goto out;
+       }
 
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
@@ -166,11 +170,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
        cur_base = addr & PAGE_MASK;
 
-       if (npages == 0 || npages > UINT_MAX) {
-               ret = -EINVAL;
-               goto vma;
-       }
-
        ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
        if (ret)
                goto vma;
@@ -224,9 +223,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 umem_release:
        __ib_umem_release(context->device, umem, 0);
 vma:
-       down_write(&current->mm->mmap_sem);
-       current->mm->pinned_vm -= ib_umem_num_pages(umem);
-       up_write(&current->mm->mmap_sem);
+       down_write(&mm->mmap_sem);
+       mm->pinned_vm -= ib_umem_num_pages(umem);
+       up_write(&mm->mmap_sem);
 out:
        if (vma_list)
                free_page((unsigned long) vma_list);