asedeno.scripts.mit.edu Git - linux.git/commitdiff
RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB
author Jason Gunthorpe <jgg@mellanox.com>
Thu, 15 Aug 2019 08:38:30 +0000 (11:38 +0300)
committer Doug Ledford <dledford@redhat.com>
Tue, 20 Aug 2019 17:44:43 +0000 (13:44 -0400)
When ODP is enabled with IB_ACCESS_HUGETLB then the required pages
should be calculated based on the extent of the MR, which is rounded
to the nearest huge page alignment.

Fixes: d2183c6f1958 ("RDMA/umem: Move page_shift from ib_umem to ib_odp_umem")
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Link: https://lore.kernel.org/r/20190815083834.9245-5-leon@kernel.org
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/core/umem.c
drivers/infiniband/hw/mlx5/mem.c

index 08da840ed7eebc151b67a61b15626d90578dea54..56553668256f5e9279de8676ce3f2d443391db30 100644 (file)
@@ -379,14 +379,9 @@ EXPORT_SYMBOL(ib_umem_release);
 
 int ib_umem_page_count(struct ib_umem *umem)
 {
-       int i;
-       int n;
+       int i, n = 0;
        struct scatterlist *sg;
 
-       if (umem->is_odp)
-               return ib_umem_num_pages(umem);
-
-       n = 0;
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
                n += sg_dma_len(sg) >> PAGE_SHIFT;
 
index fe1a76d8531cee8b5d7e347c54fdbc1c51caaac3..a40e0abf233809c9c168b0a58d9e7f2b6c076614 100644 (file)
@@ -57,9 +57,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
        int entry;
 
        if (umem->is_odp) {
-               unsigned int page_shift = to_ib_umem_odp(umem)->page_shift;
+               struct ib_umem_odp *odp = to_ib_umem_odp(umem);
+               unsigned int page_shift = odp->page_shift;
 
-               *ncont = ib_umem_page_count(umem);
+               *ncont = ib_umem_odp_num_pages(odp);
                *count = *ncont << (page_shift - PAGE_SHIFT);
                *shift = page_shift;
                if (order)