From c1d383b5785b1e0fb5fb862864712a7208219e6a Mon Sep 17 00:00:00 2001
From: Guy Shapiro
Date: Wed, 15 Apr 2015 18:17:56 +0300
Subject: [PATCH] IB/core: dma map/unmap locking optimizations

Currently, while mapping or unmapping pages for ODP, the umem mutex is
locked and unlocked once for each page. Each such lock/unlock operation
takes a few tens to hundreds of nanoseconds. This adds up to a
significant cost when mapping or unmapping a few MBs of memory.

To avoid this, lock the mutex only once per operation, not once per
page.

Signed-off-by: Guy Shapiro
Acked-by: Shachar Raindel
Reviewed-by: Sagi Grimberg
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/umem_odp.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 8b8cc6fa0ab0..aba47398880d 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -446,7 +446,6 @@ static int ib_umem_odp_map_dma_single_page(
 	int remove_existing_mapping = 0;
 	int ret = 0;
 
-	mutex_lock(&umem->odp_data->umem_mutex);
 	/*
 	 * Note: we avoid writing if seq is different from the initial seq, to
 	 * handle case of a racing notifier. This check also allows us to bail
@@ -479,8 +478,6 @@ static int ib_umem_odp_map_dma_single_page(
 	}
 
 out:
-	mutex_unlock(&umem->odp_data->umem_mutex);
-
 	/* On Demand Paging - avoid pinning the page */
 	if (umem->context->invalidate_range || !stored_page)
 		put_page(page);
@@ -586,6 +583,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 
 		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
 		user_virt += npages << PAGE_SHIFT;
+		mutex_lock(&umem->odp_data->umem_mutex);
 		for (j = 0; j < npages; ++j) {
 			ret = ib_umem_odp_map_dma_single_page(
 				umem, k, base_virt_addr, local_page_list[j],
@@ -594,6 +592,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 				break;
 			k++;
 		}
+		mutex_unlock(&umem->odp_data->umem_mutex);
 
 		if (ret < 0) {
 			/* Release left over pages when handling errors. */
@@ -633,9 +632,9 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 	 * faults from completion. We might be racing with other
 	 * invalidations, so we must make sure we free each page only
 	 * once. */
+	mutex_lock(&umem->odp_data->umem_mutex);
 	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
 		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
-		mutex_lock(&umem->odp_data->umem_mutex);
 		if (umem->odp_data->page_list[idx]) {
 			struct page *page = umem->odp_data->page_list[idx];
 			struct page *head_page = compound_head(page);
@@ -663,7 +662,7 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 			umem->odp_data->page_list[idx] = NULL;
 			umem->odp_data->dma_list[idx] = 0;
 		}
-		mutex_unlock(&umem->odp_data->umem_mutex);
 	}
+	mutex_unlock(&umem->odp_data->umem_mutex);
 }
 EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
-- 
2.45.2
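
The change boils down to hoisting the lock out of the per-page loop.
Below is a minimal, self-contained user-space C sketch of that pattern,
not part of the patch: it uses a pthread mutex in place of the kernel's
umem_mutex, and every name in it (fake_umem, map_pages_before,
map_pages_after, NPAGES) is hypothetical and for illustration only.

/*
 * Illustration only -- not kernel code. Shows the lock-hoisting
 * pattern applied by the patch: one mutex acquisition per map/unmap
 * operation instead of one per page.
 */
#include <pthread.h>
#include <stdio.h>

#define NPAGES 4

struct fake_umem {
	pthread_mutex_t umem_mutex;	/* stand-in for odp_data->umem_mutex */
	int dma_list[NPAGES];		/* stand-in for per-page mapping state */
};

/* Before the patch: lock and unlock once per page. */
static void map_pages_before(struct fake_umem *umem)
{
	for (int j = 0; j < NPAGES; ++j) {
		pthread_mutex_lock(&umem->umem_mutex);
		umem->dma_list[j] = 1;		/* "map" page j */
		pthread_mutex_unlock(&umem->umem_mutex);
	}
}

/* After the patch: one lock/unlock per operation, hoisted out of the loop. */
static void map_pages_after(struct fake_umem *umem)
{
	pthread_mutex_lock(&umem->umem_mutex);
	for (int j = 0; j < NPAGES; ++j)
		umem->dma_list[j] = 1;		/* "map" page j */
	pthread_mutex_unlock(&umem->umem_mutex);
}

int main(void)
{
	struct fake_umem umem = { .umem_mutex = PTHREAD_MUTEX_INITIALIZER };

	map_pages_before(&umem);	/* 2 * NPAGES lock operations */
	map_pages_after(&umem);		/* 2 lock operations total */
	printf("mapped %d pages both ways\n", NPAGES);
	return 0;
}

Built with cc -pthread, both helpers leave the same state behind; the
correctness rule is unchanged (no page state is touched without the
mutex held), but the number of lock acquisitions drops from one per
page to one per operation.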