asedeno.scripts.mit.edu Git - linux.git/commitdiff
mm/hmm: Use hmm_mirror not mm as an argument for hmm_range_register
author: Jason Gunthorpe <jgg@mellanox.com>
Thu, 23 May 2019 12:41:19 +0000 (09:41 -0300)
committer: Jason Gunthorpe <jgg@mellanox.com>
Mon, 10 Jun 2019 13:10:30 +0000 (10:10 -0300)
Ralph observes that hmm_range_register() can only be called by a driver
while a mirror is registered. Make this clear in the API by passing in the
mirror structure as a parameter.

This also simplifies understanding the lifetime model for struct hmm, as
the hmm pointer must be valid as part of a registered mirror so all we
need in hmm_range_register() is a simple kref_get.

Suggested-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Philip Yang <Philip.Yang@amd.com>
drivers/gpu/drm/nouveau/nouveau_svm.c
include/linux/hmm.h
mm/hmm.c

index 93ed43c413f0bbff28c393675b527376518c4319..8c92374afcf227980d659748052fad072173b773 100644 (file)
@@ -649,7 +649,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
                range.values = nouveau_svm_pfn_values;
                range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
 again:
-               ret = hmm_vma_fault(&range, true);
+               ret = hmm_vma_fault(&svmm->mirror, &range, true);
                if (ret == 0) {
                        mutex_lock(&svmm->mutex);
                        if (!hmm_vma_range_done(&range)) {
index cb01cf1fa3c08bc7f53a03a24b27b21def7aaf7b..1fba6979adf4609f5d44e3a2dae5136fdbcfe980 100644 (file)
@@ -496,7 +496,7 @@ static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
  * Please see Documentation/vm/hmm.rst for how to use the range API.
  */
 int hmm_range_register(struct hmm_range *range,
-                      struct mm_struct *mm,
+                      struct hmm_mirror *mirror,
                       unsigned long start,
                       unsigned long end,
                       unsigned page_shift);
@@ -532,7 +532,8 @@ static inline bool hmm_vma_range_done(struct hmm_range *range)
 }
 
 /* This is a temporary helper to avoid merge conflict between trees. */
-static inline int hmm_vma_fault(struct hmm_range *range, bool block)
+static inline int hmm_vma_fault(struct hmm_mirror *mirror,
+                               struct hmm_range *range, bool block)
 {
        long ret;
 
@@ -545,7 +546,7 @@ static inline int hmm_vma_fault(struct hmm_range *range, bool block)
        range->default_flags = 0;
        range->pfn_flags_mask = -1UL;
 
-       ret = hmm_range_register(range, range->vma->vm_mm,
+       ret = hmm_range_register(range, mirror,
                                 range->start, range->end,
                                 PAGE_SHIFT);
        if (ret)
index f6956d78e3cb2566fa59075c91a1165ee4d86028..22a97ada108b4e6fa7114292265c8b773a5b1b33 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -914,13 +914,13 @@ static void hmm_pfns_clear(struct hmm_range *range,
  * Track updates to the CPU page table see include/linux/hmm.h
  */
 int hmm_range_register(struct hmm_range *range,
-                      struct mm_struct *mm,
+                      struct hmm_mirror *mirror,
                       unsigned long start,
                       unsigned long end,
                       unsigned page_shift)
 {
        unsigned long mask = ((1UL << page_shift) - 1UL);
-       struct hmm *hmm;
+       struct hmm *hmm = mirror->hmm;
 
        range->valid = false;
        range->hmm = NULL;
@@ -934,20 +934,15 @@ int hmm_range_register(struct hmm_range *range,
        range->start = start;
        range->end = end;
 
-       hmm = hmm_get_or_create(mm);
-       if (!hmm)
-               return -EFAULT;
-
        /* Check if hmm_mm_destroy() was call. */
-       if (hmm->mm == NULL || hmm->dead) {
-               hmm_put(hmm);
+       if (hmm->mm == NULL || hmm->dead)
                return -EFAULT;
-       }
 
        /* Initialize range to track CPU page table updates. */
        mutex_lock(&hmm->lock);
 
        range->hmm = hmm;
+       kref_get(&hmm->kref);
        list_add_rcu(&range->list, &hmm->ranges);
 
        /*