diff --git a/mm/memory.c b/mm/memory.c
index 53bd595798617a2bcab2f483b93bacb0464943cb..e2bb51b6242e5814896b79cce637008f73a073bb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -571,8 +571,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
  * PFNMAP mappings in order to support COWable mappings.
  *
  */
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
-                            pte_t pte, bool with_public_device)
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+                           pte_t pte)
 {
        unsigned long pfn = pte_pfn(pte);
 
@@ -585,29 +585,6 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                        return NULL;
                if (is_zero_pfn(pfn))
                        return NULL;
-
-               /*
-                * Device public pages are special pages (they are ZONE_DEVICE
-                * pages but different from persistent memory). They behave
-                * allmost like normal pages. The difference is that they are
-                * not on the lru and thus should never be involve with any-
-                * thing that involve lru manipulation (mlock, numa balancing,
-                * ...).
-                *
-                * This is why we still want to return NULL for such page from
-                * vm_normal_page() so that we do not have to special case all
-                * call site of vm_normal_page().
-                */
-               if (likely(pfn <= highest_memmap_pfn)) {
-                       struct page *page = pfn_to_page(pfn);
-
-                       if (is_device_public_page(page)) {
-                               if (with_public_device)
-                                       return page;
-                               return NULL;
-                       }
-               }
-
                if (pte_devmap(pte))
                        return NULL;
 
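
With the device-public special case gone, every ZONE_DEVICE PTE takes the same path here. A hedged sketch of the resulting special-PTE branch of vm_normal_page(); only the is_zero_pfn() and pte_devmap() checks are visible in this hunk, the remaining lines are recalled from the surrounding kernel source and are illustrative only:

	if (vma->vm_ops && vma->vm_ops->find_special_page)
		return vma->vm_ops->find_special_page(vma, addr);
	if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
		return NULL;
	if (is_zero_pfn(pfn))
		return NULL;
	if (pte_devmap(pte))
		return NULL;	/* all ZONE_DEVICE pages are "not normal" now */

	print_bad_pte(vma, addr, pte, NULL);
	return NULL;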
@@ -797,17 +774,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                rss[mm_counter(page)]++;
        } else if (pte_devmap(pte)) {
                page = pte_page(pte);
-
-               /*
-                * Cache coherent device memory behave like regular page and
-                * not like persistent memory page. For more informations see
-                * MEMORY_DEVICE_CACHE_COHERENT in memory_hotplug.h
-                */
-               if (is_device_public_page(page)) {
-                       get_page(page);
-                       page_dup_rmap(page, false);
-                       rss[mm_counter(page)]++;
-               }
        }
 
 out_set_pte:
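
The block deleted here duplicated, for device-public pages, the normal-page accounting whose tail (rss[mm_counter(page)]++) is still visible at the top of this hunk. A hedged sketch of copy_one_pte()'s present-PTE path after the change, reconstructed from the surrounding function rather than shown in the diff:

	page = vm_normal_page(vma, addr, pte);
	if (page) {
		/* fork: child takes its own reference, rmap entry and rss count */
		get_page(page);
		page_dup_rmap(page, false);
		rss[mm_counter(page)]++;
	} else if (pte_devmap(pte)) {
		/* ZONE_DEVICE: only resolve the page, no extra accounting */
		page = pte_page(pte);
	}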
@@ -1063,7 +1029,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                if (pte_present(ptent)) {
                        struct page *page;
 
-                       page = _vm_normal_page(vma, addr, ptent, true);
+                       page = vm_normal_page(vma, addr, ptent);
                        if (unlikely(details) && page) {
                                /*
                                 * unmap_shared_mapping_pages() wants to
@@ -2777,13 +2743,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                        migration_entry_wait(vma->vm_mm, vmf->pmd,
                                             vmf->address);
                } else if (is_device_private_entry(entry)) {
-                       /*
-                        * For un-addressable device memory we call the pgmap
-                        * fault handler callback. The callback must migrate
-                        * the page back to some CPU accessible page.
-                        */
-                       ret = device_private_entry_fault(vma, vmf->address, entry,
-                                                vmf->flags, vmf->pmd);
+                       vmf->page = device_private_entry_to_page(entry);
+                       ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
                } else if (is_hwpoison_entry(entry)) {
                        ret = VM_FAULT_HWPOISON;
                } else {
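
The deleted comment described device_private_entry_fault() as a thin wrapper around the pgmap fault handler; the replacement calls that handler directly through the dev_pagemap_ops introduced in the same series (include/linux/memremap.h). A minimal driver-side sketch of wiring up the hook, where my_migrate_to_ram and my_pgmap_ops are hypothetical names:

#include <linux/memremap.h>	/* struct dev_pagemap_ops, MEMORY_DEVICE_PRIVATE */
#include <linux/mm.h>		/* struct vm_fault, vm_fault_t */

static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	/*
	 * Must migrate vmf->page, a device-private page, back to some
	 * CPU-accessible page (e.g. with the migrate_vma helpers) and
	 * return 0 on success or a VM_FAULT_* code on failure.
	 */
	return 0;
}

static const struct dev_pagemap_ops my_pgmap_ops = {
	.migrate_to_ram = my_migrate_to_ram,
};

/*
 * At registration time the driver's struct dev_pagemap then uses:
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->ops  = &my_pgmap_ops;
 */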
@@ -3201,19 +3162,6 @@ static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
-
-#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
-               unsigned long haddr)
-{
-       if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
-                       (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
-               return false;
-       if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
-               return false;
-       return true;
-}
-
 static void deposit_prealloc_pte(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
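
The transhuge_vma_suitable() helper and HPAGE_CACHE_INDEX_MASK are removed from memory.c here; presumably they move to a shared header in the same series (likely include/linux/huge_mm.h), since other THP fault paths still need this check. A stand-alone worked example of the alignment rule it encodes, assuming 4 KiB base pages and 2 MiB PMD huge pages (HPAGE_PMD_NR == 512):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define HPAGE_PMD_NR		512UL
#define HPAGE_PMD_SIZE		(HPAGE_PMD_NR << PAGE_SHIFT)	/* 2 MiB */
#define HPAGE_CACHE_INDEX_MASK	(HPAGE_PMD_NR - 1)

/* Mirrors the removed check: the virtual and file page offsets must agree
 * modulo HPAGE_PMD_NR, and the 2 MiB range at haddr must fit in the VMA. */
static bool suitable(unsigned long vm_start, unsigned long vm_end,
		     unsigned long vm_pgoff, unsigned long haddr)
{
	if (((vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
	    (vm_pgoff & HPAGE_CACHE_INDEX_MASK))
		return false;
	if (haddr < vm_start || haddr + HPAGE_PMD_SIZE > vm_end)
		return false;
	return true;
}

int main(void)
{
	/* 2 MiB-aligned mapping of file offset 0: eligible, prints 1 */
	printf("%d\n", suitable(0x200000, 0x800000, 0, 0x400000));
	/* start shifted by one 4 KiB page, file offset unchanged: prints 0 */
	printf("%d\n", suitable(0x201000, 0x801000, 0, 0x400000));
	return 0;
}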