 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 *
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
 * Device private memory: see HMM (Documentation/vm/hmm.txt) or hmm.h.
DEFINE_STATIC_KEY_FALSE(device_private_key);
EXPORT_SYMBOL(device_private_key);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @sequence: we track updates to the CPU page table with a sequence number
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list

	struct list_head ranges;
	struct list_head mirrors;
	struct mmu_notifier mmu_notifier;
	struct rw_semaphore mirrors_sem;

 * hmm_register - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 *
 * This is not intended to be used directly by device drivers. It allocates an
 * HMM struct if mm does not have one, and initializes it.

static struct hmm *hmm_register(struct mm_struct *mm)
	struct hmm *hmm = READ_ONCE(mm->hmm);

	 * The hmm struct can only be freed once the mm_struct goes away,
	 * hence we should always have pre-allocated a new hmm struct
	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);

	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	atomic_set(&hmm->sequence, 0);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->lock);

	 * We should only get here if we hold the mmap_sem in write mode, i.e. on
	 * registration of the first mirror through hmm_mirror_register()
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {

	spin_lock(&mm->page_table_lock);
	spin_unlock(&mm->page_table_lock);

	mmu_notifier_unregister(&hmm->mmu_notifier, mm);

void hmm_mm_destroy(struct mm_struct *mm)

static void hmm_invalidate_range(struct hmm *hmm,
				 enum hmm_update_type action,
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	spin_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		unsigned long addr, idx, npages;

		if (end < range->start || start >= range->end)

		range->valid = false;
		addr = max(start, range->start);
		idx = (addr - range->start) >> PAGE_SHIFT;
		npages = (min(range->end, end) - addr) >> PAGE_SHIFT;
		memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
	spin_unlock(&hmm->lock);

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list)
		mirror->ops->sync_cpu_device_pagetables(mirror, action,
	up_read(&hmm->mirrors_sem);

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
	struct hmm_mirror *mirror;
	struct hmm *hmm = mm->hmm;

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			 * Drop mirrors_sem so callback can wait on any pending
			 * work that might itself trigger mmu_notifier callback
			 * and thus would deadlock with us.
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	up_write(&hmm->mirrors_sem);

static void hmm_invalidate_range_start(struct mmu_notifier *mn,
				       struct mm_struct *mm,
	struct hmm *hmm = mm->hmm;

	atomic_inc(&hmm->sequence);

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
				     struct mm_struct *mm,
	struct hmm *hmm = mm->hmm;

	hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release = hmm_release,
	.invalidate_range_start = hmm_invalidate_range_start,
	.invalidate_range_end = hmm_invalidate_range_end,

 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !

int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
	if (!mm || !mirror || !mirror->ops)

	mirror->hmm = hmm_register(mm);

	down_write(&mirror->hmm->mirrors_sem);
	if (mirror->hmm->mm == NULL) {
		 * A racing hmm_mirror_unregister() is about to destroy the hmm
		 * struct. Try again to allocate a new one.
		up_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

EXPORT_SYMBOL(hmm_mirror_register);

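/*
 * Illustrative sketch (not part of the original file): registering a mirror.
 * The driver supplies hmm_mirror_ops with a sync_cpu_device_pagetables()
 * callback that invalidates its device page tables; the names dummy_mirror_ops
 * and dummy_sync below are hypothetical.
 *
 *   static const struct hmm_mirror_ops dummy_mirror_ops = {
 *       .sync_cpu_device_pagetables = dummy_sync,
 *   };
 *
 *   mirror->ops = &dummy_mirror_ops;
 *   down_write(&mm->mmap_sem);
 *   ret = hmm_mirror_register(mirror, mm);
 *   up_write(&mm->mmap_sem);
 */
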
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
void hmm_mirror_unregister(struct hmm_mirror *mirror)
	bool should_unregister = false;
	struct mm_struct *mm;

	if (mirror->hmm == NULL)

	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	should_unregister = list_empty(&hmm->mirrors);
	up_write(&hmm->mirrors_sem);

	if (!should_unregister || mm == NULL)

	spin_lock(&mm->page_table_lock);
	spin_unlock(&mm->page_table_lock);

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range *range;

static int hmm_vma_do_fault(struct mm_walk *walk,
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= hmm_vma_walk->write ? FAULT_FLAG_WRITE : 0;
	r = handle_mm_fault(vma, addr, flags);
	if (r & VM_FAULT_RETRY)
	if (r & VM_FAULT_ERROR) {
		*pfn = HMM_PFN_ERROR;

static int hmm_pfns_bad(unsigned long addr,
			struct mm_walk *walk)
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = HMM_PFN_ERROR;

static void hmm_pfns_clear(uint64_t *pfns,
	for (; addr < end; addr += PAGE_SIZE, pfns++)

 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @walk: mm_walk structure
 * Returns: 0 on success, -EAGAIN after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.

static int hmm_vma_walk_hole(unsigned long addr,
			     struct mm_walk *walk)
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++) {
		if (hmm_vma_walk->fault) {
			ret = hmm_vma_do_fault(walk, addr, &pfns[i]);

	return hmm_vma_walk->fault ? -EAGAIN : 0;

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    struct mm_walk *walk)
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;

	i = (addr - range->start) >> PAGE_SHIFT;
	write_fault = hmm_vma_walk->fault & hmm_vma_walk->write;

		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(*pmdp) && vma->vm_flags & VM_HUGETLB)
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) {
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a transparent
		 * huge or device mapping one and compute the corresponding pfn
		pmd = pmd_read_atomic(pmdp);

		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))

		if (pmd_protnone(pmd))
			return hmm_vma_walk_hole(start, end, walk);

		if (write_fault && !pmd_write(pmd))
			return hmm_vma_walk_hole(start, end, walk);

		pfn = pmd_pfn(pmd) + pte_index(addr);
		flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
		for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
			pfns[i] = hmm_pfn_from_pfn(pfn) | flag;

		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
			if (hmm_vma_walk->fault)

		if (!pte_present(pte)) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (!non_swap_entry(entry)) {
				if (hmm_vma_walk->fault)

			 * This is a special swap entry, ignore migration, use
			 * device and report anything else as error.
			if (is_device_private_entry(entry)) {
				pfns[i] = hmm_pfn_from_pfn(swp_offset(entry));
				if (is_write_device_private_entry(entry)) {
					pfns[i] |= HMM_PFN_WRITE;
				} else if (write_fault)
				pfns[i] |= HMM_PFN_DEVICE_PRIVATE;
			} else if (is_migration_entry(entry)) {
				if (hmm_vma_walk->fault) {
					hmm_vma_walk->last = addr;
					migration_entry_wait(vma->vm_mm,

				/* Report error for everything else */
				pfns[i] = HMM_PFN_ERROR;

		if (write_fault && !pte_write(pte))

		pfns[i] = hmm_pfn_from_pfn(pte_pfn(pte));
		pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;

	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole(start, end, walk);

static void hmm_pfns_special(struct hmm_range *range)
	unsigned long addr = range->start, i = 0;

	for (; addr < range->end; addr += PAGE_SIZE, i++)
		range->pfns[i] = HMM_PFN_SPECIAL;

 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @range: range being snapshotted
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          vma permission, 0 success
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See hmm_vma_range_done() for further
 * information.
 *
 * The range struct is initialized here. It tracks the CPU page table, but only
 * if the function returns success (0), in which case the caller must then call
 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
 *
 * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
 * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !

int hmm_vma_get_pfns(struct hmm_range *range)
	struct vm_area_struct *vma = range->vma;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
	if (range->end < vma->vm_start || range->end > vma->vm_end)

	hmm = hmm_register(vma->vm_mm);

	/* Caller must have registered a mirror, via hmm_mirror_register() ! */
	if (!hmm->mmu_notifier.ops)

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
		hmm_pfns_special(range);

	if (!(vma->vm_flags & VM_READ)) {
		 * If the vma does not allow read access, then assume that it does
		 * not allow write access either. Architectures that allow write
		 * without read access are not supported by HMM, because
		 * operations such as atomic access would not work.
		hmm_pfns_clear(range->pfns, range->start, range->end);

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = false;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;

	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	walk_page_range(range->start, range->end, &mm_walk);

EXPORT_SYMBOL(hmm_vma_get_pfns);

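/*
 * Illustrative sketch (not part of the original file): snapshotting one vma
 * with hmm_vma_get_pfns(). Allocation of the pfns array and the retry policy
 * are up to the caller; mmap_sem is expected to be held around the snapshot.
 *
 *   struct hmm_range range;
 *
 *   range.vma = vma;
 *   range.start = start;  // page aligned, within vma
 *   range.end = end;      // page aligned, within vma
 *   range.pfns = pfns;    // array of (end - start) >> PAGE_SHIFT entries
 *   ret = hmm_vma_get_pfns(&range);
 *   if (ret)
 *       return ret;
 *   // ... build the device page table update from range.pfns ...
 *   if (!hmm_vma_range_done(&range))
 *       goto retry;       // snapshot was invalidated, start over
 */
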
 * hmm_vma_range_done() - stop tracking change to CPU page table over a range
 * @range: range being tracked
 * Returns: false if range data has been invalidated, true otherwise
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is done
 * using the data, or wants to lock updates to the data it got from those
 * functions, it must call the hmm_vma_range_done() function, which will then
 * stop tracking CPU page table updates.
 *
 * Note that device driver must still implement general CPU page table update
 * tracking either by using hmm_mirror (see hmm_mirror_register()) or by using
 * the mmu_notifier API directly.
 *
 * CPU page table update tracking done through hmm_range is only temporary and
 * to be used while trying to duplicate CPU page table contents for a range of
 * virtual addresses.
 *
 * There are two ways to use this:
 * hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 * trans = device_build_page_table_update_transaction(pfns);
 * device_page_table_lock();
 * if (!hmm_vma_range_done(range)) {
 *   device_page_table_unlock();
 * device_commit_transaction(trans);
 * device_page_table_unlock();
 *
 * hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 * device_page_table_lock();
 * hmm_vma_range_done(range);
 * device_update_page_table(range->pfns);
 * device_page_table_unlock();

bool hmm_vma_range_done(struct hmm_range *range)
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;

	if (range->end <= range->start) {

	hmm = hmm_register(range->vma->vm_mm);
		memset(range->pfns, 0, sizeof(*range->pfns) * npages);

	spin_lock(&hmm->lock);
	list_del_rcu(&range->list);
	spin_unlock(&hmm->lock);

EXPORT_SYMBOL(hmm_vma_range_done);

 * hmm_vma_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @write: is it a write fault
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: 0 on success, error otherwise (-EAGAIN means mmap_sem has been dropped)
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 *
 * Expected use pattern:
 * down_read(&mm->mmap_sem);
 * // Find vma and address device wants to fault, initialize hmm_pfn_t
 * // array accordingly
 * ret = hmm_vma_fault(range, write, block);
 * hmm_vma_range_done(range);
 * // You might want to rate limit or yield to play nicely, you may
 * // also commit any valid pfn in the array assuming that you are
 * // getting true from hmm_vma_range_done()
 * up_read(&mm->mmap_sem)
 * // Take device driver lock that serializes device page table update
 * driver_lock_device_page_table_update();
 * hmm_vma_range_done(range);
 * // Commit pfns we got from hmm_vma_fault()
 * driver_unlock_device_page_table_update();
 * up_read(&mm->mmap_sem)
 *
 * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURNS SUCCESS (0)
 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
 *
 * YOU HAVE BEEN WARNED !

int hmm_vma_fault(struct hmm_range *range, bool write, bool block)
	struct vm_area_struct *vma = range->vma;
	unsigned long start = range->start;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
	if (range->end < vma->vm_start || range->end > vma->vm_end)

	hmm = hmm_register(vma->vm_mm);
		hmm_pfns_clear(range->pfns, range->start, range->end);

	/* Caller must have registered a mirror using hmm_mirror_register() */
	if (!hmm->mmu_notifier.ops)

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
		hmm_pfns_special(range);

	if (!(vma->vm_flags & VM_READ)) {
		 * If the vma does not allow read access, then assume that it does
		 * not allow write access either. Architectures that allow write
		 * without read access are not supported by HMM, because
		 * operations such as atomic access would not work.
		hmm_pfns_clear(range->pfns, range->start, range->end);

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = true;
	hmm_vma_walk.write = write;
	hmm_vma_walk.block = block;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;
	hmm_vma_walk.last = range->start;

	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

		ret = walk_page_range(start, range->end, &mm_walk);
		start = hmm_vma_walk.last;
	} while (ret == -EAGAIN);

		i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
		hmm_pfns_clear(&range->pfns[i], hmm_vma_walk.last, range->end);
		hmm_vma_range_done(range);

EXPORT_SYMBOL(hmm_vma_fault);
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);

EXPORT_SYMBOL(hmm_vma_alloc_locked_page);

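/*
 * Illustrative sketch (not part of the original file): a driver's
 * hmm_devmem_ops->fault() callback would typically use the helper above to
 * allocate a locked system page as the migration target for the device page
 * being faulted. The callback name, the copy step and the VM_FAULT_SIGBUS
 * error code below are hypothetical:
 *
 *   static int dummy_devmem_fault(struct hmm_devmem *devmem,
 *                                 struct vm_area_struct *vma,
 *                                 unsigned long addr,
 *                                 const struct page *page,
 *                                 unsigned int flags,
 *                                 pmd_t *pmdp)
 *   {
 *       struct page *dpage = hmm_vma_alloc_locked_page(vma, addr);
 *
 *       if (!dpage)
 *           return VM_FAULT_SIGBUS;
 *       // copy the device memory backing @page into dpage, then migrate
 *       ...
 *   }
 */
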
static void hmm_devmem_ref_release(struct percpu_ref *ref)
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);

static void hmm_devmem_ref_exit(void *data)
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_exit(ref);
	devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);

static void hmm_devmem_ref_kill(void *data)
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_kill(ref);
	wait_for_completion(&devmem->completion);
	devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);

static int hmm_devmem_fault(struct vm_area_struct *vma,
			    const struct page *page,
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);

static void hmm_devmem_free(struct page *page, void *data)
	struct hmm_devmem *devmem = data;

	devmem->ops->free(devmem, page);

static DEFINE_MUTEX(hmm_devmem_lock);
static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);

static void hmm_devmem_radix_release(struct resource *resource)
	resource_size_t key, align_start, align_size;

	align_start = resource->start & ~(PA_SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(resource), PA_SECTION_SIZE);

	mutex_lock(&hmm_devmem_lock);
	for (key = resource->start;
	     key <= resource->end;
	     key += PA_SECTION_SIZE)
		radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&hmm_devmem_lock);

static void hmm_devmem_release(struct device *dev, void *data)
	struct hmm_devmem *devmem = data;
	struct resource *resource = devmem->resource;
	unsigned long start_pfn, npages;

	if (percpu_ref_tryget_live(&devmem->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(&devmem->ref);

	/* pages are dead and unused, undo the arch mapping */
	start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
	npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;

	page = pfn_to_page(start_pfn);
	zone = page_zone(page);
	if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
		__remove_pages(zone, start_pfn, npages, NULL);
		arch_remove_memory(start_pfn << PAGE_SHIFT,
				   npages << PAGE_SHIFT, NULL);

	hmm_devmem_radix_release(resource);

static struct hmm_devmem *hmm_devmem_find(resource_size_t phys)
	WARN_ON_ONCE(!rcu_read_lock_held());

	return radix_tree_lookup(&hmm_devmem_radix, phys >> PA_SECTION_SHIFT);

static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
	resource_size_t key, align_start, align_size, align_end;
	struct device *device = devmem->device;
	int ret, nid, is_ram;

	align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
	align_size = ALIGN(devmem->resource->start +
			   resource_size(devmem->resource),
			   PA_SECTION_SIZE) - align_start;

	is_ram = region_intersects(align_start, align_size,
				   IORESOURCE_SYSTEM_RAM,
	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
			  __func__, devmem->resource);
	if (is_ram == REGION_INTERSECTS)

	if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY)
		devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
		devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;

	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_fault = hmm_devmem_fault;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.dev = devmem->device;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;

	mutex_lock(&hmm_devmem_lock);
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
		struct hmm_devmem *dup;

		dup = hmm_devmem_find(key);
			dev_err(device, "%s: collides with mapping for %s\n",
				__func__, dev_name(dup->device));
			mutex_unlock(&hmm_devmem_lock);
		ret = radix_tree_insert(&hmm_devmem_radix,
					key >> PA_SECTION_SHIFT,
			dev_err(device, "%s: failed: %d\n", __func__, ret);
			mutex_unlock(&hmm_devmem_lock);
	mutex_unlock(&hmm_devmem_lock);

	nid = dev_to_node(device);

	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory. Moreover,
	 * the device memory is inaccessible, thus we do not want to create
	 * a linear mapping for the memory like arch_add_memory() would do.
	 *
	 * For device public memory, which is accessible by the CPU, we do
	 * want the linear mapping and thus use arch_add_memory().
	if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
		ret = arch_add_memory(nid, align_start, align_size, NULL,
		ret = add_pages(nid, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, NULL, false);
		goto error_add_memory;

	move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
			       align_start >> PAGE_SHIFT,
			       align_size >> PAGE_SHIFT, NULL);

	for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) {
		struct page *page = pfn_to_page(pfn);

		page->pgmap = &devmem->pagemap;

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	hmm_devmem_radix_release(devmem->resource);

static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
	struct hmm_devmem *devmem = data;

	return devmem->resource == match_data;

static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
	devres_release(devmem->device, &hmm_devmem_release,
		       &hmm_devmem_match, devmem->resource);

 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical addresses big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization and
 * is then responsible for memory management. HMM only provides helpers.
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
	struct hmm_devmem *devmem;
	resource_size_t addr;

	static_branch_enable(&device_private_key);

	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
				   GFP_KERNEL, dev_to_node(device));
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
		goto error_percpu_ref;

	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
		goto error_devm_add_action;

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	 * FIXME add a new helper to quickly walk resource tree and find free
	 *
	 * FIXME what about ioport_resource resource ?
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)

		devmem->resource = devm_request_mem_region(device, addr, size,
		if (!devmem->resource) {
			goto error_no_resource;

	if (!devmem->resource) {
		goto error_no_resource;

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);

	ret = hmm_devmem_pages_create(devmem);

	devres_add(device, devmem);

	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
		hmm_devmem_remove(devmem);
		return ERR_PTR(ret);

	devm_release_mem_region(device, devmem->resource->start,
				resource_size(devmem->resource));
error_devm_add_action:
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
	devres_free(devmem);
	return ERR_PTR(ret);

EXPORT_SYMBOL(hmm_devmem_add);

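/*
 * Illustrative sketch (not part of the original file): a driver would
 * typically call hmm_devmem_add() at probe time with its hmm_devmem_ops.
 * The names dummy_devmem_ops, dummy_devmem_free, dummy_devmem_fault,
 * dummy_probe and DUMMY_MEM_SIZE are hypothetical.
 *
 *   static const struct hmm_devmem_ops dummy_devmem_ops = {
 *       .free  = dummy_devmem_free,
 *       .fault = dummy_devmem_fault,
 *   };
 *
 *   static int dummy_probe(struct device *dev)
 *   {
 *       struct hmm_devmem *devmem;
 *
 *       devmem = hmm_devmem_add(&dummy_devmem_ops, dev, DUMMY_MEM_SIZE);
 *       if (IS_ERR(devmem))
 *           return PTR_ERR(devmem);
 *       // device pages span devmem->pfn_first .. devmem->pfn_last - 1
 *       return 0;
 *   }
 */
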
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
	struct hmm_devmem *devmem;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	static_branch_enable(&device_private_key);

	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
				   GFP_KERNEL, dev_to_node(device));
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
		goto error_percpu_ref;

	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
		goto error_devm_add_action;

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);

	ret = hmm_devmem_pages_create(devmem);
		goto error_devm_add_action;

	devres_add(device, devmem);

	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
		hmm_devmem_remove(devmem);
		return ERR_PTR(ret);

error_devm_add_action:
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
	devres_free(devmem);
	return ERR_PTR(ret);

EXPORT_SYMBOL(hmm_devmem_add_resource);

 * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
 *
 * @devmem: hmm_devmem struct used to track and manage the ZONE_DEVICE memory
 *
 * This will hot-unplug memory that was hotplugged by hmm_devmem_add() on behalf
 * of the device driver. It will free struct page and remove the resource that
 * reserved the physical address range for this device memory.
void hmm_devmem_remove(struct hmm_devmem *devmem)
	resource_size_t start, size;
	struct device *device;

	device = devmem->device;
	start = devmem->resource->start;
	size = resource_size(devmem->resource);

	cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
	hmm_devmem_pages_remove(devmem);

		devm_release_mem_region(device, start, size);

EXPORT_SYMBOL(hmm_devmem_remove);

 * A device driver that wants to handle the memory of multiple devices through
 * a single fake device can use hmm_device to do so. This is purely a helper
 * and it is not required in order to use any HMM functionality.
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

struct hmm_device *hmm_device_new(void *drvdata)
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		return ERR_PTR(-EBUSY);
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

EXPORT_SYMBOL(hmm_device_new);

void hmm_device_put(struct hmm_device *hmm_device)
	put_device(&hmm_device->device);

EXPORT_SYMBOL(hmm_device_put);

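/*
 * Illustrative sketch (not part of the original file): using the fake device
 * helper above; my_driver_data is hypothetical.
 *
 *   struct hmm_device *hdev;
 *
 *   hdev = hmm_device_new(my_driver_data);
 *   if (IS_ERR(hdev))
 *       return PTR_ERR(hdev);
 *   // pass &hdev->device as the struct device to hmm_devmem_add()
 *   ...
 *   hmm_device_put(hdev);  // when done with it
 */
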
static int __init hmm_init(void)
	ret = alloc_chrdev_region(&hmm_device_devt, 0,

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */