mm/hmm: use a structure for update callback parameters
mm/hmm.c
1 /*
2  * Copyright 2013 Red Hat Inc.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * Authors: Jérôme Glisse <jglisse@redhat.com>
15  */
16 /*
17  * Refer to include/linux/hmm.h for information about heterogeneous memory
18  * management or HMM for short.
19  */
20 #include <linux/mm.h>
21 #include <linux/hmm.h>
22 #include <linux/init.h>
23 #include <linux/rmap.h>
24 #include <linux/swap.h>
25 #include <linux/slab.h>
26 #include <linux/sched.h>
27 #include <linux/mmzone.h>
28 #include <linux/pagemap.h>
29 #include <linux/swapops.h>
30 #include <linux/hugetlb.h>
31 #include <linux/memremap.h>
32 #include <linux/jump_label.h>
33 #include <linux/mmu_notifier.h>
34 #include <linux/memory_hotplug.h>
35
36 #define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)
37
38 #if IS_ENABLED(CONFIG_HMM_MIRROR)
39 static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
40
41 /*
42  * struct hmm - HMM per mm struct
43  *
44  * @mm: mm struct this HMM struct is bound to
45  * @lock: lock protecting ranges list
46  * @sequence: we track updates to the CPU page table with a sequence number
47  * @ranges: list of ranges being snapshotted
48  * @mirrors: list of mirrors for this mm
49  * @mmu_notifier: mmu notifier to track updates to CPU page table
50  * @mirrors_sem: read/write semaphore protecting the mirrors list
51  */
52 struct hmm {
53         struct mm_struct        *mm;
54         spinlock_t              lock;
55         atomic_t                sequence;
56         struct list_head        ranges;
57         struct list_head        mirrors;
58         struct mmu_notifier     mmu_notifier;
59         struct rw_semaphore     mirrors_sem;
60 };
61
62 /*
63  * hmm_register - register HMM against an mm (HMM internal)
64  *
65  * @mm: mm struct to attach to
66  *
67  * This is not intended to be used directly by device drivers. It allocates an
68  * HMM struct if mm does not have one, and initializes it.
69  */
70 static struct hmm *hmm_register(struct mm_struct *mm)
71 {
72         struct hmm *hmm = READ_ONCE(mm->hmm);
73         bool cleanup = false;
74
75         /*
76          * The hmm struct can only be freed once the mm_struct goes away,
77          * hence we should always have pre-allocated a new hmm struct
78          * above.
79          */
80         if (hmm)
81                 return hmm;
82
83         hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
84         if (!hmm)
85                 return NULL;
86         INIT_LIST_HEAD(&hmm->mirrors);
87         init_rwsem(&hmm->mirrors_sem);
88         atomic_set(&hmm->sequence, 0);
89         hmm->mmu_notifier.ops = NULL;
90         INIT_LIST_HEAD(&hmm->ranges);
91         spin_lock_init(&hmm->lock);
92         hmm->mm = mm;
93
94         spin_lock(&mm->page_table_lock);
95         if (!mm->hmm)
96                 mm->hmm = hmm;
97         else
98                 cleanup = true;
99         spin_unlock(&mm->page_table_lock);
100
101         if (cleanup)
102                 goto error;
103
104         /*
105          * We should only get here if we hold the mmap_sem in write mode,
106          * i.e. on registration of the first mirror through hmm_mirror_register().
107          */
108         hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
109         if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
110                 goto error_mm;
111
112         return mm->hmm;
113
114 error_mm:
115         spin_lock(&mm->page_table_lock);
116         if (mm->hmm == hmm)
117                 mm->hmm = NULL;
118         spin_unlock(&mm->page_table_lock);
119 error:
120         kfree(hmm);
121         return NULL;
122 }
123
124 void hmm_mm_destroy(struct mm_struct *mm)
125 {
126         kfree(mm->hmm);
127 }
128
129 static int hmm_invalidate_range(struct hmm *hmm,
130                                 const struct hmm_update *update)
131 {
132         struct hmm_mirror *mirror;
133         struct hmm_range *range;
134
135         spin_lock(&hmm->lock);
136         list_for_each_entry(range, &hmm->ranges, list) {
137                 unsigned long addr, idx, npages;
138
139                 if (update->end < range->start || update->start >= range->end)
140                         continue;
141
142                 range->valid = false;
143                 addr = max(update->start, range->start);
144                 idx = (addr - range->start) >> PAGE_SHIFT;
145                 npages = (min(range->end, update->end) - addr) >> PAGE_SHIFT;
146                 memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
147         }
148         spin_unlock(&hmm->lock);
149
150         down_read(&hmm->mirrors_sem);
151         list_for_each_entry(mirror, &hmm->mirrors, list) {
152                 int ret;
153
154                 ret = mirror->ops->sync_cpu_device_pagetables(mirror, update);
155                 if (!update->blockable && ret == -EAGAIN) {
156                         up_read(&hmm->mirrors_sem);
157                         return -EAGAIN;
158                 }
159         }
160         up_read(&hmm->mirrors_sem);
161
162         return 0;
163 }
164
165 static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
166 {
167         struct hmm_mirror *mirror;
168         struct hmm *hmm = mm->hmm;
169
170         down_write(&hmm->mirrors_sem);
171         mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
172                                           list);
173         while (mirror) {
174                 list_del_init(&mirror->list);
175                 if (mirror->ops->release) {
176                         /*
177                          * Drop mirrors_sem so callback can wait on any pending
178                          * work that might itself trigger mmu_notifier callback
179                          * and thus would deadlock with us.
180                          */
181                         up_write(&hmm->mirrors_sem);
182                         mirror->ops->release(mirror);
183                         down_write(&hmm->mirrors_sem);
184                 }
185                 mirror = list_first_entry_or_null(&hmm->mirrors,
186                                                   struct hmm_mirror, list);
187         }
188         up_write(&hmm->mirrors_sem);
189 }
190
191 static int hmm_invalidate_range_start(struct mmu_notifier *mn,
192                                        struct mm_struct *mm,
193                                        unsigned long start,
194                                        unsigned long end,
195                                        bool blockable)
196 {
197         struct hmm *hmm = mm->hmm;
198
199         VM_BUG_ON(!hmm);
200
201         atomic_inc(&hmm->sequence);
202
203         return 0;
204 }
205
206 static void hmm_invalidate_range_end(struct mmu_notifier *mn,
207                                      struct mm_struct *mm,
208                                      unsigned long start,
209                                      unsigned long end)
210 {
211         struct hmm_update update;
212         struct hmm *hmm = mm->hmm;
213
214         VM_BUG_ON(!hmm);
215
216         update.start = start;
217         update.end = end;
218         update.event = HMM_UPDATE_INVALIDATE;
219         update.blockable = true;
220         hmm_invalidate_range(hmm, &update);
221 }
222
223 static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
224         .release                = hmm_release,
225         .invalidate_range_start = hmm_invalidate_range_start,
226         .invalidate_range_end   = hmm_invalidate_range_end,
227 };
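
/*
 * Example (sketch): the driver-side half of the callback chain above, i.e.
 * what a mirror's sync_cpu_device_pagetables() callback might look like now
 * that the parameters are packed into struct hmm_update. Everything prefixed
 * with my_ (struct my_mirror, its device page table lock, the invalidation
 * step) is a hypothetical driver detail, not part of HMM.
 */
struct my_mirror {
        struct hmm_mirror       mirror;
        struct mutex            pt_lock;        /* protects the device page table */
};

static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
                                         const struct hmm_update *update)
{
        struct my_mirror *drv = container_of(mirror, struct my_mirror, mirror);

        /* Honor non-blockable invalidations by bailing out with -EAGAIN. */
        if (!update->blockable) {
                if (!mutex_trylock(&drv->pt_lock))
                        return -EAGAIN;
        } else {
                mutex_lock(&drv->pt_lock);
        }

        /* Unmap the device page table entries covering [start, end) here. */

        mutex_unlock(&drv->pt_lock);
        return 0;
}
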
228
229 /*
230  * hmm_mirror_register() - register a mirror against an mm
231  *
232  * @mirror: new mirror struct to register
233  * @mm: mm to register against
234  *
235  * To start mirroring a process address space, the device driver must register
236  * an HMM mirror struct.
237  *
238  * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
239  */
240 int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
241 {
242         /* Sanity check */
243         if (!mm || !mirror || !mirror->ops)
244                 return -EINVAL;
245
246 again:
247         mirror->hmm = hmm_register(mm);
248         if (!mirror->hmm)
249                 return -ENOMEM;
250
251         down_write(&mirror->hmm->mirrors_sem);
252         if (mirror->hmm->mm == NULL) {
253                 /*
254                  * A racing hmm_mirror_unregister() is about to destroy the hmm
255                  * struct. Try again to allocate a new one.
256                  */
257                 up_write(&mirror->hmm->mirrors_sem);
258                 mirror->hmm = NULL;
259                 goto again;
260         } else {
261                 list_add(&mirror->list, &mirror->hmm->mirrors);
262                 up_write(&mirror->hmm->mirrors_sem);
263         }
264
265         return 0;
266 }
267 EXPORT_SYMBOL(hmm_mirror_register);
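
/*
 * Example (sketch): registering the hypothetical my_mirror from the sketch
 * above. The ops table and my_release() are driver-side assumptions; the
 * mmap_sem locking rule and hmm_mirror_register() itself come from this file.
 */
static void my_release(struct hmm_mirror *mirror)
{
        /* The address space is going away: stop using the device mappings. */
}

static const struct hmm_mirror_ops my_mirror_ops = {
        .sync_cpu_device_pagetables     = my_sync_cpu_device_pagetables,
        .release                        = my_release,
};

static int my_mirror_start(struct my_mirror *drv, struct mm_struct *mm)
{
        int ret;

        mutex_init(&drv->pt_lock);
        drv->mirror.ops = &my_mirror_ops;

        down_write(&mm->mmap_sem);
        ret = hmm_mirror_register(&drv->mirror, mm);
        up_write(&mm->mmap_sem);

        return ret;
}
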
268
269 /*
270  * hmm_mirror_unregister() - unregister a mirror
271  *
272  * @mirror: mirror struct to unregister
273  *
274  * Stop mirroring a process address space, and cleanup.
275  */
276 void hmm_mirror_unregister(struct hmm_mirror *mirror)
277 {
278         bool should_unregister = false;
279         struct mm_struct *mm;
280         struct hmm *hmm;
281
282         if (mirror->hmm == NULL)
283                 return;
284
285         hmm = mirror->hmm;
286         down_write(&hmm->mirrors_sem);
287         list_del_init(&mirror->list);
288         should_unregister = list_empty(&hmm->mirrors);
289         mirror->hmm = NULL;
290         mm = hmm->mm;
291         hmm->mm = NULL;
292         up_write(&hmm->mirrors_sem);
293
294         if (!should_unregister || mm == NULL)
295                 return;
296
297         mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
298
299         spin_lock(&mm->page_table_lock);
300         if (mm->hmm == hmm)
301                 mm->hmm = NULL;
302         spin_unlock(&mm->page_table_lock);
303
304         kfree(hmm);
305 }
306 EXPORT_SYMBOL(hmm_mirror_unregister);
307
308 struct hmm_vma_walk {
309         struct hmm_range        *range;
310         unsigned long           last;
311         bool                    fault;
312         bool                    block;
313 };
314
315 static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
316                             bool write_fault, uint64_t *pfn)
317 {
318         unsigned int flags = FAULT_FLAG_REMOTE;
319         struct hmm_vma_walk *hmm_vma_walk = walk->private;
320         struct hmm_range *range = hmm_vma_walk->range;
321         struct vm_area_struct *vma = walk->vma;
322         vm_fault_t ret;
323
324         flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
325         flags |= write_fault ? FAULT_FLAG_WRITE : 0;
326         ret = handle_mm_fault(vma, addr, flags);
327         if (ret & VM_FAULT_RETRY)
328                 return -EBUSY;
329         if (ret & VM_FAULT_ERROR) {
330                 *pfn = range->values[HMM_PFN_ERROR];
331                 return -EFAULT;
332         }
333
334         return -EAGAIN;
335 }
336
337 static int hmm_pfns_bad(unsigned long addr,
338                         unsigned long end,
339                         struct mm_walk *walk)
340 {
341         struct hmm_vma_walk *hmm_vma_walk = walk->private;
342         struct hmm_range *range = hmm_vma_walk->range;
343         uint64_t *pfns = range->pfns;
344         unsigned long i;
345
346         i = (addr - range->start) >> PAGE_SHIFT;
347         for (; addr < end; addr += PAGE_SIZE, i++)
348                 pfns[i] = range->values[HMM_PFN_ERROR];
349
350         return 0;
351 }
352
353 /*
354  * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
355  * @start: range virtual start address (inclusive)
356  * @end: range virtual end address (exclusive)
357  * @fault: should we fault or not ?
358  * @write_fault: write fault ?
359  * @walk: mm_walk structure
360  * Returns: 0 on success, -EAGAIN after page fault, or page fault error
361  *
362  * This function will be called whenever pmd_none() or pte_none() returns true,
363  * or whenever there is no page directory covering the virtual address range.
364  */
365 static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
366                               bool fault, bool write_fault,
367                               struct mm_walk *walk)
368 {
369         struct hmm_vma_walk *hmm_vma_walk = walk->private;
370         struct hmm_range *range = hmm_vma_walk->range;
371         uint64_t *pfns = range->pfns;
372         unsigned long i;
373
374         hmm_vma_walk->last = addr;
375         i = (addr - range->start) >> PAGE_SHIFT;
376         for (; addr < end; addr += PAGE_SIZE, i++) {
377                 pfns[i] = range->values[HMM_PFN_NONE];
378                 if (fault || write_fault) {
379                         int ret;
380
381                         ret = hmm_vma_do_fault(walk, addr, write_fault,
382                                                &pfns[i]);
383                         if (ret != -EAGAIN)
384                                 return ret;
385                 }
386         }
387
388         return (fault || write_fault) ? -EAGAIN : 0;
389 }
390
391 static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
392                                       uint64_t pfns, uint64_t cpu_flags,
393                                       bool *fault, bool *write_fault)
394 {
395         struct hmm_range *range = hmm_vma_walk->range;
396
397         *fault = *write_fault = false;
398         if (!hmm_vma_walk->fault)
399                 return;
400
401         /* We aren't asked to do anything ... */
402         if (!(pfns & range->flags[HMM_PFN_VALID]))
403                 return;
404         /* If this is device memory then only fault if explicitly requested */
405         if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
406                 /* Do we fault on device memory ? */
407                 if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
408                         *write_fault = pfns & range->flags[HMM_PFN_WRITE];
409                         *fault = true;
410                 }
411                 return;
412         }
413
414         /* If CPU page table is not valid then we need to fault */
415         *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
416         /* Need to write fault ? */
417         if ((pfns & range->flags[HMM_PFN_WRITE]) &&
418             !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
419                 *write_fault = true;
420                 *fault = true;
421         }
422 }
423
424 static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
425                                  const uint64_t *pfns, unsigned long npages,
426                                  uint64_t cpu_flags, bool *fault,
427                                  bool *write_fault)
428 {
429         unsigned long i;
430
431         if (!hmm_vma_walk->fault) {
432                 *fault = *write_fault = false;
433                 return;
434         }
435
436         for (i = 0; i < npages; ++i) {
437                 hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
438                                    fault, write_fault);
439                 if ((*fault) || (*write_fault))
440                         return;
441         }
442 }
443
444 static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
445                              struct mm_walk *walk)
446 {
447         struct hmm_vma_walk *hmm_vma_walk = walk->private;
448         struct hmm_range *range = hmm_vma_walk->range;
449         bool fault, write_fault;
450         unsigned long i, npages;
451         uint64_t *pfns;
452
453         i = (addr - range->start) >> PAGE_SHIFT;
454         npages = (end - addr) >> PAGE_SHIFT;
455         pfns = &range->pfns[i];
456         hmm_range_need_fault(hmm_vma_walk, pfns, npages,
457                              0, &fault, &write_fault);
458         return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
459 }
460
461 static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
462 {
463         if (pmd_protnone(pmd))
464                 return 0;
465         return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
466                                 range->flags[HMM_PFN_WRITE] :
467                                 range->flags[HMM_PFN_VALID];
468 }
469
470 static int hmm_vma_handle_pmd(struct mm_walk *walk,
471                               unsigned long addr,
472                               unsigned long end,
473                               uint64_t *pfns,
474                               pmd_t pmd)
475 {
476         struct hmm_vma_walk *hmm_vma_walk = walk->private;
477         struct hmm_range *range = hmm_vma_walk->range;
478         unsigned long pfn, npages, i;
479         bool fault, write_fault;
480         uint64_t cpu_flags;
481
482         npages = (end - addr) >> PAGE_SHIFT;
483         cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
484         hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
485                              &fault, &write_fault);
486
487         if (pmd_protnone(pmd) || fault || write_fault)
488                 return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
489
490         pfn = pmd_pfn(pmd) + pte_index(addr);
491         for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
492                 pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
493         hmm_vma_walk->last = end;
494         return 0;
495 }
496
497 static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
498 {
499         if (pte_none(pte) || !pte_present(pte))
500                 return 0;
501         return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
502                                 range->flags[HMM_PFN_WRITE] :
503                                 range->flags[HMM_PFN_VALID];
504 }
505
506 static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
507                               unsigned long end, pmd_t *pmdp, pte_t *ptep,
508                               uint64_t *pfn)
509 {
510         struct hmm_vma_walk *hmm_vma_walk = walk->private;
511         struct hmm_range *range = hmm_vma_walk->range;
512         struct vm_area_struct *vma = walk->vma;
513         bool fault, write_fault;
514         uint64_t cpu_flags;
515         pte_t pte = *ptep;
516         uint64_t orig_pfn = *pfn;
517
518         *pfn = range->values[HMM_PFN_NONE];
519         cpu_flags = pte_to_hmm_pfn_flags(range, pte);
520         hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
521                            &fault, &write_fault);
522
523         if (pte_none(pte)) {
524                 if (fault || write_fault)
525                         goto fault;
526                 return 0;
527         }
528
529         if (!pte_present(pte)) {
530                 swp_entry_t entry = pte_to_swp_entry(pte);
531
532                 if (!non_swap_entry(entry)) {
533                         if (fault || write_fault)
534                                 goto fault;
535                         return 0;
536                 }
537
538                 /*
539                  * This is a special swap entry: ignore migration, use the
540                  * device entry, and report anything else as an error.
541                  */
542                 if (is_device_private_entry(entry)) {
543                         cpu_flags = range->flags[HMM_PFN_VALID] |
544                                 range->flags[HMM_PFN_DEVICE_PRIVATE];
545                         cpu_flags |= is_write_device_private_entry(entry) ?
546                                 range->flags[HMM_PFN_WRITE] : 0;
547                         hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
548                                            &fault, &write_fault);
549                         if (fault || write_fault)
550                                 goto fault;
551                         *pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
552                         *pfn |= cpu_flags;
553                         return 0;
554                 }
555
556                 if (is_migration_entry(entry)) {
557                         if (fault || write_fault) {
558                                 pte_unmap(ptep);
559                                 hmm_vma_walk->last = addr;
560                                 migration_entry_wait(vma->vm_mm,
561                                                      pmdp, addr);
562                                 return -EAGAIN;
563                         }
564                         return 0;
565                 }
566
567                 /* Report error for everything else */
568                 *pfn = range->values[HMM_PFN_ERROR];
569                 return -EFAULT;
570         }
571
572         if (fault || write_fault)
573                 goto fault;
574
575         *pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
576         return 0;
577
578 fault:
579         pte_unmap(ptep);
580         /* Fault any virtual address we were asked to fault */
581         return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
582 }
583
584 static int hmm_vma_walk_pmd(pmd_t *pmdp,
585                             unsigned long start,
586                             unsigned long end,
587                             struct mm_walk *walk)
588 {
589         struct hmm_vma_walk *hmm_vma_walk = walk->private;
590         struct hmm_range *range = hmm_vma_walk->range;
591         struct vm_area_struct *vma = walk->vma;
592         uint64_t *pfns = range->pfns;
593         unsigned long addr = start, i;
594         pte_t *ptep;
595         pmd_t pmd;
596
597
598 again:
599         pmd = READ_ONCE(*pmdp);
600         if (pmd_none(pmd))
601                 return hmm_vma_walk_hole(start, end, walk);
602
603         if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
604                 return hmm_pfns_bad(start, end, walk);
605
606         if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
607                 bool fault, write_fault;
608                 unsigned long npages;
609                 uint64_t *pfns;
610
611                 i = (addr - range->start) >> PAGE_SHIFT;
612                 npages = (end - addr) >> PAGE_SHIFT;
613                 pfns = &range->pfns[i];
614
615                 hmm_range_need_fault(hmm_vma_walk, pfns, npages,
616                                      0, &fault, &write_fault);
617                 if (fault || write_fault) {
618                         hmm_vma_walk->last = addr;
619                         pmd_migration_entry_wait(vma->vm_mm, pmdp);
620                         return -EAGAIN;
621                 }
622                 return 0;
623         } else if (!pmd_present(pmd))
624                 return hmm_pfns_bad(start, end, walk);
625
626         if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
627                 /*
628                  * No need to take the pmd_lock here; even if some other
629                  * thread is splitting the huge pmd, we will get that event
630                  * through the mmu_notifier callback.
631                  *
632                  * So just read the pmd value again, check that it is still a
633                  * transparent huge or device mapping, and compute the
634                  * corresponding pfn values.
635                  */
636                 pmd = pmd_read_atomic(pmdp);
637                 barrier();
638                 if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
639                         goto again;
640
641                 i = (addr - range->start) >> PAGE_SHIFT;
642                 return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
643         }
644
645         /*
646          * We have handled all the valid cases above, i.e. either none,
647          * migration, huge or transparent huge. At this point the pmd is
648          * either a valid entry pointing to a pte directory or a bad pmd
649          * that will not recover.
650          */
651         if (pmd_bad(pmd))
652                 return hmm_pfns_bad(start, end, walk);
653
654         ptep = pte_offset_map(pmdp, addr);
655         i = (addr - range->start) >> PAGE_SHIFT;
656         for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
657                 int r;
658
659                 r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
660                 if (r) {
661                         /* hmm_vma_handle_pte() already unmapped the pte directory */
662                         hmm_vma_walk->last = addr;
663                         return r;
664                 }
665         }
666         pte_unmap(ptep - 1);
667
668         hmm_vma_walk->last = addr;
669         return 0;
670 }
671
672 static void hmm_pfns_clear(struct hmm_range *range,
673                            uint64_t *pfns,
674                            unsigned long addr,
675                            unsigned long end)
676 {
677         for (; addr < end; addr += PAGE_SIZE, pfns++)
678                 *pfns = range->values[HMM_PFN_NONE];
679 }
680
681 static void hmm_pfns_special(struct hmm_range *range)
682 {
683         unsigned long addr = range->start, i = 0;
684
685         for (; addr < range->end; addr += PAGE_SIZE, i++)
686                 range->pfns[i] = range->values[HMM_PFN_SPECIAL];
687 }
688
689 /*
690  * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
691  * @range: range being snapshotted
692  * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
693  *          vma permission, 0 success
694  *
695  * This snapshots the CPU page table for a range of virtual addresses. Snapshot
696  * validity is tracked by range struct. See hmm_vma_range_done() for further
697  * information.
698  *
699  * The range struct is initialized here. It tracks the CPU page table, but only
700  * if the function returns success (0), in which case the caller must then call
701  * hmm_vma_range_done() to stop CPU page table update tracking on this range.
702  *
703  * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
704  * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
705  */
706 int hmm_vma_get_pfns(struct hmm_range *range)
707 {
708         struct vm_area_struct *vma = range->vma;
709         struct hmm_vma_walk hmm_vma_walk;
710         struct mm_walk mm_walk;
711         struct hmm *hmm;
712
713         /* Sanity check, this really should not happen ! */
714         if (range->start < vma->vm_start || range->start >= vma->vm_end)
715                 return -EINVAL;
716         if (range->end < vma->vm_start || range->end > vma->vm_end)
717                 return -EINVAL;
718
719         hmm = hmm_register(vma->vm_mm);
720         if (!hmm)
721                 return -ENOMEM;
722         /* Caller must have registered a mirror, via hmm_mirror_register() ! */
723         if (!hmm->mmu_notifier.ops)
724                 return -EINVAL;
725
726         /* FIXME support hugetlb fs */
727         if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
728                         vma_is_dax(vma)) {
729                 hmm_pfns_special(range);
730                 return -EINVAL;
731         }
732
733         if (!(vma->vm_flags & VM_READ)) {
734                 /*
735                  * If the vma does not allow read access, then assume that it
736                  * does not allow write access either. Architectures that allow
737                  * write without read access are not supported by HMM, because
738                  * operations such as atomic access would not work.
739                  */
740                 hmm_pfns_clear(range, range->pfns, range->start, range->end);
741                 return -EPERM;
742         }
743
744         /* Initialize range to track CPU page table update */
745         spin_lock(&hmm->lock);
746         range->valid = true;
747         list_add_rcu(&range->list, &hmm->ranges);
748         spin_unlock(&hmm->lock);
749
750         hmm_vma_walk.fault = false;
751         hmm_vma_walk.range = range;
752         mm_walk.private = &hmm_vma_walk;
753
754         mm_walk.vma = vma;
755         mm_walk.mm = vma->vm_mm;
756         mm_walk.pte_entry = NULL;
757         mm_walk.test_walk = NULL;
758         mm_walk.hugetlb_entry = NULL;
759         mm_walk.pmd_entry = hmm_vma_walk_pmd;
760         mm_walk.pte_hole = hmm_vma_walk_hole;
761
762         walk_page_range(range->start, range->end, &mm_walk);
763         return 0;
764 }
765 EXPORT_SYMBOL(hmm_vma_get_pfns);
766
767 /*
768  * hmm_vma_range_done() - stop tracking change to CPU page table over a range
769  * @range: range being tracked
770  * Returns: false if range data has been invalidated, true otherwise
771  *
772  * Range struct is used to track updates to the CPU page table after a call to
773  * either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is done
774  * using the data,  or wants to lock updates to the data it got from those
775  * functions, it must call the hmm_vma_range_done() function, which will then
776  * stop tracking CPU page table updates.
777  *
778  * Note that the device driver must still implement general CPU page table
779  * update tracking, either by using hmm_mirror (see hmm_mirror_register()) or
780  * by using the mmu_notifier API directly.
781  *
782  * CPU page table update tracking done through hmm_range is only temporary and
783  * to be used while trying to duplicate CPU page table contents for a range of
784  * virtual addresses.
785  *
786  * There are two ways to use this :
787  * again:
788  *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
789  *   trans = device_build_page_table_update_transaction(pfns);
790  *   device_page_table_lock();
791  *   if (!hmm_vma_range_done(range)) {
792  *     device_page_table_unlock();
793  *     goto again;
794  *   }
795  *   device_commit_transaction(trans);
796  *   device_page_table_unlock();
797  *
798  * Or:
799  *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
800  *   device_page_table_lock();
801  *   hmm_vma_range_done(range);
802  *   device_update_page_table(range->pfns);
803  *   device_page_table_unlock();
804  */
805 bool hmm_vma_range_done(struct hmm_range *range)
806 {
807         unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
808         struct hmm *hmm;
809
810         if (range->end <= range->start) {
811                 BUG();
812                 return false;
813         }
814
815         hmm = hmm_register(range->vma->vm_mm);
816         if (!hmm) {
817                 memset(range->pfns, 0, sizeof(*range->pfns) * npages);
818                 return false;
819         }
820
821         spin_lock(&hmm->lock);
822         list_del_rcu(&range->list);
823         spin_unlock(&hmm->lock);
824
825         return range->valid;
826 }
827 EXPORT_SYMBOL(hmm_vma_range_done);
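
/*
 * Example (sketch): the first pattern from the comment above written out as
 * code. The mutex standing in for the device page table lock and the
 * "prepare"/"commit" steps are hypothetical driver work; hmm_vma_get_pfns(),
 * hmm_vma_range_done() and the hmm_range fields are the API from this file.
 */
static int my_snapshot_range(struct hmm_range *range, struct mutex *dev_pt_lock)
{
        int ret;

again:
        ret = hmm_vma_get_pfns(range);
        if (ret)
                return ret;

        /*
         * Prepare the device page table update from range->pfns[] here,
         * outside the device page table lock (this part can be slow).
         */

        mutex_lock(dev_pt_lock);
        if (!hmm_vma_range_done(range)) {
                /* The CPU page table changed while we worked: start over. */
                mutex_unlock(dev_pt_lock);
                goto again;
        }
        /* Commit the prepared update to the device page table here. */
        mutex_unlock(dev_pt_lock);

        return 0;
}
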
828
829 /*
830  * hmm_vma_fault() - try to fault some address in a virtual address range
831  * @range: range being faulted
832  * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
833  * Returns: 0 on success, error otherwise (-EAGAIN means mmap_sem has been dropped)
834  *
835  * This is similar to a regular CPU page fault except that it will not trigger
836  * any memory migration if the memory being faulted is not accessible by CPUs.
837  *
838  * On error, for one virtual address in the range, the function will mark the
839  * corresponding HMM pfn entry with an error flag.
840  *
841  * Expected use pattern:
842  * retry:
843  *   down_read(&mm->mmap_sem);
844  *   // Find vma and address device wants to fault, initialize hmm_pfn_t
845  *   // array accordingly
846  *   ret = hmm_vma_fault(range, write, block);
847  *   switch (ret) {
848  *   case -EAGAIN:
849  *     hmm_vma_range_done(range);
850  *     // You might want to rate limit or yield to play nicely; you may
851  *     // also commit any valid pfn in the array, assuming that you got
852  *     // true back from hmm_vma_range_done()
853  *     goto retry;
854  *   case 0:
855  *     break;
856  *   case -ENOMEM:
857  *   case -EINVAL:
858  *   case -EPERM:
859  *   default:
860  *     // Handle error !
861  *     up_read(&mm->mmap_sem)
862  *     return;
863  *   }
864  *   // Take device driver lock that serialize device page table update
865  *   driver_lock_device_page_table_update();
866  *   hmm_vma_range_done(range);
867  *   // Commit pfns we got from hmm_vma_fault()
868  *   driver_unlock_device_page_table_update();
869  *   up_read(&mm->mmap_sem)
870  *
871  * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURN SUCCESS (0)
872  * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
873  *
874  * YOU HAVE BEEN WARNED !
875  */
876 int hmm_vma_fault(struct hmm_range *range, bool block)
877 {
878         struct vm_area_struct *vma = range->vma;
879         unsigned long start = range->start;
880         struct hmm_vma_walk hmm_vma_walk;
881         struct mm_walk mm_walk;
882         struct hmm *hmm;
883         int ret;
884
885         /* Sanity check, this really should not happen ! */
886         if (range->start < vma->vm_start || range->start >= vma->vm_end)
887                 return -EINVAL;
888         if (range->end < vma->vm_start || range->end > vma->vm_end)
889                 return -EINVAL;
890
891         hmm = hmm_register(vma->vm_mm);
892         if (!hmm) {
893                 hmm_pfns_clear(range, range->pfns, range->start, range->end);
894                 return -ENOMEM;
895         }
896         /* Caller must have registered a mirror using hmm_mirror_register() */
897         if (!hmm->mmu_notifier.ops)
898                 return -EINVAL;
899
900         /* FIXME support hugetlb fs */
901         if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
902                         vma_is_dax(vma)) {
903                 hmm_pfns_special(range);
904                 return -EINVAL;
905         }
906
907         if (!(vma->vm_flags & VM_READ)) {
908                 /*
909                  * If vma do not allow read access, then assume that it does
910                  * If the vma does not allow read access, then assume that it
911                  * does not allow write access either. Architectures that allow
912                  * write without read access are not supported by HMM, because
913                  * operations such as atomic access would not work.
914                 hmm_pfns_clear(range, range->pfns, range->start, range->end);
915                 return -EPERM;
916         }
917
918         /* Initialize range to track CPU page table update */
919         spin_lock(&hmm->lock);
920         range->valid = true;
921         list_add_rcu(&range->list, &hmm->ranges);
922         spin_unlock(&hmm->lock);
923
924         hmm_vma_walk.fault = true;
925         hmm_vma_walk.block = block;
926         hmm_vma_walk.range = range;
927         mm_walk.private = &hmm_vma_walk;
928         hmm_vma_walk.last = range->start;
929
930         mm_walk.vma = vma;
931         mm_walk.mm = vma->vm_mm;
932         mm_walk.pte_entry = NULL;
933         mm_walk.test_walk = NULL;
934         mm_walk.hugetlb_entry = NULL;
935         mm_walk.pmd_entry = hmm_vma_walk_pmd;
936         mm_walk.pte_hole = hmm_vma_walk_hole;
937
938         do {
939                 ret = walk_page_range(start, range->end, &mm_walk);
940                 start = hmm_vma_walk.last;
941         } while (ret == -EAGAIN);
942
943         if (ret) {
944                 unsigned long i;
945
946                 i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
947                 hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last,
948                                range->end);
949                 hmm_vma_range_done(range);
950         }
951         return ret;
952 }
953 EXPORT_SYMBOL(hmm_vma_fault);
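
/*
 * Example (sketch): the "Expected use pattern" above as code. The mutex
 * protecting the device page table and the commit step are hypothetical;
 * range->vma, range->start/end and range->pfns[] are assumed to have been
 * set up by the caller as described in the comment.
 */
static int my_fault_range(struct mm_struct *mm, struct hmm_range *range,
                          struct mutex *dev_pt_lock)
{
        int ret;

retry:
        down_read(&mm->mmap_sem);
        /* Re-validate range->vma here if needed: mmap_sem may have been dropped. */
        ret = hmm_vma_fault(range, true);
        if (ret == -EAGAIN) {
                /* mmap_sem was dropped for us; stop monitoring and retry. */
                hmm_vma_range_done(range);
                goto retry;
        }
        if (ret) {
                up_read(&mm->mmap_sem);
                return ret;
        }

        mutex_lock(dev_pt_lock);
        hmm_vma_range_done(range);
        /* Commit range->pfns[] to the device page table here. */
        mutex_unlock(dev_pt_lock);
        up_read(&mm->mmap_sem);

        return 0;
}
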
954 #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
955
956
957 #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) ||  IS_ENABLED(CONFIG_DEVICE_PUBLIC)
958 struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
959                                        unsigned long addr)
960 {
961         struct page *page;
962
963         page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
964         if (!page)
965                 return NULL;
966         lock_page(page);
967         return page;
968 }
969 EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
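
/*
 * Example (sketch): a device-to-system migration path would use the helper
 * above to get its destination page. The copy step is a hypothetical driver
 * operation; the page comes back locked, as the helper guarantees.
 */
static struct page *my_alloc_migration_target(struct vm_area_struct *vma,
                                              unsigned long addr)
{
        struct page *page;

        page = hmm_vma_alloc_locked_page(vma, addr);
        if (!page)
                return NULL;

        /* Copy the data out of the device memory into this locked page here. */
        return page;
}
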
970
971
972 static void hmm_devmem_ref_release(struct percpu_ref *ref)
973 {
974         struct hmm_devmem *devmem;
975
976         devmem = container_of(ref, struct hmm_devmem, ref);
977         complete(&devmem->completion);
978 }
979
980 static void hmm_devmem_ref_exit(void *data)
981 {
982         struct percpu_ref *ref = data;
983         struct hmm_devmem *devmem;
984
985         devmem = container_of(ref, struct hmm_devmem, ref);
986         percpu_ref_exit(ref);
987         devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
988 }
989
990 static void hmm_devmem_ref_kill(void *data)
991 {
992         struct percpu_ref *ref = data;
993         struct hmm_devmem *devmem;
994
995         devmem = container_of(ref, struct hmm_devmem, ref);
996         percpu_ref_kill(ref);
997         wait_for_completion(&devmem->completion);
998         devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
999 }
1000
1001 static int hmm_devmem_fault(struct vm_area_struct *vma,
1002                             unsigned long addr,
1003                             const struct page *page,
1004                             unsigned int flags,
1005                             pmd_t *pmdp)
1006 {
1007         struct hmm_devmem *devmem = page->pgmap->data;
1008
1009         return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
1010 }
1011
1012 static void hmm_devmem_free(struct page *page, void *data)
1013 {
1014         struct hmm_devmem *devmem = data;
1015
1016         page->mapping = NULL;
1017
1018         devmem->ops->free(devmem, page);
1019 }
1020
1021 static DEFINE_MUTEX(hmm_devmem_lock);
1022 static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);
1023
1024 static void hmm_devmem_radix_release(struct resource *resource)
1025 {
1026         resource_size_t key;
1027
1028         mutex_lock(&hmm_devmem_lock);
1029         for (key = resource->start;
1030              key <= resource->end;
1031              key += PA_SECTION_SIZE)
1032                 radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
1033         mutex_unlock(&hmm_devmem_lock);
1034 }
1035
1036 static void hmm_devmem_release(struct device *dev, void *data)
1037 {
1038         struct hmm_devmem *devmem = data;
1039         struct resource *resource = devmem->resource;
1040         unsigned long start_pfn, npages;
1041         struct zone *zone;
1042         struct page *page;
1043
1044         if (percpu_ref_tryget_live(&devmem->ref)) {
1045                 dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
1046                 percpu_ref_put(&devmem->ref);
1047         }
1048
1049         /* pages are dead and unused, undo the arch mapping */
1050         start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
1051         npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
1052
1053         page = pfn_to_page(start_pfn);
1054         zone = page_zone(page);
1055
1056         mem_hotplug_begin();
1057         if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
1058                 __remove_pages(zone, start_pfn, npages, NULL);
1059         else
1060                 arch_remove_memory(start_pfn << PAGE_SHIFT,
1061                                    npages << PAGE_SHIFT, NULL);
1062         mem_hotplug_done();
1063
1064         hmm_devmem_radix_release(resource);
1065 }
1066
1067 static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
1068 {
1069         resource_size_t key, align_start, align_size, align_end;
1070         struct device *device = devmem->device;
1071         int ret, nid, is_ram;
1072
1073         align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
1074         align_size = ALIGN(devmem->resource->start +
1075                            resource_size(devmem->resource),
1076                            PA_SECTION_SIZE) - align_start;
1077
1078         is_ram = region_intersects(align_start, align_size,
1079                                    IORESOURCE_SYSTEM_RAM,
1080                                    IORES_DESC_NONE);
1081         if (is_ram == REGION_MIXED) {
1082                 WARN_ONCE(1, "%s attempted on mixed region %pr\n",
1083                                 __func__, devmem->resource);
1084                 return -ENXIO;
1085         }
1086         if (is_ram == REGION_INTERSECTS)
1087                 return -ENXIO;
1088
1089         if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY)
1090                 devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
1091         else
1092                 devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
1093
1094         devmem->pagemap.res = *devmem->resource;
1095         devmem->pagemap.page_fault = hmm_devmem_fault;
1096         devmem->pagemap.page_free = hmm_devmem_free;
1097         devmem->pagemap.dev = devmem->device;
1098         devmem->pagemap.ref = &devmem->ref;
1099         devmem->pagemap.data = devmem;
1100
1101         mutex_lock(&hmm_devmem_lock);
1102         align_end = align_start + align_size - 1;
1103         for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
1104                 struct hmm_devmem *dup;
1105
1106                 dup = radix_tree_lookup(&hmm_devmem_radix,
1107                                         key >> PA_SECTION_SHIFT);
1108                 if (dup) {
1109                         dev_err(device, "%s: collides with mapping for %s\n",
1110                                 __func__, dev_name(dup->device));
1111                         mutex_unlock(&hmm_devmem_lock);
1112                         ret = -EBUSY;
1113                         goto error;
1114                 }
1115                 ret = radix_tree_insert(&hmm_devmem_radix,
1116                                         key >> PA_SECTION_SHIFT,
1117                                         devmem);
1118                 if (ret) {
1119                         dev_err(device, "%s: failed: %d\n", __func__, ret);
1120                         mutex_unlock(&hmm_devmem_lock);
1121                         goto error_radix;
1122                 }
1123         }
1124         mutex_unlock(&hmm_devmem_lock);
1125
1126         nid = dev_to_node(device);
1127         if (nid < 0)
1128                 nid = numa_mem_id();
1129
1130         mem_hotplug_begin();
1131         /*
1132          * For device private memory we call add_pages() as we only need to
1133          * allocate and initialize struct page for the device memory.
1134          * Moreover, the device memory is not accessible by the CPU, so we
1135          * do not want to create a linear mapping for it like
1136          * arch_add_memory() would do.
1137          *
1138          * For device public memory, which is accessible by the CPU, we do
1139          * want the linear mapping and thus use arch_add_memory().
1140          */
1141         if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
1142                 ret = arch_add_memory(nid, align_start, align_size, NULL,
1143                                 false);
1144         else
1145                 ret = add_pages(nid, align_start >> PAGE_SHIFT,
1146                                 align_size >> PAGE_SHIFT, NULL, false);
1147         if (ret) {
1148                 mem_hotplug_done();
1149                 goto error_add_memory;
1150         }
1151         move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
1152                                 align_start >> PAGE_SHIFT,
1153                                 align_size >> PAGE_SHIFT, NULL);
1154         mem_hotplug_done();
1155
1156         /*
1157          * Initialization of the pages has been deferred until now in order
1158          * to allow us to do the work while not holding the hotplug lock.
1159          */
1160         memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
1161                                 align_start >> PAGE_SHIFT,
1162                                 align_size >> PAGE_SHIFT, &devmem->pagemap);
1163
1164         return 0;
1165
1166 error_add_memory:
1167         untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
1168 error_radix:
1169         hmm_devmem_radix_release(devmem->resource);
1170 error:
1171         return ret;
1172 }
1173
1174 static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
1175 {
1176         struct hmm_devmem *devmem = data;
1177
1178         return devmem->resource == match_data;
1179 }
1180
1181 static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
1182 {
1183         devres_release(devmem->device, &hmm_devmem_release,
1184                        &hmm_devmem_match, devmem->resource);
1185 }
1186
1187 /*
1188  * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
1189  *
1190  * @ops: memory event device driver callback (see struct hmm_devmem_ops)
1191  * @device: device struct to bind the resource to
1192  * @size: size in bytes of the device memory to add
1193  * Returns: pointer to the new hmm_devmem struct, or an ERR_PTR() on error
1194  *
1195  * This function first finds an empty range of physical addresses big enough to
1196  * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
1197  * in turn allocates struct pages. It does not do anything beyond that; all
1198  * events affecting the memory will go through the various callbacks provided
1199  * by the hmm_devmem_ops struct.
1200  *
1201  * The device driver should call this function during device initialization
1202  * and is then responsible for the memory management. HMM only provides helpers.
1203  */
1204 struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
1205                                   struct device *device,
1206                                   unsigned long size)
1207 {
1208         struct hmm_devmem *devmem;
1209         resource_size_t addr;
1210         int ret;
1211
1212         dev_pagemap_get_ops();
1213
1214         devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
1215                                    GFP_KERNEL, dev_to_node(device));
1216         if (!devmem)
1217                 return ERR_PTR(-ENOMEM);
1218
1219         init_completion(&devmem->completion);
1220         devmem->pfn_first = -1UL;
1221         devmem->pfn_last = -1UL;
1222         devmem->resource = NULL;
1223         devmem->device = device;
1224         devmem->ops = ops;
1225
1226         ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
1227                               0, GFP_KERNEL);
1228         if (ret)
1229                 goto error_percpu_ref;
1230
1231         ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
1232         if (ret)
1233                 goto error_devm_add_action;
1234
1235         size = ALIGN(size, PA_SECTION_SIZE);
1236         addr = min((unsigned long)iomem_resource.end,
1237                    (1UL << MAX_PHYSMEM_BITS) - 1);
1238         addr = addr - size + 1UL;
1239
1240         /*
1241          * FIXME add a new helper to quickly walk resource tree and find free
1242          * range
1243          *
1244          * FIXME what about ioport_resource resource ?
1245          */
1246         for (; addr > size && addr >= iomem_resource.start; addr -= size) {
1247                 ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
1248                 if (ret != REGION_DISJOINT)
1249                         continue;
1250
1251                 devmem->resource = devm_request_mem_region(device, addr, size,
1252                                                            dev_name(device));
1253                 if (!devmem->resource) {
1254                         ret = -ENOMEM;
1255                         goto error_no_resource;
1256                 }
1257                 break;
1258         }
1259         if (!devmem->resource) {
1260                 ret = -ERANGE;
1261                 goto error_no_resource;
1262         }
1263
1264         devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
1265         devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
1266         devmem->pfn_last = devmem->pfn_first +
1267                            (resource_size(devmem->resource) >> PAGE_SHIFT);
1268
1269         ret = hmm_devmem_pages_create(devmem);
1270         if (ret)
1271                 goto error_pages;
1272
1273         devres_add(device, devmem);
1274
1275         ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
1276         if (ret) {
1277                 hmm_devmem_remove(devmem);
1278                 return ERR_PTR(ret);
1279         }
1280
1281         return devmem;
1282
1283 error_pages:
1284         devm_release_mem_region(device, devmem->resource->start,
1285                                 resource_size(devmem->resource));
1286 error_no_resource:
1287 error_devm_add_action:
1288         hmm_devmem_ref_kill(&devmem->ref);
1289         hmm_devmem_ref_exit(&devmem->ref);
1290 error_percpu_ref:
1291         devres_free(devmem);
1292         return ERR_PTR(ret);
1293 }
1294 EXPORT_SYMBOL(hmm_devmem_add);
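
/*
 * Example (sketch): hotplugging device private memory at probe time. The two
 * callbacks are the ones struct hmm_devmem_ops requires (they are invoked via
 * hmm_devmem_fault()/hmm_devmem_free() above, so the fault return type follows
 * hmm_devmem_fault()); their bodies, my_probe() and MY_DEVMEM_SIZE are
 * hypothetical driver details.
 */
#define MY_DEVMEM_SIZE  (1UL << 30)     /* hypothetical: 1GB of device memory */

static int my_devmem_fault(struct hmm_devmem *devmem,
                           struct vm_area_struct *vma,
                           unsigned long addr,
                           const struct page *page,
                           unsigned int flags,
                           pmd_t *pmdp)
{
        /* Migrate the device page back to system memory here. */
        return VM_FAULT_SIGBUS;
}

static void my_devmem_free(struct hmm_devmem *devmem, struct page *page)
{
        /* Hand the backing device memory back to the driver allocator here. */
}

static const struct hmm_devmem_ops my_devmem_ops = {
        .fault  = my_devmem_fault,
        .free   = my_devmem_free,
};

static int my_probe(struct device *device)
{
        struct hmm_devmem *devmem;

        devmem = hmm_devmem_add(&my_devmem_ops, device, MY_DEVMEM_SIZE);
        if (IS_ERR(devmem))
                return PTR_ERR(devmem);

        /* Pages in [devmem->pfn_first, devmem->pfn_last) now have struct page. */
        return 0;
}
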
1295
1296 struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
1297                                            struct device *device,
1298                                            struct resource *res)
1299 {
1300         struct hmm_devmem *devmem;
1301         int ret;
1302
1303         if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
1304                 return ERR_PTR(-EINVAL);
1305
1306         dev_pagemap_get_ops();
1307
1308         devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
1309                                    GFP_KERNEL, dev_to_node(device));
1310         if (!devmem)
1311                 return ERR_PTR(-ENOMEM);
1312
1313         init_completion(&devmem->completion);
1314         devmem->pfn_first = -1UL;
1315         devmem->pfn_last = -1UL;
1316         devmem->resource = res;
1317         devmem->device = device;
1318         devmem->ops = ops;
1319
1320         ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
1321                               0, GFP_KERNEL);
1322         if (ret)
1323                 goto error_percpu_ref;
1324
1325         ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
1326         if (ret)
1327                 goto error_devm_add_action;
1328
1329
1330         devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
1331         devmem->pfn_last = devmem->pfn_first +
1332                            (resource_size(devmem->resource) >> PAGE_SHIFT);
1333
1334         ret = hmm_devmem_pages_create(devmem);
1335         if (ret)
1336                 goto error_devm_add_action;
1337
1338         devres_add(device, devmem);
1339
1340         ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
1341         if (ret) {
1342                 hmm_devmem_remove(devmem);
1343                 return ERR_PTR(ret);
1344         }
1345
1346         return devmem;
1347
1348 error_devm_add_action:
1349         hmm_devmem_ref_kill(&devmem->ref);
1350         hmm_devmem_ref_exit(&devmem->ref);
1351 error_percpu_ref:
1352         devres_free(devmem);
1353         return ERR_PTR(ret);
1354 }
1355 EXPORT_SYMBOL(hmm_devmem_add_resource);
1356
1357 /*
1358  * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
1359  *
1360  * @devmem: hmm_devmem struct used to track and manage the ZONE_DEVICE memory
1361  *
1362  * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
1363  * of the device driver. It will free struct page and remove the resource that
1364  * reserved the physical address range for this device memory.
1365  */
1366 void hmm_devmem_remove(struct hmm_devmem *devmem)
1367 {
1368         resource_size_t start, size;
1369         struct device *device;
1370         bool cdm = false;
1371
1372         if (!devmem)
1373                 return;
1374
1375         device = devmem->device;
1376         start = devmem->resource->start;
1377         size = resource_size(devmem->resource);
1378
1379         cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
1380         hmm_devmem_ref_kill(&devmem->ref);
1381         hmm_devmem_ref_exit(&devmem->ref);
1382         hmm_devmem_pages_remove(devmem);
1383
1384         if (!cdm)
1385                 devm_release_mem_region(device, start, size);
1386 }
1387 EXPORT_SYMBOL(hmm_devmem_remove);
1388
1389 /*
1390  * A device driver that wants to handle memory from multiple devices through a
1391  * single fake device can use hmm_device to do so. This is purely a helper;
1392  * it is not needed in order to use any other HMM functionality.
1393  */
1394 #define HMM_DEVICE_MAX 256
1395
1396 static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
1397 static DEFINE_SPINLOCK(hmm_device_lock);
1398 static struct class *hmm_device_class;
1399 static dev_t hmm_device_devt;
1400
1401 static void hmm_device_release(struct device *device)
1402 {
1403         struct hmm_device *hmm_device;
1404
1405         hmm_device = container_of(device, struct hmm_device, device);
1406         spin_lock(&hmm_device_lock);
1407         clear_bit(hmm_device->minor, hmm_device_mask);
1408         spin_unlock(&hmm_device_lock);
1409
1410         kfree(hmm_device);
1411 }
1412
1413 struct hmm_device *hmm_device_new(void *drvdata)
1414 {
1415         struct hmm_device *hmm_device;
1416
1417         hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
1418         if (!hmm_device)
1419                 return ERR_PTR(-ENOMEM);
1420
1421         spin_lock(&hmm_device_lock);
1422         hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
1423         if (hmm_device->minor >= HMM_DEVICE_MAX) {
1424                 spin_unlock(&hmm_device_lock);
1425                 kfree(hmm_device);
1426                 return ERR_PTR(-EBUSY);
1427         }
1428         set_bit(hmm_device->minor, hmm_device_mask);
1429         spin_unlock(&hmm_device_lock);
1430
1431         dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
1432         hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
1433                                         hmm_device->minor);
1434         hmm_device->device.release = hmm_device_release;
1435         dev_set_drvdata(&hmm_device->device, drvdata);
1436         hmm_device->device.class = hmm_device_class;
1437         device_initialize(&hmm_device->device);
1438
1439         return hmm_device;
1440 }
1441 EXPORT_SYMBOL(hmm_device_new);
1442
1443 void hmm_device_put(struct hmm_device *hmm_device)
1444 {
1445         put_device(&hmm_device->device);
1446 }
1447 EXPORT_SYMBOL(hmm_device_put);
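
/*
 * Example (sketch): using the fake-device helper above. my_drvdata, the global
 * and the init/fini pairing are hypothetical; hmm_device_new() and
 * hmm_device_put() are the helpers from this file. The embedded struct device
 * is what a driver would then hand to hmm_devmem_add() for each chunk of
 * device memory it wants to manage through the single fake device.
 */
static struct hmm_device *my_hmm_device;

static int my_hmm_device_init(void *my_drvdata)
{
        my_hmm_device = hmm_device_new(my_drvdata);
        if (IS_ERR(my_hmm_device))
                return PTR_ERR(my_hmm_device);
        return 0;
}

static void my_hmm_device_fini(void)
{
        hmm_device_put(my_hmm_device);
}
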
1448
1449 static int __init hmm_init(void)
1450 {
1451         int ret;
1452
1453         ret = alloc_chrdev_region(&hmm_device_devt, 0,
1454                                   HMM_DEVICE_MAX,
1455                                   "hmm_device");
1456         if (ret)
1457                 return ret;
1458
1459         hmm_device_class = class_create(THIS_MODULE, "hmm_device");
1460         if (IS_ERR(hmm_device_class)) {
1461                 unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
1462                 return PTR_ERR(hmm_device_class);
1463         }
1464         return 0;
1465 }
1466
1467 device_initcall(hmm_init);
1468 #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */