/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>

struct mmu_notifier_mm;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
 * that moves the range
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this like
 * madvise() or replacing a page by another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
 * range, i.e. using the vma access permission (vm_page_prot) to update the
 * whole range is enough, no need to inspect changes to the CPU page table
 * (mprotect() update)
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
 * flag for pages in the range, so to mirror those changes the user must
 * inspect the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
 * access flags). The user should soft-dirty the page in the end callback to
 * make sure that anyone relying on soft dirtiness catches pages that might be
 * written through non-CPU mappings.
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 * that the mm refcount is zero and the range is no longer accessible.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
	MMU_NOTIFY_RELEASE,
};

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, thus leading to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM is
	 * test-and-clearing the young/accessed bitflag in the
	 * pte. This way the VM will provide proper aging to the
	 * accesses to the page through the secondary MMUs and not
	 * only to the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called in cases where the pte mapping to a page is
	 * changed: for example, when ksm remaps the pte to point to a new
	 * shared page.
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If the blockable argument is set to false then the callback cannot
	 * sleep and has to return with -EAGAIN; 0 should be returned
	 * otherwise. Please note that if invalidate_range_start approves
	 * a non-blocking behavior then the same applies to
	 * invalidate_range_end.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For a more in-depth
	 * discussion on this see Documentation/vm/mmu_notifier.rst
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);

	/*
	 * These callbacks are used with the get/put interface to manage the
	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
	 * notifier for use with the mm.
	 *
	 * free_notifier() is only called after the mmu_notifier has been
	 * fully put, calls to any ops callback are prevented and no ops
	 * callbacks are currently running. It is called from a SRCU callback
	 * and cannot sleep.
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *mn);
};
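
/*
 * Illustrative sketch (not part of this header): a minimal driver-side
 * mmu_notifier_ops that honours the non-blocking protocol described above.
 * The "example_" identifiers are hypothetical.
 *
 *	static int example_invalidate_range_start(struct mmu_notifier *mn,
 *				const struct mmu_notifier_range *range)
 *	{
 *		// back off if we would have to sleep but must not
 *		if (!mmu_notifier_range_blockable(range))
 *			return -EAGAIN;
 *		// tear down secondary mappings in [range->start, range->end)
 *		return 0;
 *	}
 *
 *	static const struct mmu_notifier_ops example_ops = {
 *		.invalidate_range_start = example_invalidate_range_start,
 *	};
 */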

/*
 * The notifier chains are protected by mmap_sem and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_sem locks are taken.
 *
 * Therefore notifier chains can only be traversed when either:
 *
 * 1. mmap_sem is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release).
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
	struct mm_struct *mm;
	struct rcu_head rcu;
	unsigned int users;
};

/**
 * struct mmu_interval_notifier_ops
 * @invalidate: Upon return the caller must stop using any SPTEs within this
 *              range. This function can sleep. Return false only if sleeping
 *              was required but mmu_notifier_range_blockable(range) is false.
 */
struct mmu_interval_notifier_ops {
	bool (*invalidate)(struct mmu_interval_notifier *mni,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
};

struct mmu_interval_notifier {
	struct interval_tree_node interval_tree;
	const struct mmu_interval_notifier_ops *ops;
	struct mm_struct *mm;
	struct hlist_node deferred_item;
	unsigned long invalidate_seq;
};

#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

struct mmu_notifier_range {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
	enum mmu_notifier_event event;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *ret;

	down_write(&mm->mmap_sem);
	ret = mmu_notifier_get_locked(ops, mm);
	up_write(&mm->mmap_sem);
	return ret;
}
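
/*
 * Sketch of the get/put lifetime (illustrative only; "example_ops" is a
 * hypothetical driver-defined mmu_notifier_ops providing ->alloc_notifier
 * and ->free_notifier):
 *
 *	struct mmu_notifier *mn;
 *
 *	mn = mmu_notifier_get(&example_ops, current->mm);
 *	if (IS_ERR(mn))
 *		return PTR_ERR(mn);
 *	// ... callbacks may run against the mm until the last put ...
 *	mmu_notifier_put(mn);
 */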

void mmu_notifier_put(struct mmu_notifier *mn);
void mmu_notifier_synchronize(void);

extern int mmu_notifier_register(struct mmu_notifier *mn,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
				    struct mm_struct *mm);

unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *mni, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
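
/*
 * Sketch of registering an interval notifier over a user VA range
 * (illustrative only; everything except the mmu_interval_notifier_* calls is
 * hypothetical):
 *
 *	ret = mmu_interval_notifier_insert(&dev->mni, current->mm,
 *					   user_addr, length, &example_mni_ops);
 *	if (ret)
 *		return ret;
 *	// ... mirror the range; ops->invalidate() fires on CPU changes ...
 *	mmu_interval_notifier_remove(&dev->mni);
 */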

/**
 * mmu_interval_set_seq - Save the invalidation sequence
 * @mni: The mni passed to invalidate
 * @cur_seq: The cur_seq passed to the invalidate() callback
 *
 * This must be called unconditionally from the invalidate callback of a
 * struct mmu_interval_notifier_ops under the same lock that is used to call
 * mmu_interval_read_retry(). It updates the sequence number for later use by
 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
 *
 * If the caller does not call mmu_interval_read_begin() or
 * mmu_interval_read_retry() then this call is not required.
 */
static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
					unsigned long cur_seq)
{
	WRITE_ONCE(mni->invalidate_seq, cur_seq);
}
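
/*
 * Illustrative sketch of an interval notifier invalidate() callback (not part
 * of this header; the "example_" names and the driver lock are hypothetical).
 * Note mmu_interval_set_seq() is called under the same lock that the read
 * side uses for mmu_interval_read_retry():
 *
 *	static bool example_invalidate(struct mmu_interval_notifier *mni,
 *				       const struct mmu_notifier_range *range,
 *				       unsigned long cur_seq)
 *	{
 *		struct example_dev *dev =
 *			container_of(mni, struct example_dev, mni);
 *
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&dev->lock);
 *		else if (!mutex_trylock(&dev->lock))
 *			return false;
 *		mmu_interval_set_seq(mni, cur_seq);
 *		// tear down device mappings in [range->start, range->end)
 *		mutex_unlock(&dev->lock);
 *		return true;
 *	}
 */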

/**
 * mmu_interval_read_retry - End a read side critical section against a VA range
 * @mni: The range
 * @seq: The return of the paired mmu_interval_read_begin()
 *
 * This MUST be called under a user provided lock that is also held
 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
 *
 * Each call should be paired with a single mmu_interval_read_begin() and
 * should be used to conclude the read side.
 *
 * Returns true if an invalidation collided with this critical section, and
 * the caller should retry.
 */
static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
					   unsigned long seq)
{
	return mni->invalidate_seq != seq;
}
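
/*
 * Typical read side pattern (sketch; the driver lock is hypothetical and must
 * be the same lock taken by the invalidate() callback around
 * mmu_interval_set_seq()):
 *
 *	again:
 *		seq = mmu_interval_read_begin(mni);
 *		// ... fault pages / prepare device page table entries ...
 *		mutex_lock(&dev->lock);
 *		if (mmu_interval_read_retry(mni, seq)) {
 *			mutex_unlock(&dev->lock);
 *			goto again;
 *		}
 *		// commit the mappings to the device
 *		mutex_unlock(&dev->lock);
 */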

/**
 * mmu_interval_check_retry - Test if a collision has occurred
 * @mni: The range
 * @seq: The return of the matching mmu_interval_read_begin()
 *
 * This can be used in the critical section between mmu_interval_read_begin()
 * and mmu_interval_read_retry(). A return of true indicates an invalidation
 * has collided with this critical region and a future
 * mmu_interval_read_retry() will return true.
 *
 * False is not reliable and only suggests a collision may not have
 * occurred. It can be called many times and does not have to hold the user
 * provided lock.
 *
 * This call can be used as part of loops and other expensive operations to
 * expedite a retry.
 */
static inline bool mmu_interval_check_retry(struct mmu_interval_notifier *mni,
					    unsigned long seq)
{
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	return READ_ONCE(mni->invalidate_seq) != seq;
}

extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
						bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_mm_destroy(mm);
}

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   enum mmu_notifier_event event,
					   unsigned flags,
					   struct vm_area_struct *vma,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->vma = vma;
	range->event = event;
	range->mm = mm;
	range->start = start;
	range->end = end;
	range->flags = flags;
}
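
/*
 * Typical caller pattern (sketch): initialise a range, then bracket the CPU
 * page table update with the start/end notifiers. The vma/mm/start/end
 * variables are assumed to come from the caller's context.
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	// ... clear or replace the CPU page table entries for [start, end) ...
 *	mmu_notifier_invalidate_range_end(&range);
 */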

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address + PAGE_SIZE); \
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address + PMD_SIZE); \
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * This is safe to start by updating the secondary MMUs, because the primary
 * MMU pte invalidate must have already happened with a ptep_clear_flush()
 * before set_pte_at_notify() has been invoked. Updating the secondary MMUs
 * first is required when we change both the protection of the mapping from
 * read-only to read-write and the pfn (like during copy on write page
 * faults). Otherwise the old page would remain mapped read-only in the
 * secondary MMUs after the new page is already writable by some CPU through
 * the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
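
/*
 * Ordering sketch for the copy-on-write case described above (illustrative
 * only; locking, error handling and the new_page setup are omitted):
 *
 *	// invalidate the old, read-only pte in the primary MMU first
 *	ptep_clear_flush(vma, address, ptep);
 *	// then update the secondary MMUs and install the new, writable pte;
 *	// set_pte_at_notify() issues the change_pte notification first
 *	set_pte_at_notify(mm, address, ptep,
 *			  mk_pte(new_page, vma->vm_page_prot));
 */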

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
	_mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */