/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>

struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;
/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
 * that moves the range.
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this like
 * madvise() or replacing a page by another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
 * range, ie using the vma access permission (vm_page_prot) to update the
 * whole range is enough, no need to inspect changes to the CPU page table
 * (mprotect() syscall).
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
 * flag for pages in the range, so to mirror those changes the user must
 * inspect the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
 * access flags). User should soft dirty the page in the end callback to make
 * sure that anyone relying on soft dirtiness catches pages that might be
 * written through non CPU mappings.
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 * that the mm refcount is zero and the range is no longer accessible.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
	MMU_NOTIFY_RELEASE,
};

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, which would lead to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *subscription,
			struct mm_struct *mm);
	/*
	 * clear_flush_young is called after the VM is
	 * test-and-clearing the young/accessed bitflag in the
	 * pte. This way the VM will provide proper aging to the
	 * accesses to the page through the secondary MMUs and not
	 * only to the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);
	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);
	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *subscription,
			  struct mm_struct *mm,
			  unsigned long address);
	/*
	 * change_pte is called in cases where the pte mapping a page is
	 * changed: for example, when ksm remaps a pte to point to a new
	 * shared page.
	 */
	void (*change_pte)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);
	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount, but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If the blockable argument is set to false then the callback cannot
	 * sleep and has to return with -EAGAIN; 0 should be returned
	 * otherwise. Please note that if invalidate_range_start approves
	 * a non-blocking behavior then the same applies to
	 * invalidate_range_end.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range);
	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For more in depth
	 * discussion on this see Documentation/vm/mmu_notifier.rst
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);
	/*
	 * These callbacks are used with the get/put interface to manage the
	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
	 * notifier for use with the mm.
	 *
	 * free_notifier() is only called after the mmu_notifier has been
	 * fully put, calls to any ops callback are prevented and no ops
	 * callbacks are currently running. It is called from a SRCU callback
	 * and cannot sleep.
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *subscription);
};
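
/*
 * Illustrative sketch (not part of the kernel API) of how a hypothetical
 * driver could tie the get/put lifetime interface together. The names
 * my_notifier, my_notifier_ops, my_alloc_notifier and my_free_notifier are
 * invented for this example; only mmu_notifier_get()/put()/synchronize()
 * and the two ops are real.
 *
 *	struct my_notifier {
 *		struct mmu_notifier notifier;
 *		...
 *	};
 *
 *	static struct mmu_notifier *my_alloc_notifier(struct mm_struct *mm)
 *	{
 *		struct my_notifier *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *
 *		return p ? &p->notifier : ERR_PTR(-ENOMEM);
 *	}
 *
 *	static void my_free_notifier(struct mmu_notifier *subscription)
 *	{
 *		kfree(container_of(subscription, struct my_notifier, notifier));
 *	}
 *
 *	static const struct mmu_notifier_ops my_notifier_ops = {
 *		.alloc_notifier	= my_alloc_notifier,
 *		.free_notifier	= my_free_notifier,
 *	};
 *
 *	subscription = mmu_notifier_get(&my_notifier_ops, current->mm);
 *	...
 *	mmu_notifier_put(subscription);
 *	mmu_notifier_synchronize();	(wait until free_notifier() has run)
 */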
/*
 * The notifier chains are protected by mmap_sem and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_sem locks are taken.
 *
 * Therefore notifier chains can only be traversed when either:
 *
 * 1. mmap_sem is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release).
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
	struct mm_struct *mm;
	struct rcu_head rcu;
	unsigned int users;
};
/**
 * struct mmu_interval_notifier_ops
 * @invalidate: Upon return the caller must stop using any SPTEs within this
 *              range. This function can sleep. Return false only if sleeping
 *              was required but mmu_notifier_range_blockable(range) is false.
 */
struct mmu_interval_notifier_ops {
	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
};
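
/*
 * Illustrative sketch (not kernel code) of a hypothetical invalidate()
 * implementation. The names my_interval, my_lock and my_zap_mappings() are
 * assumptions for this example; the essential steps are taking the same
 * lock the read side uses, calling mmu_interval_set_seq(), and honouring
 * mmu_notifier_range_blockable().
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		struct my_interval *p = container_of(interval_sub,
 *						     struct my_interval, notifier);
 *
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&p->my_lock);
 *		else if (!mutex_trylock(&p->my_lock))
 *			return false;
 *
 *		mmu_interval_set_seq(interval_sub, cur_seq);
 *		my_zap_mappings(p, range->start, range->end);
 *		mutex_unlock(&p->my_lock);
 *		return true;
 *	}
 */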
struct mmu_interval_notifier {
	struct interval_tree_node interval_tree;
	const struct mmu_interval_notifier_ops *ops;
	struct mm_struct *mm;
	struct hlist_node deferred_item;
	unsigned long invalidate_seq;
};
#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

struct mmu_notifier_range {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
	enum mmu_notifier_event event;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->notifier_subscriptions);
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *ret;

	down_write(&mm->mmap_sem);
	ret = mmu_notifier_get_locked(ops, mm);
	up_write(&mm->mmap_sem);
	return ret;
}
void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);

extern int mmu_notifier_register(struct mmu_notifier *subscription,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *subscription,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
				    struct mm_struct *mm);
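
/*
 * Illustrative sketch (not kernel code) of the classic register/unregister
 * style, for users that embed their own mmu_notifier rather than using the
 * get/put interface above. my_ops and my_subscription are hypothetical
 * names used only for this example.
 *
 *	my_subscription.ops = &my_ops;
 *	ret = mmu_notifier_register(&my_subscription, current->mm);
 *	if (ret)
 *		return ret;
 *	...
 *	mmu_notifier_unregister(&my_subscription, current->mm);
 */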
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
/**
 * mmu_interval_set_seq - Save the invalidation sequence
 * @interval_sub: The subscription passed to invalidate
 * @cur_seq: The cur_seq passed to the invalidate() callback
 *
 * This must be called unconditionally from the invalidate callback of a
 * struct mmu_interval_notifier_ops under the same lock that is used to call
 * mmu_interval_read_retry(). It updates the sequence number for later use by
 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
 *
 * If the caller does not call mmu_interval_read_begin() or
 * mmu_interval_read_retry() then this call is not required.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
		     unsigned long cur_seq)
{
	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}
/**
 * mmu_interval_read_retry - End a read side critical section against a VA range
 * @interval_sub: The subscription
 * @seq: The return of the paired mmu_interval_read_begin()
 *
 * This MUST be called under a user provided lock that is also held
 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
 *
 * Each call should be paired with a single mmu_interval_read_begin() and
 * should be used to conclude the read side.
 *
 * Returns true if an invalidation collided with this critical section, and
 * the caller should retry.
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
			unsigned long seq)
{
	return interval_sub->invalidate_seq != seq;
}
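
/*
 * Typical read side pattern, shown as an illustrative sketch only.
 * driver_lock and program_hw_sptes() are hypothetical stand-ins for
 * whatever the user actually does; the lock must be the same one the
 * invalidate() callback takes around mmu_interval_set_seq().
 *
 *	again:
 *		seq = mmu_interval_read_begin(interval_sub);
 *
 *		(collect the pages, e.g. with hmm_range_fault(), without
 *		 holding driver_lock)
 *
 *		mutex_lock(&driver_lock);
 *		if (mmu_interval_read_retry(interval_sub, seq)) {
 *			mutex_unlock(&driver_lock);
 *			goto again;
 *		}
 *		program_hw_sptes();
 *		mutex_unlock(&driver_lock);
 */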
/**
 * mmu_interval_check_retry - Test if a collision has occurred
 * @interval_sub: The subscription
 * @seq: The return of the matching mmu_interval_read_begin()
 *
 * This can be used in the critical section between mmu_interval_read_begin()
 * and mmu_interval_read_retry(). A return of true indicates an invalidation
 * has collided with this critical region and a future
 * mmu_interval_read_retry() will return true.
 *
 * False is not reliable and only suggests a collision may not have
 * occurred. It can be called many times and does not have to hold the user
 * provided lock.
 *
 * This call can be used as part of loops and other expensive operations to
 * expedite a retry.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
			 unsigned long seq)
{
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
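
/*
 * Illustrative use (hypothetical loop): a long page collection step may
 * poll for a collision and bail out early, but the final decision still
 * requires mmu_interval_read_retry() under the user provided lock.
 *
 *	for (addr = start; addr < end; addr += PAGE_SIZE) {
 *		if (mmu_interval_check_retry(interval_sub, seq))
 *			goto again;
 *		...collect one page...
 *	}
 */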
extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
						bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}
static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}
static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}
static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}
static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}
static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}
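
/*
 * Illustrative sketch of the nonblocking variant, modelled on an
 * atomic/oom-style caller; the surrounding setup and error handling are
 * assumptions for this example and "range" is presumed to already have
 * been initialized with mmu_notifier_range_init().
 *
 *	if (mmu_notifier_invalidate_range_start_nonblock(&range))
 *		return -EAGAIN;	(a notifier refused to run without sleeping)
 *	...unmap without sleeping...
 *	mmu_notifier_invalidate_range_end(&range);
 */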
static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}
static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, true);
}
static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	mm->notifier_subscriptions = NULL;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_subscriptions_destroy(mm);
}
static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   enum mmu_notifier_event event,
					   unsigned flags,
					   struct vm_area_struct *vma,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->vma = vma;
	range->event = event;
	range->mm = mm;
	range->start = start;
	range->end = end;
	range->flags = flags;
}
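
/*
 * Illustrative sketch of the usual invalidation bracket around a page
 * table change; the event type and the PTE manipulation in the middle are
 * placeholders for this example.
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	...clear or rewrite the CPU page table entries...
 *	mmu_notifier_invalidate_range_end(&range);
 */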
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
({ \
	int __young; \
	struct vm_area_struct *___vma = __vma; \
	unsigned long ___address = __address; \
	__young = ptep_clear_flush_young(___vma, ___address, __ptep); \
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm, ___address, \
						  ___address + PAGE_SIZE); \
	__young; \
})
#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp) \
({ \
	int __young; \
	struct vm_area_struct *___vma = __vma; \
	unsigned long ___address = __address; \
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm, ___address, \
						  ___address + PMD_SIZE); \
	__young; \
})
#define ptep_clear_young_notify(__vma, __address, __ptep) \
({ \
	int __young; \
	struct vm_area_struct *___vma = __vma; \
	unsigned long ___address = __address; \
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep); \
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
					    ___address + PAGE_SIZE); \
	__young; \
})
#define pmdp_clear_young_notify(__vma, __address, __pmdp) \
({ \
	int __young; \
	struct vm_area_struct *___vma = __vma; \
	unsigned long ___address = __address; \
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp); \
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
					    ___address + PMD_SIZE); \
	__young; \
})
#define ptep_clear_flush_notify(__vma, __address, __ptep) \
({ \
	unsigned long ___addr = __address & PAGE_MASK; \
	struct mm_struct *___mm = (__vma)->vm_mm; \
	pte_t ___pte; \
 \
	___pte = ptep_clear_flush(__vma, __address, __ptep); \
	mmu_notifier_invalidate_range(___mm, ___addr, \
				      ___addr + PAGE_SIZE); \
 \
	___pte; \
})
#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \
({ \
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
	struct mm_struct *___mm = (__vma)->vm_mm; \
	pmd_t ___pmd; \
 \
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \
	mmu_notifier_invalidate_range(___mm, ___haddr, \
				      ___haddr + HPAGE_PMD_SIZE); \
 \
	___pmd; \
})
#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \
({ \
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \
	struct mm_struct *___mm = (__vma)->vm_mm; \
	pud_t ___pud; \
 \
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \
	mmu_notifier_invalidate_range(___mm, ___haddr, \
				      ___haddr + HPAGE_PUD_SIZE); \
 \
	___pud; \
})
/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * This is safe to start by updating the secondary MMUs, because the primary MMU
 * pte invalidate must have already happened with a ptep_clear_flush() before
 * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is
 * required when we change both the protection of the mapping from read-only to
 * read-write and the pfn (like during copy on write page faults). Otherwise the
 * old page would remain mapped readonly in the secondary MMUs after the new
 * page is already writable by some CPU through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte) \
({ \
	struct mm_struct *___mm = __mm; \
	unsigned long ___address = __address; \
	pte_t ___pte = __pte; \
 \
	mmu_notifier_change_pte(___mm, ___address, ___pte); \
	set_pte_at(___mm, ___address, __ptep, ___pte); \
})
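
/*
 * Illustrative ordering sketch for the copy-on-write case described above
 * (a simplified view of the sequence used by mm/memory.c, not a drop-in
 * recipe):
 *
 *	entry = mk_pte(new_page, vma->vm_page_prot);
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 *	ptep_clear_flush_notify(vma, address, ptep);
 *	set_pte_at_notify(mm, address, ptep, entry);
 */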
#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range, event, flags, vma, mm, start, end) \
	_mmu_notifier_range_init(range, start, end)
static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}
#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */