// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
	const struct mm_walk_ops *ops = walk->ops;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
	pte_unmap_unlock(pte, ptl);
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
	const struct mm_walk_ops *ops = walk->ops;

	pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || !walk->vma) {
			err = ops->pte_hole(addr, next, walk);
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		err = ops->pmd_entry(pmd, addr, next, walk);
		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		split_huge_pmd(walk->vma, pmd, addr);
		if (pmd_trans_unstable(pmd))
		err = walk_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);
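
/*
 * Illustrative sketch, not part of this file: because walk_pmd_range() hands
 * huge pmds straight to ->pmd_entry(), a handler has to recognize them
 * itself, roughly like below (the callback name is hypothetical):
 *
 *	static int my_pmd_entry(pmd_t *pmd, unsigned long addr,
 *				unsigned long next, struct mm_walk *walk)
 *	{
 *		spinlock_t *ptl = pmd_trans_huge_lock(pmd, walk->vma);
 *
 *		if (ptl) {
 *			// [addr, next) is covered by a single huge pmd
 *			spin_unlock(ptl);
 *			return 0;
 *		}
 *		// normal pmd: ->pte_entry(), if set, will see its ptes
 *		return 0;
 *	}
 */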
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
	const struct mm_walk_ops *ops = walk->ops;

	pud = pud_offset(p4d, addr);
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || !walk->vma) {
			err = ops->pte_hole(addr, next, walk);
		spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
		err = ops->pud_entry(pud, addr, next, walk);
		split_huge_pud(walk->vma, pud, addr);
		if (ops->pmd_entry || ops->pte_entry)
			err = walk_pmd_range(pud, addr, next, walk);
	} while (pud++, addr = next, addr != end);
static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
	const struct mm_walk_ops *ops = walk->ops;

	p4d = p4d_offset(pgd, addr);
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			err = ops->pte_hole(addr, next, walk);
		if (ops->pmd_entry || ops->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
	} while (p4d++, addr = next, addr != end);
static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
	const struct mm_walk_ops *ops = walk->ops;

	pgd = pgd_offset(walk->mm, addr);
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			err = ops->pte_hole(addr, next, walk);
		if (ops->pmd_entry || ops->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
	} while (pgd++, addr = next, addr != end);
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);

	return boundary < end ? boundary : end;
}
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	const struct mm_walk_ops *ops = walk->ops;

		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask, sz);

		if (pte)
			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (ops->pte_hole)
			err = ops->pte_hole(addr, next, walk);
	} while (addr = next, addr != end);
#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */
/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative return value
 * means an error, in which case we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			  struct mm_walk *walk)
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

		return ops->test_walk(start, end, walk);
	/*
	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
	 * range, so we don't walk over it as we do for normal vmas. However,
	 * some callers are interested in handling hole ranges and they don't
	 * want to just ignore any single address range. Such users certainly
	 * define their ->pte_hole() callbacks, so let's delegate them to handle
	 * such a range.
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;
		if (ops->pte_hole)
			err = ops->pte_hole(start, end, walk);
		return err ? err : 1;
	}
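
/*
 * Illustrative sketch, not part of this file: a ->test_walk() callback just
 * follows the convention described above walk_page_test(). For example, a
 * caller that wants to skip mlocked vmas might use (hypothetical name):
 *
 *	static int my_test_walk(unsigned long start, unsigned long end,
 *				struct mm_walk *walk)
 *	{
 *		if (walk->vma->vm_flags & VM_LOCKED)
 *			return 1;	// skip this vma, keep walking
 *		return 0;		// walk this vma normally
 *	}
 */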
static int __walk_page_range(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;
	if (vma && ops->pre_vma) {
		err = ops->pre_vma(start, end, walk);

	if (vma && is_vm_hugetlb_page(vma)) {
		if (ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	if (vma && ops->post_vma)
		ops->post_vma(walk);
/**
 * walk_page_range - walk page table with caller-specific callbacks
 * @mm: mm_struct representing the target process of page table walk
 * @start: start address of the virtual address range
 * @end: end address of the virtual address range
 * @ops: operation to call during the walk
 * @private: private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined as follows:
 *
 *  - 0  : succeeded in handling the current entry, and if you don't reach
 *         the end address yet, continue to walk.
 *  - >0 : succeeded in handling the current entry, and return to the caller
 *         with a caller-specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with the error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * If operations need to be staged before and committed after a vma is walked,
 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
 * since it is intended to handle commit-type operations, can't return any
 * errors.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 * (An illustrative usage sketch follows walk_page_range() below.)
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_sem,
 *   because these functions traverse the vma list and/or access the vma's data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		    unsigned long end, const struct mm_walk_ops *ops,
		    void *private)
	struct vm_area_struct *vma;
	struct mm_walk walk = {

	lockdep_assert_held(&walk.mm->mmap_sem);
	vma = find_vma(walk.mm, start);
	do {
		if (!vma) { /* after the last vma */
		} else if (start < vma->vm_start) { /* outside vma */
			next = min(end, vma->vm_start);
		} else { /* inside vma */
			next = min(end, vma->vm_end);

			err = walk_page_test(start, next, &walk);
			/*
			 * positive return values are purely for
			 * controlling the pagewalk, so should never
			 * be passed to the callers.
			 */
		if (walk.vma || walk.ops->pte_hole)
			err = __walk_page_range(start, next, &walk);
	} while (start = next, start < end);
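
/*
 * Illustrative usage sketch, not part of this file (callback, ops and
 * variable names are hypothetical): counting present ptes in a range could
 * look roughly like this:
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *count = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	static const struct mm_walk_ops count_ops = {
 *		.pte_entry = count_pte,
 *	};
 *
 *	unsigned long count = 0;
 *
 *	down_read(&mm->mmap_sem);
 *	err = walk_page_range(mm, start, end, &count_ops, &count);
 *	up_read(&mm->mmap_sem);
 */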
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		  void *private)
	struct mm_walk walk = {

	lockdep_assert_held(&walk.mm->mmap_sem);

	err = walk_page_test(vma->vm_start, vma->vm_end, &walk);

	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
/**
 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
 * @mapping: Pointer to the struct address_space
 * @first_index: First page offset in the address_space
 * @nr: Number of incremental page offsets to cover
 * @ops: operation to call during the walk
 * @private: private data for callbacks' usage
 *
 * This function walks all memory areas mapped into a struct address_space.
 * The walk is limited to only the given page-size index range, but if
 * the index boundaries cross a huge page-table entry, that entry will be
 * included.
 *
 * Also see walk_page_range() for additional information.
 * (An illustrative locking sketch follows the function below.)
 *
 * Locking:
 *   This function can't require that the struct mm_struct::mmap_sem is held,
 *   since @mapping may be mapped by multiple processes. Instead
 *   @mapping->i_mmap_rwsem must be held. This might have implications in the
 *   callbacks, and it's up to the caller to ensure that the
 *   struct mm_struct::mmap_sem is not needed.
 *
 *   Also this means that a caller can't rely on the struct
 *   vm_area_struct::vm_flags to be constant across a call,
 *   except for immutable flags. Callers requiring this shouldn't use
 *   this function.
 *
 * Return: 0 on success, negative error code on failure, positive number on
 * caller-defined premature termination.
 */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private)
	struct mm_walk walk = {
	struct vm_area_struct *vma;
	pgoff_t vba, vea, cba, cea;
	unsigned long start_addr, end_addr;

	lockdep_assert_held(&mapping->i_mmap_rwsem);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
				  first_index + nr - 1) {
		/* Clip to the vma */
		vea = vba + vma_pages(vma);
		cea = first_index + nr;

		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
		if (start_addr >= end_addr)
			continue;

		walk.mm = vma->vm_mm;

		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);

		err = __walk_page_range(start_addr, end_addr, &walk);
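
/*
 * Illustrative usage sketch, not part of this file ("my_ops" and the index
 * variables are hypothetical): callers serialize against the mapping's
 * interval tree rather than any mm's mmap_sem. Read-locking is shown here;
 * whether read or write locking is required depends on what the callbacks do.
 *
 *	i_mmap_lock_read(mapping);
 *	err = walk_page_mapping(mapping, first_index, nr, &my_ops, &my_data);
 *	i_mmap_unlock_read(mapping);
 */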