#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
	page_vma_mapped_walk_done(pvmw);

static bool map_pte(struct page_vma_mapped_walk *pvmw)
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
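	/*
	 * Unless the caller asked for a synchronous check (PVMW_SYNC), look
	 * at the entry locklessly first and bail out when it clearly cannot
	 * map the page, avoiding the pte lock acquisition below.
	 */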
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
			if (!pte_present(*pvmw->pte))
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
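/*
 * check_pte() decides whether the entry that map_pte() mapped actually
 * refers to pvmw->page: a migration entry pointing into the page for
 * PVMW_MIGRATION walks, otherwise a present pte (or a device-private swap
 * entry) for the page or one of its subpages.
 */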
static bool check_pte(struct page_vma_mapped_walk *pvmw)
	if (pvmw->flags & PVMW_MIGRATION) {
#ifdef CONFIG_MIGRATION
		if (!is_swap_pte(*pvmw->pte))
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
		if (migration_entry_to_page(entry) - pvmw->page >=
				hpage_nr_pages(pvmw->page)) {
		if (migration_entry_to_page(entry) < pvmw->page)
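		/*
		 * Handle un-addressable ZONE_DEVICE memory: a device-private
		 * page is mapped via a special swap entry, but it still
		 * counts as a mapping of the page for this walk.
		 */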
		if (is_swap_pte(*pvmw->pte)) {
			entry = pte_to_swp_entry(*pvmw->pte);
			if (is_device_private_entry(entry) &&
			    device_private_entry_to_page(entry) == pvmw->page)

		if (!pte_present(*pvmw->pte))

		/* THP can be referenced by any subpage */
		if (pte_page(*pvmw->pte) - pvmw->page >=
				hpage_nr_pages(pvmw->page)) {
		if (pte_page(*pvmw->pte) < pvmw->page)
/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
 * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
 * adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);
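	/*
	 * hugetlb pages are mapped at pmd or pud level; huge_pte_offset()
	 * hands back the entry for whichever level maps the page, so the
	 * hugetlb case is resolved here without the generic walk below.
	 */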
	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address,
					    PAGE_SIZE << compound_order(page));

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
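	/*
	 * A transhuge pmd, or a pmd-level migration entry, covers the whole
	 * THP: take the pmd lock and resolve the walk at this level instead
	 * of descending to individual ptes.
	 */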
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
		} else if (!pmd_present(*pvmw->pmd)) {
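			/*
			 * The pmd is non-present: with THP migration support
			 * this may be a pmd-level migration entry that still
			 * identifies the page.
			 */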
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);

			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
	} else if (!pmd_present(pmde)) {
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
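				/*
				 * Crossed into the next page table: the
				 * previously mapped pte and its lock no
				 * longer apply.
				 */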
				pte_unmap(pvmw->pte);
					spin_unlock(pvmw->ptl);
		} while (pte_none(*pvmw->pte));

			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
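/*
 * Illustrative sketch, not part of the original file: a typical caller runs
 * page_vma_mapped_walk() in a loop and handles both the PMD-mapped case
 * (pvmw.pte == NULL) and the PTE-mapped case, in the spirit of the rmap
 * walkers in mm/rmap.c. The function name below is hypothetical.
 */
static int example_count_mappings_in_vma(struct page *page,
					 struct vm_area_struct *vma,
					 unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int mapcount = 0;

	/*
	 * Each successful iteration returns with pvmw.ptl held and either
	 * pvmw.pte set (PTE-mapped page, or one subpage of a THP) or only
	 * pvmw.pmd set (PMD-mapped THP); the next call drops the lock and
	 * moves on to the next relevant page table entry.
	 */
	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte)
			mapcount++;	/* one pte of the page or THP */
		else
			mapcount += hpage_nr_pages(page); /* PMD-mapped THP */
	}

	return mapcount;
}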
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
	struct page_vma_mapped_walk pvmw = {
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
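	/*
	 * A THP can start before vm_start; clamp the walk to the first
	 * address that is actually covered by the VMA.
	 */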
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
	page_vma_mapped_walk_done(&pvmw);
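/*
 * Illustrative sketch, not part of the original file: page_mapped_in_vma()
 * is a one-shot yes/no check, used for example by collect_procs_anon() in
 * mm/memory-failure.c to decide which tasks really have a poisoned page
 * mapped. The helper below is hypothetical, only meaningful for anonymous
 * pages, and assumes <linux/pagemap.h> is available for page_to_pgoff().
 */
static bool example_page_mapped_anywhere(struct page *page)
{
	struct anon_vma *av = page_lock_anon_vma_read(page);
	pgoff_t pgoff = page_to_pgoff(page);
	pgoff_t pgoff_end = pgoff + hpage_nr_pages(page) - 1;
	struct anon_vma_chain *vmac;
	bool mapped = false;

	if (!av)
		return false;

	/* Walk every VMA whose range overlaps the page (or THP). */
	anon_vma_interval_tree_foreach(vmac, &av->rb_root, pgoff, pgoff_end) {
		if (page_mapped_in_vma(page, vmac->vma)) {
			mapped = true;
			break;
		}
	}

	page_unlock_anon_vma_read(av);
	return mapped;
}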