/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e., a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those two for us; we additionally deal with
 * missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
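
/*
 * Test and clear the accessed (young) bit on a huge pmd. The update is
 * done by the generic __pmdp_test_and_clear_young() helper, which should
 * amount to a locked clear of _PAGE_ACCESSED in the pmd, returning
 * whether the bit was set.
 */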
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd. We should not be called for updating an existing
 * pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!pmd_trans_huge(pmd));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	kick_all_cpus_sync();
}
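
/*
 * OR the protection bits from pgprot into the raw pmd value.
 */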
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}
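
/*
 * Build a huge pmd for a page frame number: shift the pfn into place,
 * mask with PTE_RPN_MASK, then apply the protection bits.
 */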
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}
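
/*
 * Build a huge pmd for a struct page; a thin wrapper around pfn_pmd().
 */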
pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}
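
/*
 * Change the protection on a huge pmd: keep only the bits covered by
 * _HPAGE_CHG_MASK (the pfn and the flags that must survive a protection
 * change) and apply the new protection on top.
 */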
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page
 * tables. We use it to preload an HPTE into the hash table corresponding
 * to the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */