arch/sparc/include/asm/hugetlb.h
(as of commit "hugetlb: introduce generic version of huge_ptep_get_and_clear()")
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SPARC64_HUGETLB_H
#define _ASM_SPARC64_HUGETLB_H

#include <asm/page.h>

#ifdef CONFIG_HUGETLB_PAGE
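/*
 * Patch table delimited by __pud_huge_patch/__pud_huge_patch_end: each
 * entry pairs the address of an instruction with its replacement.  The
 * fault/TSB-miss paths are presumably patched at runtime (from code in
 * arch/sparc/mm/) when PUD-level huge pages are in use.
 */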
struct pud_huge_patch_entry {
        unsigned int addr;
        unsigned int insn;
};
extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end;
#endif

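/*
 * The __HAVE_ARCH_* defines in this file tell <asm-generic/hugetlb.h>
 * (included at the bottom) to skip its generic implementation of the
 * corresponding helper; sparc64 provides its own out-of-line version
 * instead.
 */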
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte);

#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep);

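/*
 * No part of the sparc64 address space is reserved exclusively for huge
 * pages, so any range is acceptable here.
 */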
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr,
                                         unsigned long len)
{
        return 0;
}

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len)
{
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (addr & ~huge_page_mask(h))
                return -EINVAL;
        return 0;
}

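/*
 * Left empty on sparc64: TLB maintenance for huge PTEs is presumably
 * covered by the out-of-line set_huge_pte_at()/huge_ptep_get_and_clear()
 * paths, so no extra flush is issued when a huge PTE is cleared.
 */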
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
{
}

static inline int huge_pte_none(pte_t pte)
{
        return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        return pte_wrprotect(pte);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
{
        pte_t old_pte = *ptep;
        set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}

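/*
 * Write the new PTE and flush the TLB entry only if the PTE actually
 * changed; the return value tells the caller whether a flush happened.
 */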
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             unsigned long addr, pte_t *ptep,
                                             pte_t pte, int dirty)
{
        int changed = !pte_same(*ptep, pte);
        if (changed) {
                set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
                flush_tlb_page(vma, addr);
        }
        return changed;
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
        return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

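/*
 * sparc64 also overrides the generic page-table freeing for hugetlb
 * regions, presumably because its huge pages can be mapped at more than
 * one page-table level; see arch/sparc/mm/hugetlbpage.c.
 */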
#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                            unsigned long end, unsigned long floor,
                            unsigned long ceiling);

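/*
 * Pull in the generic definitions for every hugetlb helper that was not
 * overridden via a __HAVE_ARCH_* define above.
 */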
#include <asm-generic/hugetlb.h>

#endif /* _ASM_SPARC64_HUGETLB_H */