/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
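
/*
 * Example (illustrative sketch, not part of this header): iterating all
 * registered huge page sizes.  Assumes a CONFIG_HUGETLB_PAGE kernel and
 * a context where printing is permitted.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: %lu bytes per huge page\n",
 *			h->name, huge_page_size(h));
 */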

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
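
/*
 * Example (sketch of a hugetlbfs-mount-style user): create a subpool
 * that caps usage at "max_pages" and pre-reserves "min_pages" against
 * the global pool, then drop the reference when done.  The max_pages
 * and min_pages locals are hypothetical.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(&default_hstate, max_pages, min_pages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */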

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
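
/*
 * Example (sketch): callers serialize against the hugetlb fault path
 * for a given (mapping, index) pair by hashing into the fault mutex
 * table, as fs/hugetlbfs does around hole punching.
 *
 *	u32 hash;
 *
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */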

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
					pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **pagep)
{
	BUG();
	return 0;
}

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an architecture supports
 * hugepages at the pgd level, it needs to define these.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}
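
/*
 * Example (sketch, modeled on the mmap(MAP_HUGETLB) caller): create an
 * unlinked hugetlbfs file on the internal mount.  "len", "user" and
 * "page_size_log" are caller-provided; HUGETLB_ANONHUGE_INODE selects
 * the no-shmfs-accounting rules described above.
 *
 *	struct file *file;
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  &user, HUGETLB_ANONHUGE_INODE,
 *				  page_size_log);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */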

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
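
/*
 * Worked example: on x86-64 with 4 KiB base pages, a 2 MiB hstate has
 * order 9, so huge_page_size() is 4096 << 9 = 2 MiB, huge_page_shift()
 * is 21, pages_per_huge_page() is 512, and blocks_per_huge_page() is
 * 2 MiB / 512 = 4096 sectors of 512 bytes.
 */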

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check differs from the migration check: it determines
 * whether a huge page should be placed in a movable zone.  Movability
 * matters only if the huge page size is supported for migration at all;
 * there is no reason for a huge page to be movable if it is not
 * migratable to begin with.  The page must also be small enough that
 * migrating it out of a movable zone remains feasible; mere presence in
 * a movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;

	return true;
}
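
/*
 * Example (sketch of a consumer, modeled on htlb_alloc_mask() in
 * mm/hugetlb.c): the allocation gfp mask is made movable only when the
 * hstate passes this check.
 *
 *	gfp_t gfp = hugepage_movable_supported(h) ?
 *			GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
 */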

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot time.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is no
 * such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
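
/*
 * Example (sketch): sysctl and sysfs handlers typically bail out early
 * on such platforms.
 *
 *	if (!hugepages_supported())
 *		return -EOPNOTSUPP;
 */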

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
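
/*
 * Example (sketch of the pattern used by hugetlb_change_protection()):
 * the start/commit pair brackets the modification of a huge PTE so
 * architectures can implement it as a transaction.
 *
 *	pte_t old_pte, pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
 *	pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
 *	pte = arch_make_huge_pte(pte, vma, NULL, 0);
 *	huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
 */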

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
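
/*
 * Example (sketch of the pattern in the fault and unmap paths): take
 * the per-huge-PTE lock around inspection and update of an entry.
 *
 *	spinlock_t *ptl;
 *
 *	ptl = huge_pte_lock(hstate_vma(vma), mm, ptep);
 *	...examine or modify the entry at ptep...
 *	spin_unlock(ptl);
 */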

#endif /* _LINUX_HUGETLB_H */