/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>
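/*
 * Overview (illustrative, using the book3s64 constants): the user
 * address space is carved into "slices", each tracking a single base
 * page size. Below 4GB (SLICE_LOW_TOP) sit sixteen 256MB low slices
 * (SLICE_LOW_SHIFT == 28); everything above is covered by 1TB high
 * slices (SLICE_HIGH_SHIFT == 40). Other platforms such as 8xx define
 * their own shifts. A struct slice_mask is simply a bitmap of low and
 * high slices, and the code below is mask manipulation plus lookup of
 * the per-slice page size bytes kept in the mm context.
 */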
static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label,
		 (int)SLICE_NUM_LOW, &mask->low_slices);
	pr_devel("%s high_slice: %*pbl\n", label,
		 (int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif
static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if ((start + len) > SLICE_LOW_TOP) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}
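/*
 * Worked example (illustrative, assuming the book3s64 value
 * SLICE_LOW_SHIFT == 28): a range starting at 0x10000000 with length
 * 0x20000000 ends at 0x2fffffff, so GET_LOW_SLICE_INDEX() yields 1
 * for the start and 2 for the end, and low_slices becomes
 * (1 << 3) - (1 << 1) == 0b0110, i.e. slices 1 and 2 are marked.
 */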
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->context.slb_addr_limit - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

#ifdef CONFIG_PPC64
	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;
#endif

	return !slice_area_is_free(mm, start, end - start);
}
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned long i;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (high_limit <= SLICE_LOW_TOP)
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}
#ifdef CONFIG_PPC_BOOK3S_64
static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
{
#ifdef CONFIG_PPC_64K_PAGES
	if (psize == MMU_PAGE_64K)
		return &mm->context.mask_64k;
#endif
	if (psize == MMU_PAGE_4K)
		return &mm->context.mask_4k;
#ifdef CONFIG_HUGETLB_PAGE
	if (psize == MMU_PAGE_16M)
		return &mm->context.mask_16m;
	if (psize == MMU_PAGE_16G)
		return &mm->context.mask_16g;
#endif
	BUG();
}
#elif defined(CONFIG_PPC_8xx)
static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
{
	if (psize == mmu_virtual_psize)
		return &mm->context.mask_base_psize;
#ifdef CONFIG_HUGETLB_PAGE
	if (psize == MMU_PAGE_512K)
		return &mm->context.mask_512k;
	if (psize == MMU_PAGE_8M)
		return &mm->context.mask_8m;
#endif
	BUG();
}
#else
#error "Must define the slice masks for page sizes supported by the platform"
#endif
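/*
 * These per-size masks live in the mm context and act as a cache:
 * slice_init_new_context_exec() seeds the default size's mask and
 * slice_convert() below moves slices between the old and new size's
 * masks, so the common mmap path never has to recompute them by
 * walking the psize arrays.
 */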
static bool slice_check_range_fits(struct mm_struct *mm,
			   const struct slice_mask *available,
			   unsigned long start, unsigned long len)
{
	unsigned long end = start + len - 1;
	u64 low_slices = 0;

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
				- (1u << GET_LOW_SLICE_INDEX(start));
	}
	if ((low_slices & available->low_slices) != low_slices)
		return false;

	if (SLICE_NUM_HIGH && ((start + len) > SLICE_LOW_TOP)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
		unsigned long i;

		for (i = start_index; i < start_index + count; i++) {
			if (!test_bit(i, available->high_slices))
				return false;
		}
	}

	return true;
}
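/*
 * slice_flush_segments() is the IPI callback handed to on_each_cpu():
 * on CPUs where the target mm is active it refreshes the paca copy of
 * the context and flushes and re-bolts the SLB so stale segment
 * mappings are dropped. On configurations without CONFIG_PPC64 the
 * body compiles away to a no-op.
 */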
static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
#endif
}
static void slice_convert(struct mm_struct *mm,
				const struct slice_mask *mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *psize_mask, *old_mask;
	unsigned long i, flags;
	int old_psize;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	psize_mask = slice_mask_for_size(mm, psize);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++) {
		if (!(mask->low_slices & (1u << i)))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(mm, old_psize);
		old_mask->low_slices &= ~(1u << i);
		psize_mask->low_slices |= 1u << i;

		/* Update the sizes array */
		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
		if (!test_bit(i, mask->high_slices))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(mm, old_psize);
		__clear_bit(i, old_mask->high_slices);
		__set_bit(i, psize_mask->high_slices);

		/* Update the sizes array */
		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm->context.low_slices_psize,
		  (unsigned long)mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}
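/*
 * The psize arrays above pack two slices per byte, one 4-bit field
 * each. Worked example (illustrative): the psize of low slice 5 lives
 * in lpsizes[5 >> 1] == lpsizes[2], in the high nibble since
 * 5 & 0x1 == 1; reading it back is (lpsizes[2] >> 4) & 0xf, which is
 * exactly what the loops above and get_slice_psize() below compute.
 */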
/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return boolean indicating if the slice is marked as available in the
 * 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 const struct slice_mask *available,
				 int end, unsigned long *boundary_addr)
{
	unsigned long slice;

	if (addr < SLICE_LOW_TOP) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available->low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available->high_slices);
	}
}
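/*
 * The 'end' flag selects which boundary comes back: with end == 1 the
 * caller gets the first address past the slice (used by the bottom-up
 * walk to advance), with end == 0 it gets the slice's start (used by
 * the top-down walk to move backwards). The special case maps the
 * start of high slice 0 back to SLICE_LOW_TOP, so the low and high
 * bitmaps tile the address space without overlap.
 */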
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      const struct slice_mask *available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	/*
	 * Scan up to the allowed maximum address for this mmap request
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}
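/*
 * vm_unmapped_area() either returns a page-aligned address or a
 * negative errno, so "found & ~PAGE_MASK" being zero is the success
 * test used above (and in the top-down variant below).
 */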
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     const struct slice_mask *available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to mmap_base. Only do this for requests
	 * whose high_limit is above DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;

	while (addr > PAGE_SIZE) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     const struct slice_mask *mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}
static inline void slice_copy_mask(struct slice_mask *dst,
					const struct slice_mask *src)
{
	dst->low_slices = src->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices | src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices & ~src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}
#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif
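/*
 * MMU_PAGE_BASE is the kernel's base page size. slice_get_unmapped_area()
 * below only broadcasts slice_flush_segments() after converting slices
 * to a psize above this base size; conversions to the base size get
 * away without the cross-CPU flush.
 */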
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	const struct slice_mask *maskp;
	const struct slice_mask *compat_maskp = NULL;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm->context.slb_addr_limit) {
		/*
		 * Increasing the slb_addr_limit does not require
		 * the slice mask cache to be recalculated because it
		 * should already be initialised beyond the old address
		 * limit.
		 */
		mm->context.slb_addr_limit = high_limit;

		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm->context.slb_addr_limit == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);
	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	maskp = slice_mask_for_size(mm, psize);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 * else if have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * else:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

	/*
	 * If we support combo pages, we can allow 64k pages in 4k slices.
	 * The mask copies could be avoided in most cases here if we had
	 * a pointer to the good mask for the following code to use.
	 */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, maskp, compat_maskp);
		else
			slice_copy_mask(&good_mask, maskp);
	} else {
		slice_copy_mask(&good_mask, maskp);
	}

	slice_print_mask(" good_mask", &good_mask);
	if (compat_maskp)
		slice_print_mask(" compat_mask", compat_maskp);
	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
			slice_dbg(" fits good !\n");
			newaddr = addr;
			goto return_addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}
	/*
	 * We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
	slice_print_mask(" potential", &potential_mask);

	if (addr != 0 || fixed) {
		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
			slice_dbg(" fits potential !\n");
			newaddr = addr;
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");
	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	newaddr = slice_find_area(mm, len, &potential_mask,
				  psize, topdown, high_limit);

#ifdef CONFIG_PPC_64K_PAGES
	if (newaddr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
		newaddr = slice_find_area(mm, len, &potential_mask,
					  psize, topdown, high_limit);
	}
#endif

	if (newaddr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(newaddr, len, &potential_mask);
	slice_dbg(" found potential area at 0x%lx\n", newaddr);
	slice_print_mask(" mask", &potential_mask);

 convert:
	/*
	 * Try to allocate the context before we do slice convert
	 * so that we handle the context allocation failure gracefully.
	 */
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}

	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
	if (compat_maskp && !fixed)
		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
	if (potential_mask.low_slices ||
	    (SLICE_NUM_HIGH &&
	     !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, &potential_mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return newaddr;

 return_addr:
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}
	return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
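/*
 * The two hooks below are what the generic get_unmapped_area() path
 * ends up calling when the platform selects slices (powerpc defines
 * HAVE_ARCH_UNMAPPED_AREA and HAVE_ARCH_UNMAPPED_AREA_TOPDOWN in that
 * case); they simply forward to slice_get_unmapped_area() with the
 * context's default page size.
 */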
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *psizes;
	int index, mask_index;

	VM_BUG_ON(radix_enabled());

	if (addr < SLICE_LOW_TOP) {
		psizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	mask_index = index & 0x1;
	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);
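/*
 * Example (illustrative, assuming the book3s64 SLICE_LOW_SHIFT == 28):
 * for addr == 0x50000000 the low slice index is 0x50000000 >> 28 == 5,
 * so the psize comes from the high nibble of low_slices_psize[2],
 * matching the packing written by slice_convert() above.
 */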
void slice_init_new_context_exec(struct mm_struct *mm)
{
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *mask;
	unsigned int psize = mmu_virtual_psize;

	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

	/*
	 * In the case of exec, use the default limit. In the
	 * case of fork it is just inherited from the mm being
	 * duplicated.
	 */
#ifdef CONFIG_PPC64
	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
#else
	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
#endif

	mm->context.user_psize = psize;

	/*
	 * Set all slice psizes to the default.
	 */
	lpsizes = mm->context.low_slices_psize;
	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

	hpsizes = mm->context.high_slices_psize;
	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

	/*
	 * Slice mask cache starts zeroed, fill the default size cache.
	 */
	mask = slice_mask_for_size(mm, psize);
	mask->low_slices = ~0UL;
	if (SLICE_NUM_HIGH)
		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}
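/*
 * Note on the memset() pattern above: since each byte holds two 4-bit
 * psize fields, (psize << 4) | psize writes the default size into both
 * nibbles at once, and SLICE_NUM_{LOW,HIGH} >> 1 is the array size in
 * bytes. Only the default size's mask cache needs filling here because
 * every slice starts out at that size.
 */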
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, &mask, psize);
}
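/*
 * slice_set_range_psize() converts the covered slices unconditionally;
 * it does no free/VMA checking of its own, so callers are expected to
 * hand it ranges they already know are safe to re-type.
 */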
#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	const struct slice_mask *maskp;
	unsigned int psize = mm->context.user_psize;

	VM_BUG_ON(radix_enabled());

	maskp = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		const struct slice_mask *compat_maskp;
		struct slice_mask available;

		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
		slice_or_mask(&available, maskp, compat_maskp);
		return !slice_check_range_fits(mm, &available, addr, len);
	}
#endif

	return !slice_check_range_fits(mm, maskp, addr, len);
}
#endif