// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>
#include <linux/dma-direct.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include <mm/mmu_decl.h>
#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif
unsigned long long memory_limit;
bool init_mem_is_free;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#endif
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}
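/*
 * phys_mem_access_prot() below is consulted when userspace maps physical
 * memory (for example through /dev/mem): the platform hook may override the
 * protection, otherwise anything that is not RAM is mapped non-cacheable.
 */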
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = pgprot_noncached(vma_prot);

        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif
int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
        return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
        return -ENODEV;
}
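/*
 * The weak stubs above simply fail; MMUs that maintain a linear mapping
 * (e.g. hash and radix on Book3S 64) provide real implementations that
 * map and unmap the hot-plugged range.
 */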
#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
                                       unsigned long chunk)
{
        unsigned long i;

        for (i = start; i < stop; i += chunk) {
                flush_dcache_range(i, min(stop, i + chunk));
                cond_resched();
        }
}
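/*
 * Memory hot(un)plug below flushes the linear-mapping alias of the affected
 * range; doing it in FLUSH_CHUNK_SIZE pieces with a cond_resched() in
 * between keeps a multi-gigabyte flush from hogging the CPU.
 */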
int __ref arch_add_memory(int nid, u64 start, u64 size,
                          struct mhp_restrictions *restrictions)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int rc;

        resize_hpt_for_hotplug(memblock_phys_mem_size());

        start = (unsigned long)__va(start);
        rc = create_section_mapping(start, start + size, nid);
        if (rc) {
                pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
                        start, start + size, rc);
                return -EFAULT;
        }

        return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
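/*
 * Removal is the mirror image: drop the pages from the zone structures,
 * flush the now-stale linear-mapping alias, tear down the bolted mapping
 * and finally give the hash page table a chance to shrink.
 */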
void __ref arch_remove_memory(int nid, u64 start, u64 size,
                              struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
        int ret;

        __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);

        /* Remove htab bolted mappings for this section of memory */
        start = (unsigned long)__va(start);
        flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);

        ret = remove_section_mapping(start, start + size);
        WARN_ON_ONCE(ret);

        /* Ensure all vmalloc mappings are flushed in case they also
         * hit that section of memory
         */
        vm_unmap_aliases();

        if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
                pr_warn("Hash collision while resizing HPT\n");
}
#endif
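/*
 * Flat (non-NUMA) memory setup: with a single node, all of memblock is
 * assigned to node 0 and the nosave regions are simply the holes between
 * memblock regions.
 */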
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
        max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /* Place all memblock_regions in the same node and merge contiguous
         * memblock_regions
         */
        memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
void __init initmem_init(void)
{
        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);
        sparse_init();
}
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
        struct memblock_region *reg, *prev = NULL;

        for_each_memblock(memory, reg) {
                if (prev &&
                    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
                        register_nosave_region(memblock_region_memory_end_pfn(prev),
                                               memblock_region_memory_base_pfn(reg));
                prev = reg;
        }

        return 0;
}
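/*
 * register_nosave_region() tells the hibernation code to skip the PFNs in
 * each hole, since there is no backing RAM to save or restore there.
 */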
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
        return 0;
}
#endif
/*
 * Zones usage:
 *
 * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code.  32-bit only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
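/*
 * Worked example: with zone_dma_bits = 31 and 4K pages, ZONE_DMA ends at
 * PFN 1UL << (31 - PAGE_SHIFT) = 0x80000, i.e. the first 2GB of RAM.
 */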
static unsigned long max_zone_pfns[MAX_NR_ZONES];
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long long total_ram = memblock_phys_mem_size();
        phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
        unsigned long v = __fix_to_virt(FIX_KMAP_END);
        unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

        for (; v < end; v += PAGE_SIZE)
                map_kernel_page(v, 0, __pgprot(0));	/* XXX gross */

        map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
        pkmap_page_table = virt_to_kpte(PKMAP_BASE);

        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
               (unsigned long long)top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (long int)((top_of_ram - total_ram) >> 20));

        /*
         * Allow 30-bit DMA for very limited Broadcom wifi chips on many
         * powerbooks.
         */
        if (IS_ENABLED(CONFIG_PPC32))
                zone_dma_bits = 30;
        else
                zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
                                      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}
void __init mem_init(void)
{
        /*
         * book3s is limited to 16 page sizes due to encoding this in
         * a 4-bit field for slices.
         */
        BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
        /*
         * Some platforms (e.g. 85xx) limit DMA-able memory way below
         * 4G. We force memblock to bottom-up mode to ensure that the
         * memory allocated in swiotlb_init() is DMA-able.
         * As it's the last memblock allocation, no need to reset it
         * back to normal.
         */
        memblock_set_bottom_up(true);
        swiotlb_init(0);
#endif

        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
        set_max_mapnr(max_pfn);
        memblock_free_all();

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
                        struct page *page = pfn_to_page(pfn);
                        if (!memblock_is_reserved(paddr))
                                free_highmem_page(page);
                }
        }
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
        /*
         * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
         * functions.... do it here for the non-smp case.
         */
        per_cpu(next_tlbcam_idx, smp_processor_id()) =
                (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

        mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
        pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
        pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
                KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
        pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
        pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
                PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
        if (ioremap_bot != IOREMAP_TOP)
                pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
                        ioremap_bot, IOREMAP_TOP);
        pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
                VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}
void free_initmem(void)
{
        ppc_md.progress = ppc_printk_progress;
        mark_initmem_nx();
        init_mem_is_free = true;
        free_initmem_default(POISON_FREE_INITMEM);
}
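/*
 * The rest of this file implements the powerpc cache maintenance helpers:
 * a fast path for snooping icaches, icbi-based invalidation of a range, and
 * the dcache/icache flushes used when pages are handed to userspace.
 */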
/**
 * flush_coherent_icache() - if a CPU has a coherent icache, flush it
 * @addr: The base address to use (can be any valid address, the whole cache will be flushed)
 * Return true if the cache was flushed, false otherwise
 */
static inline bool flush_coherent_icache(unsigned long addr)
{
        /*
         * For a snooping icache, we still need a dummy icbi to purge all the
         * prefetched instructions from the ifetch buffers. We also need a sync
         * before the icbi to order the actual stores to memory that might
         * have modified instructions with the icbi.
         */
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
                mb(); /* sync */
                allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
                icbi((void *)addr);
                prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
                mb(); /* sync */
                isync();
                return true;
        }

        return false;
}
/**
 * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static void invalidate_icache_range(unsigned long start, unsigned long stop)
{
        unsigned long shift = l1_icache_shift();
        unsigned long bytes = l1_icache_bytes();
        char *addr = (char *)(start & ~(bytes - 1));
        unsigned long size = stop - (unsigned long)addr + (bytes - 1);
        unsigned long i;

        for (i = 0; i < size >> shift; i++, addr += bytes)
                icbi(addr);

        mb(); /* sync */
        isync();
}
/**
 * flush_icache_range: Write any modified data cache blocks out to memory
 * and invalidate the corresponding blocks in the instruction cache
 *
 * Generic code will call this after writing memory, before executing from it.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
void flush_icache_range(unsigned long start, unsigned long stop)
{
        if (flush_coherent_icache(start))
                return;

        clean_dcache_range(start, stop);

        if (IS_ENABLED(CONFIG_44x)) {
                /*
                 * Flash invalidate on 44x because we are passed kmapped
                 * addresses and this doesn't work for userspace pages due to
                 * the virtually tagged icache.
                 */
                iccci((void *)start);
                mb(); /* sync */
                isync();
        } else
                invalidate_icache_range(start, stop);
}
EXPORT_SYMBOL(flush_icache_range);
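/*
 * Typical caller pattern (e.g. the module loader): write the new
 * instructions through a kernel mapping, then call
 * flush_icache_range(addr, addr + len) before anything executes them.
 */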
#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
/**
 * flush_dcache_icache_phys() - Flush a page by its physical address
 * @physaddr: the physical address of the page
 */
static void flush_dcache_icache_phys(unsigned long physaddr)
{
        unsigned long bytes = l1_dcache_bytes();
        unsigned long nb = PAGE_SIZE / bytes;
        unsigned long addr = physaddr & PAGE_MASK;
        unsigned long msr, msr0;
        unsigned long loop1 = addr, loop2 = addr;

        msr0 = mfmsr();
        msr = msr0 & ~MSR_DR;
        /*
         * This must remain as ASM to prevent potential memory accesses
         * while the data MMU is disabled
         */
        asm volatile(
                "   mtctr %2;\n"
                "   mtmsr %3;\n"
                "   isync;\n"
                "0: dcbst   0, %0;\n"
                "   addi    %0, %0, %4;\n"
                "   bdnz    0b;\n"
                "   sync;\n"
                "   mtctr %2;\n"
                "1: icbi    0, %1;\n"
                "   addi    %1, %1, %4;\n"
                "   bdnz    1b;\n"
                "   sync;\n"
                "   mtmsr %5;\n"
                "   isync;\n"
                : "+&r" (loop1), "+&r" (loop2)
                : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
                : "ctr", "memory");
}
#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
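/*
 * 8xx and 64-bit kernels never need the physical-address variant above:
 * they can always reach the page through the linear mapping (8xx has no
 * highmem), so flush_dcache_icache_page() uses page_address() directly.
 */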
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (PageCompound(page)) {
                flush_dcache_icache_hugepage(page);
                return;
        }
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
        /* On 8xx there is no need to kmap since highmem is not supported */
        __flush_dcache_icache(page_address(page));
#else
        if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
                void *start = kmap_atomic(page);

                __flush_dcache_icache(start);
                kunmap_atomic(start);
        } else {
                unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;

                if (flush_coherent_icache(addr))
                        return;
                flush_dcache_icache_phys(addr);
        }
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);
/**
 * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 * @page: the address of the page to flush
 */
void __flush_dcache_icache(void *p)
{
        unsigned long addr = (unsigned long)p;

        if (flush_coherent_icache(addr))
                return;

        clean_dcache_range(addr, addr + PAGE_SIZE);

        /*
         * We don't flush the icache on 44x. Those have a virtual icache and we
         * don't have access to the virtual address here (it's not the page
         * vaddr but where it's mapped in user space). The flushing of the
         * icache on these is handled elsewhere, when a change in the address
         * space occurs, before returning to user space.
         */

        if (cpu_has_feature(MMU_FTR_TYPE_44x))
                return;

        invalidate_icache_range(addr, addr + PAGE_SIZE);
}
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address. Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                goto bail;
#endif

        flush_dcache_page(pg);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                struct resource *res;
                unsigned long base = reg->base;
                unsigned long size = reg->size;

                res = kzalloc(sizeof(struct resource), GFP_KERNEL);
                WARN_ON(!res);

                if (res) {
                        res->name = "System RAM";
                        res->start = base;
                        res->end = base + size - 1;
                        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
                        WARN_ON(request_resource(&iomem_resource, res) < 0);
                }
        }

        return 0;
}
subsys_initcall(add_system_ram_resources);
#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
        if (page_is_rtas_user_buf(pfn))
                return 1;
        if (iomem_is_exclusive(PFN_PHYS(pfn)))
                return 0;
        if (!page_is_ram(pfn))
                return 1;

        return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */
/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);