arch/powerpc/mm/ioremap_32.c
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <mm/mmu_decl.h>

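/**
 * ioremap_wt() - map bus memory into CPU space with write-through caching
 * @addr: bus address of the memory to map
 * @size: size of the resource to map
 *
 * The returned cookie should be passed to iounmap() when the mapping
 * is no longer needed.
 */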
void __iomem *ioremap_wt(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);

        return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);
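
/*
 * Hedged usage sketch (not part of this file; fb_phys and fb_len are
 * hypothetical): a driver maps a device region write-through, accesses
 * it through the I/O accessors, and unmaps it when done:
 *
 *        void __iomem *fb = ioremap_wt(fb_phys, fb_len);
 *        if (!fb)
 *                return -ENOMEM;
 *        iowrite32(0, fb);        // clear the first word of the region
 *        iounmap(fb);
 */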
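/*
 * Worker for the ioremap family: map @size bytes of physical address
 * space at @addr with protection @prot.  @caller is recorded so the
 * mapping can be attributed in /proc/vmallocinfo.  Returns NULL on
 * failure.
 */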
void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
{
        unsigned long v, i;
        phys_addr_t p;
        int err;

        /*
         * Choose an address to map it to.
         * Once the vmalloc system is running, we use it.
         * Before then, we use space going down from IOREMAP_TOP
         * (ioremap_bot records where we're up to).
         */
        p = addr & PAGE_MASK;
        size = PAGE_ALIGN(addr + size) - p;
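        /*
         * Worked example (assuming 4K pages): addr = 0xfe001234 and
         * size = 0x100 give p = 0xfe001000 and
         * size = PAGE_ALIGN(0xfe001334) - 0xfe001000 = 0x1000, i.e. the
         * single page covering the requested range.
         */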

        /*
         * If the address lies within the first 16 MB, assume it's in ISA
         * memory space.
         */
        if (p < 16 * 1024 * 1024)
                p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
        /*
         * Don't allow anybody to remap normal RAM that we're using.
         * mem_init() sets high_memory so only do the check after that.
         */
        if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
            page_is_ram(__phys_to_pfn(p))) {
                pr_warn("%s(): phys addr 0x%llx is RAM lr %ps\n", __func__,
                        (unsigned long long)p, __builtin_return_address(0));
                return NULL;
        }
#endif

        if (size == 0)
                return NULL;

        /*
         * Is it already mapped?  It may be covered by an existing
         * block mapping (e.g. a BAT) set up earlier.
         */
        v = p_block_mapped(p);
        if (v)
                goto out;

        if (slab_is_available()) {
                struct vm_struct *area;
                area = get_vm_area_caller(size, VM_IOREMAP, caller);
                if (!area)
                        return NULL;
                area->phys_addr = p;
                v = (unsigned long)area->addr;
        } else {
                v = (ioremap_bot -= size);
        }

        /*
         * Should check if it is a candidate for a BAT mapping
         */

        err = 0;
        for (i = 0; i < size && err == 0; i += PAGE_SIZE)
                err = map_kernel_page(v + i, p + i, prot);
        if (err) {
                if (slab_is_available())
                        vunmap((void *)v);
                return NULL;
        }

out:
        return (void __iomem *)(v + ((unsigned long)addr & ~PAGE_MASK));
}

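/*
 * Tear down a mapping created by the ioremap family.  Mappings backed
 * by a block (BAT) mapping, or handed out before the vmalloc system
 * was up, are deliberately left in place.
 */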
void iounmap(volatile void __iomem *addr)
{
        /*
         * If mapped by BATs then there is nothing to do.
         * Calling vunmap() generates a benign warning.
         */
        if (v_block_mapped((unsigned long)addr))
                return;

        if (addr > high_memory && (unsigned long)addr < ioremap_bot)
                vunmap((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);