// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"

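/*
 * Map the buffer's backing pages into the kernel virtual address space.
 * Collects the pages from the buffer's sg_table into a temporary array and
 * vmap()s them, cached if ION_FLAG_CACHED is set, write-combined otherwise.
 * Returns the kernel address, NULL if the temporary array cannot be
 * allocated, or ERR_PTR(-ENOMEM) if vmap() fails.
 */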
void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return NULL;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

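/*
 * Map the buffer into a userspace VMA with remap_pfn_range(), walking the
 * sg_table one entry at a time. vma->vm_pgoff is treated as a page offset
 * into the buffer, and each mapped chunk is clamped to the space remaining
 * in the VMA.
 */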
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;
        int ret;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg->length;

                if (offset >= sg->length) {
                        offset -= sg->length;
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg->length - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                      vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

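/* Temporarily map @num pages with vm_map_ram() and zero them. */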
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
        void *addr = vm_map_ram(pages, num, -1, pgprot);

        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
        vm_unmap_ram(addr, num);

        return 0;
}

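/*
 * Zero every page in the scatterlist, batching up to 32 pages at a time
 * through ion_heap_clear_pages() to bound the size of the temporary mapping.
 */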
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
                                pgprot_t pgprot)
{
        int p = 0;
        int ret = 0;
        struct sg_page_iter piter;
        struct page *pages[32];

        for_each_sg_page(sgl, &piter, nents, 0) {
                pages[p++] = sg_page_iter_page(&piter);
                if (p == ARRAY_SIZE(pages)) {
                        ret = ion_heap_clear_pages(pages, p, pgprot);
                        if (ret)
                                return ret;
                        p = 0;
                }
        }
        if (p)
                ret = ion_heap_clear_pages(pages, p, pgprot);

        return ret;
}

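/*
 * Zero a buffer's backing pages using a mapping that matches the buffer's
 * caching attribute (cached for ION_FLAG_CACHED, write-combined otherwise).
 */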
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        pgprot_t pgprot;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        return ion_heap_sglist_zero(&sg, 1, pgprot);
}

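/* Queue a buffer on the heap's deferred-free list and wake the free thread. */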
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        spin_lock(&heap->free_lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        spin_unlock(&heap->free_lock);
        wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        spin_lock(&heap->free_lock);
        size = heap->free_list_size;
        spin_unlock(&heap->free_lock);

        return size;
}

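/*
 * Free up to @size bytes of buffers from the deferred-free list (the whole
 * list if @size is zero). With @skip_pools set, each buffer is flagged with
 * ION_PRIV_FLAG_SHRINKER_FREE so the heap's free path bypasses its page
 * pools. free_lock is dropped around ion_buffer_destroy() since freeing a
 * buffer can sleep. Returns the number of bytes actually drained.
 */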
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
                                       bool skip_pools)
{
        struct ion_buffer *buffer;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        spin_lock(&heap->free_lock);
        if (size == 0)
                size = heap->free_list_size;

        while (!list_empty(&heap->free_list)) {
                if (total_drained >= size)
                        break;
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                if (skip_pools)
                        buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
                total_drained += buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
                spin_lock(&heap->free_lock);
        }
        spin_unlock(&heap->free_lock);

        return total_drained;
}

size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, false);
}

size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, true);
}

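/*
 * Deferred-free thread: sleeps (freezably) until the free list is non-empty,
 * then pops and destroys buffers one at a time.
 */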
static int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);

                spin_lock(&heap->free_lock);
                if (list_empty(&heap->free_list)) {
                        spin_unlock(&heap->free_lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}

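/*
 * Set up the deferred-free machinery for a heap: free list, waitqueue and a
 * per-heap kthread that runs the free work at SCHED_IDLE priority.
 */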
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                       __func__);
                return PTR_ERR_OR_ZERO(heap->task);
        }
        sched_setscheduler(heap->task, SCHED_IDLE, &param);
        return 0;
}

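/*
 * Shrinker count_objects callback: report the reclaimable pages sitting on
 * the deferred-free list plus whatever the heap's own ->shrink() op reports
 * when asked to scan nothing.
 */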
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
                                           struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int total = 0;

        total = ion_heap_freelist_size(heap) / PAGE_SIZE;
        if (heap->ops->shrink)
                total += heap->ops->shrink(heap, sc->gfp_mask, 0);
        return total;
}

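/*
 * Shrinker scan_objects callback: reclaim from the deferred-free list first
 * (bypassing page pools), then ask the heap's ->shrink() op to free the
 * remainder of the request. Returns the number of pages freed.
 */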
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
                                          struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int freed = 0;
        int to_scan = sc->nr_to_scan;

        if (to_scan == 0)
                return 0;

        /*
         * shrink the free list first, no point in zeroing the memory if we're
         * just going to reclaim it. Also, skip any possible page pooling.
         */
        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
                                PAGE_SIZE;

        to_scan -= freed;
        if (to_scan <= 0)
                return freed;

        if (heap->ops->shrink)
                freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
        return freed;
}

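/*
 * Hook the heap up to the VM shrinker so its free list and page pools can be
 * reclaimed under memory pressure.
 */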
int ion_heap_init_shrinker(struct ion_heap *heap)
{
        heap->shrinker.count_objects = ion_heap_shrink_count;
        heap->shrinker.scan_objects = ion_heap_shrink_scan;
        heap->shrinker.seeks = DEFAULT_SEEKS;
        heap->shrinker.batch = 0;

        return register_shrinker(&heap->shrinker);
}