// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "ion.h"

static struct ion_device *internal_dev;
static int heap_id;

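/*
 * All live buffers are tracked in dev->buffers, an rbtree keyed by the
 * buffer's kernel address; inserting the same buffer twice indicates a
 * bug, hence the BUG() below.
 */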
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
                           struct ion_buffer *buffer)
{
        struct rb_node **p = &dev->buffers.rb_node;
        struct rb_node *parent = NULL;
        struct ion_buffer *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_buffer, node);

                if (buffer < entry) {
                        p = &(*p)->rb_left;
                } else if (buffer > entry) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: buffer already found.\n", __func__);
                        BUG();
                }
        }

        rb_link_node(&buffer->node, parent, p);
        rb_insert_color(&buffer->node, &dev->buffers);
}

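/*
 * Allocate backing memory for a buffer from the given heap.  If the first
 * attempt fails and the heap frees buffers lazily, the deferred freelist
 * is drained and the allocation retried once.  The heap is expected to
 * have filled in buffer->sg_table; the new buffer is then added to
 * dev->buffers.
 */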
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                            struct ion_device *dev,
                                            unsigned long len,
                                            unsigned long flags)
{
        struct ion_buffer *buffer;
        int ret;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        buffer->flags = flags;

        ret = heap->ops->allocate(heap, buffer, len, flags);

        if (ret) {
                if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
                        goto err2;

                ion_heap_freelist_drain(heap, 0);
                ret = heap->ops->allocate(heap, buffer, len, flags);
                if (ret)
                        goto err2;
        }

        if (!buffer->sg_table) {
                WARN_ONCE(1, "This heap needs to set the sgtable");
                ret = -EINVAL;
                goto err1;
        }
        buffer->dev = dev;
        buffer->size = len;
        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);
        return buffer;

err1:
        heap->ops->free(buffer);
err2:
        kfree(buffer);
        return ERR_PTR(ret);
}

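/*
 * ion_buffer_destroy() releases the heap memory and the buffer metadata
 * immediately (tearing down any leftover kernel mapping first), while
 * _ion_buffer_destroy() unlinks the buffer from dev->buffers and, for
 * heaps with ION_HEAP_FLAG_DEFER_FREE, hands it to the heap's freelist
 * to be freed later.
 */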
void ion_buffer_destroy(struct ion_buffer *buffer)
{
        if (WARN_ON(buffer->kmap_cnt > 0))
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        buffer->heap->ops->free(buffer);
        kfree(buffer);
}

static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;
        struct ion_device *dev = buffer->dev;

        mutex_lock(&dev->buffer_lock);
        rb_erase(&buffer->node, &dev->buffers);
        mutex_unlock(&dev->buffer_lock);

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_freelist_add(heap, buffer);
        else
                ion_buffer_destroy(buffer);
}

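/*
 * Reference-counted kernel mapping of a buffer: the first
 * ion_buffer_kmap_get() maps it through heap->ops->map_kernel(), the last
 * ion_buffer_kmap_put() unmaps it.  Callers must hold buffer->lock.
 */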
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
        void *vaddr;

        if (buffer->kmap_cnt) {
                buffer->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
        if (WARN_ONCE(!vaddr,
                      "heap->ops->map_kernel should return ERR_PTR on error"))
                return ERR_PTR(-EINVAL);
        if (IS_ERR(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
                buffer->vaddr = NULL;
        }
}

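/*
 * Each dma-buf attachment gets its own copy of the buffer's sg_table so
 * that per-device DMA addresses do not clobber one another; the copied
 * entries start with dma_address cleared.
 */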
static struct sg_table *dup_sg_table(struct sg_table *table)
{
        struct sg_table *new_table;
        int ret, i;
        struct scatterlist *sg, *new_sg;

        new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
        if (!new_table)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
        if (ret) {
                kfree(new_table);
                return ERR_PTR(-ENOMEM);
        }

        new_sg = new_table->sgl;
        for_each_sg(table->sgl, sg, table->nents, i) {
                memcpy(new_sg, sg, sizeof(*sg));
                new_sg->dma_address = 0;
                new_sg = sg_next(new_sg);
        }

        return new_table;
}

static void free_duped_table(struct sg_table *table)
{
        sg_free_table(table);
        kfree(table);
}

struct ion_dma_buf_attachment {
        struct device *dev;
        struct sg_table *table;
        struct list_head list;
};

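/*
 * Attach/detach callbacks: each attaching device gets a private
 * ion_dma_buf_attachment holding a duplicated sg_table, tracked on
 * buffer->attachments so begin/end_cpu_access can sync every attached
 * device.
 */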
static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
                              struct dma_buf_attachment *attachment)
{
        struct ion_dma_buf_attachment *a;
        struct sg_table *table;
        struct ion_buffer *buffer = dmabuf->priv;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        table = dup_sg_table(buffer->sg_table);
        if (IS_ERR(table)) {
                kfree(a);
                return -ENOMEM;
        }

        a->table = table;
        a->dev = dev;
        INIT_LIST_HEAD(&a->list);

        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
                                struct dma_buf_attachment *attachment)
{
        struct ion_dma_buf_attachment *a = attachment->priv;
        struct ion_buffer *buffer = dmabuf->priv;

        free_duped_table(a->table);
        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);

        kfree(a);
}

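/*
 * Map the attachment's private sg_table into the attaching device's DMA
 * address space; unmap reverses it.
 */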
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction direction)
{
        struct ion_dma_buf_attachment *a = attachment->priv;
        struct sg_table *table;

        table = a->table;

        if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
                        direction))
                return ERR_PTR(-ENOMEM);

        return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
        dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

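/*
 * Map a buffer into userspace via the heap's map_user op.  Buffers
 * allocated without ION_FLAG_CACHED are mapped write-combined.
 */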
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;
        int ret = 0;

        if (!buffer->heap->ops->map_user) {
                pr_err("%s: this heap does not define a method for mapping to userspace\n",
                       __func__);
                return -EINVAL;
        }

        if (!(buffer->flags & ION_FLAG_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        mutex_lock(&buffer->lock);
        /* now map it to userspace */
        ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
        mutex_unlock(&buffer->lock);

        if (ret)
                pr_err("%s: failure mapping buffer to userspace\n",
                       __func__);

        return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
        struct ion_buffer *buffer = dmabuf->priv;

        _ion_buffer_destroy(buffer);
}

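/*
 * kmap/kunmap return the kernel virtual address at a page offset and rely
 * on a prior begin_cpu_access having populated buffer->vaddr.
 */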
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
        struct ion_buffer *buffer = dmabuf->priv;

        return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
                               void *ptr)
{
}

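/*
 * CPU access bracketing: take (and later drop) a kernel mapping when the
 * heap supports one, and dma_sync every attachment's sg_table so the CPU
 * and attached devices see coherent data.
 */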
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                        enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;
        struct ion_dma_buf_attachment *a;

        /*
         * TODO: Move this elsewhere because we don't always need a vaddr
         */
        if (buffer->heap->ops->map_kernel) {
                mutex_lock(&buffer->lock);
                vaddr = ion_buffer_kmap_get(buffer);
                mutex_unlock(&buffer->lock);
        }

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
                                    direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                      enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        struct ion_dma_buf_attachment *a;

        if (buffer->heap->ops->map_kernel) {
                mutex_lock(&buffer->lock);
                ion_buffer_kmap_put(buffer);
                mutex_unlock(&buffer->lock);
        }

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
                                       direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .attach = ion_dma_buf_attach,
        .detach = ion_dma_buf_detatch,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
        .map = ion_dma_buf_kmap,
        .unmap = ion_dma_buf_kunmap,
};

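/*
 * Allocate a buffer and return it to userspace as a dma-buf file
 * descriptor.  Returns a negative errno on failure.
 */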
int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
        struct ion_device *dev = internal_dev;
        struct ion_buffer *buffer = NULL;
        struct ion_heap *heap;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        int fd;
        struct dma_buf *dmabuf;

        pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
                 len, heap_id_mask, flags);
        /*
         * Traverse the list of heaps available in this system in priority
         * order.  If the heap type is supported by the client and matches
         * the request of the caller, allocate from it.  Repeat until the
         * allocation has succeeded or all heaps have been tried.
         */
        len = PAGE_ALIGN(len);

        if (!len)
                return -EINVAL;

        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & heap_id_mask))
                        continue;
                buffer = ion_buffer_create(heap, dev, len, flags);
                if (!IS_ERR(buffer))
                        break;
        }
        up_read(&dev->lock);

        if (!buffer)
                return -ENODEV;

        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        exp_info.ops = &dma_buf_ops;
        exp_info.size = buffer->size;
        exp_info.flags = O_RDWR;
        exp_info.priv = buffer;

        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                _ion_buffer_destroy(buffer);
                return PTR_ERR(dmabuf);
        }

        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);

        return fd;
}

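/*
 * Report the available heaps to userspace.  With a NULL destination
 * buffer only the heap count is returned; otherwise up to query->cnt
 * ion_heap_data records are copied out.
 */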
int ion_query_heaps(struct ion_heap_query *query)
{
        struct ion_device *dev = internal_dev;
        struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
        int ret = -EINVAL, cnt = 0, max_cnt;
        struct ion_heap *heap;
        struct ion_heap_data hdata;

        memset(&hdata, 0, sizeof(hdata));

        down_read(&dev->lock);
        if (!buffer) {
                query->cnt = dev->heap_cnt;
                ret = 0;
                goto out;
        }

        if (query->cnt <= 0)
                goto out;

        max_cnt = query->cnt;

        plist_for_each_entry(heap, &dev->heaps, node) {
                strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
                hdata.name[sizeof(hdata.name) - 1] = '\0';
                hdata.type = heap->type;
                hdata.heap_id = heap->id;

                if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
                        ret = -EFAULT;
                        goto out;
                }

                cnt++;
                if (cnt >= max_cnt)
                        break;
        }

        query->cnt = cnt;
        ret = 0;
out:
        up_read(&dev->lock);
        return ret;
}

static const struct file_operations ion_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ion_ioctl,
#endif
};

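/*
 * debugfs hooks for driving a heap's shrinker by hand through the
 * "<heap>_shrink" file: writing 0 shrinks everything currently counted,
 * writing N scans N objects, and reading returns the shrinker's count.
 */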
static int debug_shrink_set(void *data, u64 val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = GFP_HIGHUSER;
        sc.nr_to_scan = val;

        if (!val) {
                objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
                sc.nr_to_scan = objs;
        }

        heap->shrinker.scan_objects(&heap->shrinker, &sc);
        return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = GFP_HIGHUSER;
        sc.nr_to_scan = 0;

        objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
        *val = objs;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
                        debug_shrink_set, "%llu\n");

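/*
 * Register a heap with the ion device: set up deferred freeing and the
 * shrinker as requested by the heap's flags/ops, assign an id, insert the
 * heap into the priority list, and expose its debugfs shrink file.
 */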
void ion_device_add_heap(struct ion_heap *heap)
{
        struct ion_device *dev = internal_dev;
        int ret;

        if (!heap->ops->allocate || !heap->ops->free)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                       __func__);

        spin_lock_init(&heap->free_lock);
        heap->free_list_size = 0;

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_init_deferred_free(heap);

        if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
                ret = ion_heap_init_shrinker(heap);
                if (ret)
                        pr_err("%s: Failed to register shrinker\n", __func__);
        }

        heap->dev = dev;
        down_write(&dev->lock);
        heap->id = heap_id++;
        /*
         * Use negative heap->id to reverse the priority: when traversing
         * the list later, higher id numbers are attempted first.
         */
        plist_node_init(&heap->node, -heap->id);
        plist_add(&heap->node, &dev->heaps);

        if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
                char debug_name[64];

                snprintf(debug_name, 64, "%s_shrink", heap->name);
                debugfs_create_file(debug_name, 0644, dev->debug_root,
                                    heap, &debug_shrink_fops);
        }

        dev->heap_cnt++;
        up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

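/*
 * Create the single global ion device: register the /dev/ion misc device
 * and initialize the buffer rbtree, locks and heap list.  Runs at
 * subsys_initcall time.
 */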
static int ion_device_create(void)
{
        struct ion_device *idev;
        int ret;

        idev = kzalloc(sizeof(*idev), GFP_KERNEL);
        if (!idev)
                return -ENOMEM;

        idev->dev.minor = MISC_DYNAMIC_MINOR;
        idev->dev.name = "ion";
        idev->dev.fops = &ion_fops;
        idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                kfree(idev);
                return ret;
        }

        idev->debug_root = debugfs_create_dir("ion", NULL);
        idev->buffers = RB_ROOT;
        mutex_init(&idev->buffer_lock);
        init_rwsem(&idev->lock);
        plist_head_init(&idev->heaps);
        internal_dev = idev;
        return 0;
}
subsys_initcall(ion_device_create);