drivers/staging/android/ion/ion.c (Linux 5.6-rc7)
// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "ion.h"

static struct ion_device *internal_dev;
static int heap_id;

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                            struct ion_device *dev,
                                            unsigned long len,
                                            unsigned long flags)
{
        struct ion_buffer *buffer;
        int ret;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        buffer->flags = flags;
        buffer->dev = dev;
        buffer->size = len;

        ret = heap->ops->allocate(heap, buffer, len, flags);

        if (ret) {
                if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
                        goto err2;

                ion_heap_freelist_drain(heap, 0);
                ret = heap->ops->allocate(heap, buffer, len, flags);
                if (ret)
                        goto err2;
        }

        if (!buffer->sg_table) {
                WARN_ONCE(1, "This heap needs to set the sgtable");
                ret = -EINVAL;
                goto err1;
        }

        spin_lock(&heap->stat_lock);
        heap->num_of_buffers++;
        heap->num_of_alloc_bytes += len;
        if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
                heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
        spin_unlock(&heap->stat_lock);

        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        return buffer;

err1:
        heap->ops->free(buffer);
err2:
        kfree(buffer);
        return ERR_PTR(ret);
}

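/*
 * Immediately free @buffer: drop any leftover kernel mapping, hand the
 * pages back to the heap, and update the heap's allocation statistics.
 */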
void ion_buffer_destroy(struct ion_buffer *buffer)
{
        if (buffer->kmap_cnt > 0) {
                pr_warn_once("%s: buffer still mapped in the kernel\n",
                             __func__);
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        }
        buffer->heap->ops->free(buffer);
        spin_lock(&buffer->heap->stat_lock);
        buffer->heap->num_of_buffers--;
        buffer->heap->num_of_alloc_bytes -= buffer->size;
        spin_unlock(&buffer->heap->stat_lock);

        kfree(buffer);
}

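/*
 * Release path used by the dma-buf .release callback: defer the free to
 * the heap's freelist when ION_HEAP_FLAG_DEFER_FREE is set, otherwise
 * destroy the buffer right away.
 */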
static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_freelist_add(heap, buffer);
        else
                ion_buffer_destroy(buffer);
}

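/*
 * Kernel-mapping refcount helpers: ion_buffer_kmap_get() maps the buffer
 * through the heap's map_kernel op on first use and bumps kmap_cnt;
 * ion_buffer_kmap_put() unmaps it again when the count drops to zero.
 * Callers serialize these through buffer->lock.
 */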
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
        void *vaddr;

        if (buffer->kmap_cnt) {
                buffer->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
        if (WARN_ONCE(!vaddr,
                      "heap->ops->map_kernel should return ERR_PTR on error"))
                return ERR_PTR(-EINVAL);
        if (IS_ERR(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
                buffer->vaddr = NULL;
        }
}

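/*
 * Duplicate the buffer's sg_table for a new attachment.  The scatterlist
 * entries are copied as-is but dma_address is cleared, so each attachment
 * carries its own mapping state.
 */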
static struct sg_table *dup_sg_table(struct sg_table *table)
{
        struct sg_table *new_table;
        int ret, i;
        struct scatterlist *sg, *new_sg;

        new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
        if (!new_table)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
        if (ret) {
                kfree(new_table);
                return ERR_PTR(-ENOMEM);
        }

        new_sg = new_table->sgl;
        for_each_sg(table->sgl, sg, table->nents, i) {
                memcpy(new_sg, sg, sizeof(*sg));
                new_sg->dma_address = 0;
                new_sg = sg_next(new_sg);
        }

        return new_table;
}

static void free_duped_table(struct sg_table *table)
{
        sg_free_table(table);
        kfree(table);
}

struct ion_dma_buf_attachment {
        struct device *dev;
        struct sg_table *table;
        struct list_head list;
};

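/*
 * dma-buf .attach/.detach callbacks: each attachment gets a private copy
 * of the buffer's sg_table and is tracked on buffer->attachments so that
 * begin/end_cpu_access can sync every attached device.
 */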
static int ion_dma_buf_attach(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attachment)
{
        struct ion_dma_buf_attachment *a;
        struct sg_table *table;
        struct ion_buffer *buffer = dmabuf->priv;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        table = dup_sg_table(buffer->sg_table);
        if (IS_ERR(table)) {
                kfree(a);
                return -ENOMEM;
        }

        a->table = table;
        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);

        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

static void ion_dma_buf_detach(struct dma_buf *dmabuf,
                               struct dma_buf_attachment *attachment)
{
        struct ion_dma_buf_attachment *a = attachment->priv;
        struct ion_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);
        free_duped_table(a->table);

        kfree(a);
}

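/*
 * dma-buf .map_dma_buf/.unmap_dma_buf callbacks: map the attachment's
 * private sg_table for DMA to the attaching device and unmap it again.
 */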
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction direction)
{
        struct ion_dma_buf_attachment *a = attachment->priv;
        struct sg_table *table;

        table = a->table;

        if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
                        direction))
                return ERR_PTR(-ENOMEM);

        return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
        dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

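/*
 * dma-buf .mmap callback: map the buffer into userspace via the heap's
 * map_user op.  Uncached buffers are mapped write-combined.
 */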
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;
        int ret = 0;

        if (!buffer->heap->ops->map_user) {
                pr_err("%s: this heap does not define a method for mapping to userspace\n",
                       __func__);
                return -EINVAL;
        }

        if (!(buffer->flags & ION_FLAG_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        mutex_lock(&buffer->lock);
        /* now map it to userspace */
        ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
        mutex_unlock(&buffer->lock);

        if (ret)
                pr_err("%s: failure mapping buffer to userspace\n",
                       __func__);

        return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
        struct ion_buffer *buffer = dmabuf->priv;

        _ion_buffer_destroy(buffer);
}

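/*
 * dma-buf .begin_cpu_access/.end_cpu_access callbacks: take or drop a
 * kernel mapping when the heap supports one, and sync the sg_table of
 * every attachment for the CPU or for the device respectively.
 */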
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                        enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;
        struct ion_dma_buf_attachment *a;
        int ret = 0;

        /*
         * TODO: Move this elsewhere because we don't always need a vaddr
         */
        if (buffer->heap->ops->map_kernel) {
                mutex_lock(&buffer->lock);
                vaddr = ion_buffer_kmap_get(buffer);
                if (IS_ERR(vaddr)) {
                        ret = PTR_ERR(vaddr);
                        goto unlock;
                }
                mutex_unlock(&buffer->lock);
        }

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
                                    direction);
        }

unlock:
        mutex_unlock(&buffer->lock);
        return ret;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                      enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        struct ion_dma_buf_attachment *a;

        if (buffer->heap->ops->map_kernel) {
                mutex_lock(&buffer->lock);
                ion_buffer_kmap_put(buffer);
                mutex_unlock(&buffer->lock);
        }

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
                                       direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .attach = ion_dma_buf_attach,
        .detach = ion_dma_buf_detach,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
};

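/*
 * Allocate a buffer of @len bytes from the first heap in @heap_id_mask
 * that succeeds, wrap it in a dma-buf and return an fd for it (or a
 * negative errno).
 */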
static int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
        struct ion_device *dev = internal_dev;
        struct ion_buffer *buffer = NULL;
        struct ion_heap *heap;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        int fd;
        struct dma_buf *dmabuf;

        pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
                 len, heap_id_mask, flags);
        /*
         * traverse the list of heaps available in this system in priority
         * order.  If the heap type is supported by the client and matches
         * the request of the caller, allocate from it.  Repeat until the
         * allocation has succeeded or all heaps have been tried.
         */
        len = PAGE_ALIGN(len);

        if (!len)
                return -EINVAL;

        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & heap_id_mask))
                        continue;
                buffer = ion_buffer_create(heap, dev, len, flags);
                if (!IS_ERR(buffer))
                        break;
        }
        up_read(&dev->lock);

        if (!buffer)
                return -ENODEV;

        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        exp_info.ops = &dma_buf_ops;
        exp_info.size = buffer->size;
        exp_info.flags = O_RDWR;
        exp_info.priv = buffer;

        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                _ion_buffer_destroy(buffer);
                return PTR_ERR(dmabuf);
        }

        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);

        return fd;
}

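/*
 * ION_IOC_HEAP_QUERY backend: with a NULL user buffer just report the
 * number of heaps, otherwise copy up to query->cnt ion_heap_data records
 * (name, type, id) to userspace.
 */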
static int ion_query_heaps(struct ion_heap_query *query)
{
        struct ion_device *dev = internal_dev;
        struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
        int ret = -EINVAL, cnt = 0, max_cnt;
        struct ion_heap *heap;
        struct ion_heap_data hdata;

        memset(&hdata, 0, sizeof(hdata));

        down_read(&dev->lock);
        if (!buffer) {
                query->cnt = dev->heap_cnt;
                ret = 0;
                goto out;
        }

        if (query->cnt <= 0)
                goto out;

        max_cnt = query->cnt;

        plist_for_each_entry(heap, &dev->heaps, node) {
                strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
                hdata.name[sizeof(hdata.name) - 1] = '\0';
                hdata.type = heap->type;
                hdata.heap_id = heap->id;

                if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
                        ret = -EFAULT;
                        goto out;
                }

                cnt++;
                if (cnt >= max_cnt)
                        break;
        }

        query->cnt = cnt;
        ret = 0;
out:
        up_read(&dev->lock);
        return ret;
}

union ion_ioctl_arg {
        struct ion_allocation_data allocation;
        struct ion_heap_query query;
};

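/* Reject ioctl arguments that set reserved fields, so they stay usable. */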
static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
{
        switch (cmd) {
        case ION_IOC_HEAP_QUERY:
                if (arg->query.reserved0 ||
                    arg->query.reserved1 ||
                    arg->query.reserved2)
                        return -EINVAL;
                break;
        default:
                break;
        }

        return 0;
}

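/*
 * Main ioctl entry point for /dev/ion: copies the argument in, validates
 * it, dispatches ION_IOC_ALLOC and ION_IOC_HEAP_QUERY, and copies the
 * result back for ioctls with a read direction.
 */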
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        int ret = 0;
        union ion_ioctl_arg data;

        if (_IOC_SIZE(cmd) > sizeof(data))
                return -EINVAL;

        /*
         * The copy_from_user is done unconditionally here, for both read
         * and write directions, so the argument can be validated.  If the
         * ioctl has no write direction, the buffer is cleared before use.
         */
        if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
                return -EFAULT;

        ret = validate_ioctl_arg(cmd, &data);
        if (ret) {
                pr_warn_once("%s: ioctl validate failed\n", __func__);
                return ret;
        }

        if (!(_IOC_DIR(cmd) & _IOC_WRITE))
                memset(&data, 0, sizeof(data));

        switch (cmd) {
        case ION_IOC_ALLOC:
        {
                int fd;

                fd = ion_alloc(data.allocation.len,
                               data.allocation.heap_id_mask,
                               data.allocation.flags);
                if (fd < 0)
                        return fd;

                data.allocation.fd = fd;

                break;
        }
        case ION_IOC_HEAP_QUERY:
                ret = ion_query_heaps(&data.query);
                break;
        default:
                return -ENOTTY;
        }

        if (_IOC_DIR(cmd) & _IOC_READ) {
                if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }
        return ret;
}
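
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a userspace client typically queries the available heaps and then asks
 * for a dma-buf fd.  Field and macro names follow the staging uapi header
 * (drivers/staging/android/uapi/ion.h); the include path for that header
 * depends on how it is installed on the system.
 *
 *      int ion_fd = open("/dev/ion", O_RDWR | O_CLOEXEC);
 *      struct ion_allocation_data alloc_data = {
 *              .len          = 4096,
 *              .heap_id_mask = 1 << some_heap_id, // id from ION_IOC_HEAP_QUERY
 *              .flags        = 0,                 // or ION_FLAG_CACHED
 *      };
 *
 *      if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data) == 0) {
 *              // alloc_data.fd is now a dma-buf fd: it can be mmap()ed or
 *              // passed to another driver, and is released with close().
 *      }
 */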

static const struct file_operations ion_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = ion_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
};

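/*
 * debugfs "<heap>_shrink" helpers: reading reports how many objects the
 * heap's shrinker can free; writing a count (or 0 for "everything
 * currently freeable") triggers a scan of that many objects.
 */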
static int debug_shrink_set(void *data, u64 val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = GFP_HIGHUSER;
        sc.nr_to_scan = val;

        if (!val) {
                objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
                sc.nr_to_scan = objs;
        }

        heap->shrinker.scan_objects(&heap->shrinker, &sc);
        return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = GFP_HIGHUSER;
        sc.nr_to_scan = 0;

        objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
        *val = objs;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
                        debug_shrink_set, "%llu\n");

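/*
 * Register @heap with the ION device: set up deferred freeing and the
 * shrinker if the heap supports them, create the per-heap debugfs
 * statistics, and add the heap to the device's priority list so that
 * heaps with higher ids are tried first during allocation.
 */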
void ion_device_add_heap(struct ion_heap *heap)
{
        struct ion_device *dev = internal_dev;
        int ret;
        struct dentry *heap_root;
        char debug_name[64];

        if (!heap->ops->allocate || !heap->ops->free)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                       __func__);

        spin_lock_init(&heap->free_lock);
        spin_lock_init(&heap->stat_lock);
        heap->free_list_size = 0;

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_init_deferred_free(heap);

        if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
                ret = ion_heap_init_shrinker(heap);
                if (ret)
                        pr_err("%s: Failed to register shrinker\n", __func__);
        }

        heap->dev = dev;
        heap->num_of_buffers = 0;
        heap->num_of_alloc_bytes = 0;
        heap->alloc_bytes_wm = 0;

        heap_root = debugfs_create_dir(heap->name, dev->debug_root);
        debugfs_create_u64("num_of_buffers",
                           0444, heap_root,
                           &heap->num_of_buffers);
        debugfs_create_u64("num_of_alloc_bytes",
                           0444,
                           heap_root,
                           &heap->num_of_alloc_bytes);
        debugfs_create_u64("alloc_bytes_wm",
                           0444,
                           heap_root,
                           &heap->alloc_bytes_wm);

        if (heap->shrinker.count_objects &&
            heap->shrinker.scan_objects) {
                snprintf(debug_name, 64, "%s_shrink", heap->name);
                debugfs_create_file(debug_name,
                                    0644,
                                    heap_root,
                                    heap,
                                    &debug_shrink_fops);
        }

        down_write(&dev->lock);
        heap->id = heap_id++;
        /*
         * use negative heap->id to reverse the priority -- when traversing
         * the list later attempt higher id numbers first
         */
        plist_node_init(&heap->node, -heap->id);
        plist_add(&heap->node, &dev->heaps);

        dev->heap_cnt++;
        up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

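/*
 * Create the single global ION device at boot: register the /dev/ion
 * misc device, create the "ion" debugfs directory, and initialize the
 * device lock and heap list.
 */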
static int ion_device_create(void)
{
        struct ion_device *idev;
        int ret;

        idev = kzalloc(sizeof(*idev), GFP_KERNEL);
        if (!idev)
                return -ENOMEM;

        idev->dev.minor = MISC_DYNAMIC_MINOR;
        idev->dev.name = "ion";
        idev->dev.fops = &ion_fops;
        idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                kfree(idev);
                return ret;
        }

        idev->debug_root = debugfs_create_dir("ion", NULL);
        init_rwsem(&idev->lock);
        plist_head_init(&idev->heaps);
        internal_dev = idev;
        return 0;
}
subsys_initcall(ion_device_create);