// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "ion.h"

static struct ion_device *internal_dev;
static int heap_id;

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	buffer->dev = dev;
	buffer->size = len;

	ret = heap->ops->allocate(heap, buffer, len, flags);
	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		/* drain the deferred-free list, then retry once */
		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, flags);
		if (ret)
			goto err2;
	}

	if (!buffer->sg_table) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	spin_lock(&heap->stat_lock);
	heap->num_of_buffers++;
	heap->num_of_alloc_bytes += len;
	if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
		heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
	spin_unlock(&heap->stat_lock);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

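/*
 * ion_buffer_destroy() performs the real teardown. It runs either
 * synchronously from _ion_buffer_destroy() below, or later from the
 * heap's deferred-free worker (fed by ion_heap_freelist_add()), once
 * the dma-buf that owned the buffer has been released.
 */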
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	spin_lock(&buffer->heap->stat_lock);
	buffer->heap->num_of_buffers--;
	buffer->heap->num_of_alloc_bytes -= buffer->size;
	spin_unlock(&buffer->heap->stat_lock);

	kfree(buffer);
}

static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(!vaddr,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

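/*
 * kmap_cnt is a plain refcount over heap->ops->map_kernel(): the first
 * ion_buffer_kmap_get() creates the kernel mapping and caches it in
 * buffer->vaddr, later gets only bump the count, and the mapping is torn
 * down when the count drops back to zero. Both helpers expect
 * buffer->lock to be held, which the cpu-access hooks below ensure.
 */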
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		memcpy(new_sg, sg, sizeof(*sg));
		new_sg->dma_address = 0;
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

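/*
 * Each attachment gets its own copy of the buffer's sg_table so that the
 * per-device DMA addresses written by dma_map_sg() for one device never
 * clobber another device's mapping; only the underlying pages are shared.
 */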
static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}

struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};

static int ion_dma_buf_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	struct ion_buffer *buffer = dmabuf->priv;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
				struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);
	free_duped_table(a->table);

	kfree(a);
}

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;

	table = a->table;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
			direction))
		return ERR_PTR(-ENOMEM);

	return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	_ion_buffer_destroy(buffer);
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;
	struct ion_dma_buf_attachment *a;
	int ret = 0;

	/*
	 * TODO: Move this elsewhere because we don't always need a vaddr
	 */
	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		vaddr = ion_buffer_kmap_get(buffer);
		if (IS_ERR(vaddr)) {
			ret = PTR_ERR(vaddr);
			goto unlock;
		}
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
				    direction);
	}

unlock:
	mutex_unlock(&buffer->lock);
	return ret;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_dma_buf_attachment *a;

	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		ion_buffer_kmap_put(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
				       direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

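/*
 * A consumer is expected to bracket any CPU access so the two handlers
 * above can do cache maintenance, roughly (a sketch using the generic
 * dma-buf API, not code from this file):
 *
 *	dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	...touch the buffer through its kernel or userspace mapping...
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */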
static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.attach = ion_dma_buf_attach,
	.detach = ion_dma_buf_detatch,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
};

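/*
 * A device driver importing the exported fd reaches the ops table above
 * through the generic dma-buf API, roughly (a hedged sketch, not part of
 * this driver):
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *att = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt = dma_buf_map_attachment(att,
 *						      DMA_BIDIRECTIONAL);
 *	...program the device with sg_dma_address(sgt->sgl)...
 *	dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, att);
 *	dma_buf_put(dmabuf);
 */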
static int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
	struct ion_device *dev = internal_dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	int fd;
	struct dma_buf *dmabuf;

	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
		 len, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller, allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return -EINVAL;

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (!buffer)
		return -ENODEV;
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		_ion_buffer_destroy(buffer);
		return PTR_ERR(dmabuf);
	}

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

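/*
 * From userspace the whole allocation is one ioctl on /dev/ion followed
 * by an mmap of the returned dma-buf fd, roughly (a sketch; the structs
 * and constants come from the ion uapi header):
 *
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.heap_id_mask = 1 << target_heap_id,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		 alloc.fd, 0);
 */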
static int ion_query_heaps(struct ion_heap_query *query)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}

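/*
 * Heap discovery is a two-call protocol (a sketch): call once with
 * query.heaps left zero to learn the heap count, size a buffer, then
 * call again to have it filled in:
 *
 *	struct ion_heap_query query = {0};
 *	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);
 *	data = calloc(query.cnt, sizeof(struct ion_heap_data));
 *	query.heaps = (__u64)(uintptr_t)data;
 *	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);
 */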
union ion_ioctl_arg {
	struct ion_allocation_data allocation;
	struct ion_heap_query query;
};

static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
{
	switch (cmd) {
	case ION_IOC_HEAP_QUERY:
		if (arg->query.reserved0 ||
		    arg->query.reserved1 ||
		    arg->query.reserved2)
			return -EINVAL;
		break;
	default:
		break;
	}

	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	union ion_ioctl_arg data;

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	/*
	 * The copy_from_user is unconditional here for both read and write
	 * to do the validate. If there is no write for the ioctl, the
	 * buffer is cleared.
	 */
	if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	ret = validate_ioctl_arg(cmd, &data);
	if (ret) {
		pr_warn_once("%s: ioctl validate failed\n", __func__);
		return ret;
	}

	if (!(_IOC_DIR(cmd) & _IOC_WRITE))
		memset(&data, 0, sizeof(data));

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		int fd;

		fd = ion_alloc(data.allocation.len,
			       data.allocation.heap_id_mask,
			       data.allocation.flags);
		if (fd < 0)
			return fd;

		data.allocation.fd = fd;

		break;
	}
	case ION_IOC_HEAP_QUERY:
		ret = ion_query_heaps(&data.query);
		break;
	default:
		return -ENOTTY;
	}

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
			return -EFAULT;
	}
	return ret;
}

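/*
 * Direction note for the marshalling above: in the _IOC_DIR() sense,
 * _IOC_WRITE means userspace passes data into the kernel and _IOC_READ
 * means the kernel writes results back, so an _IOWR command such as
 * ION_IOC_ALLOC is copied in before the switch and copied out after it.
 */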
static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
};

static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

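/*
 * The per-heap "<name>_shrink" debugfs file (created below) drives the
 * shrinker by hand: reading it reports the current freeable object
 * count, writing N scans up to N objects, and writing 0 scans everything
 * that count_objects() reports, e.g. (a sketch; the exact path depends
 * on where debugfs is mounted):
 *
 *	echo 0 > /sys/kernel/debug/ion/<heap>/<heap>_shrink
 */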
void ion_device_add_heap(struct ion_heap *heap)
{
	struct ion_device *dev = internal_dev;
	int ret;
	struct dentry *heap_root;
	char debug_name[64];

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	spin_lock_init(&heap->stat_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
		ret = ion_heap_init_shrinker(heap);
		if (ret)
			pr_err("%s: Failed to register shrinker\n", __func__);
	}

	heap->dev = dev;
	heap->num_of_buffers = 0;
	heap->num_of_alloc_bytes = 0;
	heap->alloc_bytes_wm = 0;

	heap_root = debugfs_create_dir(heap->name, dev->debug_root);
	debugfs_create_u64("num_of_buffers",
			   0444, heap_root,
			   &heap->num_of_buffers);
	debugfs_create_u64("num_of_alloc_bytes",
			   0444, heap_root,
			   &heap->num_of_alloc_bytes);
	debugfs_create_u64("alloc_bytes_wm",
			   0444, heap_root,
			   &heap->alloc_bytes_wm);

	if (heap->shrinker.count_objects &&
	    heap->shrinker.scan_objects) {
		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name,
				    0644, heap_root,
				    heap, &debug_shrink_fops);
	}

	down_write(&dev->lock);
	heap->id = heap_id++;
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

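/*
 * A heap driver registers itself at init time; a minimal sketch modeled
 * on the system heap (my_heap_create() is hypothetical, standing in for
 * a heap-specific constructor that fills in ops, type and flags):
 *
 *	static int __init my_heap_init(void)
 *	{
 *		struct ion_heap *heap = my_heap_create();
 *
 *		heap->name = "my_heap";
 *		ion_device_add_heap(heap);
 *		return 0;
 *	}
 *	device_initcall(my_heap_init);
 */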
static int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ret;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	internal_dev = idev;
	return 0;
}
subsys_initcall(ion_device_create);