// SPDX-License-Identifier: GPL-2.0
/*
 * ION memory allocator chunk heap helper
 *
 * Copyright (C) 2012 Google, Inc.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ion.h"
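
/*
 * Per-heap bookkeeping: the gen_pool backing the carveout, the fixed
 * chunk size, and how much of the carveout is currently handed out.
 */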
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	phys_addr_t base;
	unsigned long chunk_size;
	unsigned long size;
	unsigned long allocated;
};
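
/*
 * Allocation rounds the request up to a whole number of chunks and pulls
 * each chunk out of the gen_pool individually, so a buffer may be
 * physically discontiguous at chunk granularity.
 */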
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

	sg = table->sgl;
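	/* Hand out one chunk per scatterlist entry. */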
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
			    chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->sg_table = table;
	chunk_heap->allocated += allocated_size;
	return 0;
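
	/* Partial failure: return every chunk allocated so far to the pool. */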
err:
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}
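
/*
 * Buffers are zeroed on free before their chunks go back to the pool; with
 * ION_HEAP_FLAG_DEFER_FREE set below, this normally runs from ION's
 * deferred-free path rather than in the caller's context.
 */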
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

	ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}
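
/*
 * Only allocate/free are chunk-heap specific; kernel and user mappings are
 * handled by the generic ion_heap helpers.
 */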
static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};
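
/*
 * The caller hands in a physically contiguous carveout (base/size); it is
 * zeroed once up front and then managed as chunk_size granules by a
 * gen_pool.
 */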
struct ion_heap *ion_chunk_heap_create(phys_addr_t base, size_t size, size_t chunk_size)
{
	struct ion_chunk_heap *chunk_heap;
	struct page *page;
	int ret;

	page = pfn_to_page(PFN_DOWN(base));
	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
	if (ret)
		return ERR_PTR(ret);

	chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

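	/*
	 * gen_pool_create() takes the minimum allocation order as log2 of
	 * bytes, so get_order(chunk_size) + PAGE_SHIFT makes the pool hand
	 * out memory in whole chunks.
	 */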
	chunk_heap->chunk_size = chunk_size;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		ret = -ENOMEM;
		goto error_gen_pool_create;
	}
	chunk_heap->base = base;
	chunk_heap->size = size;
	chunk_heap->allocated = 0;

	gen_pool_add(chunk_heap->pool, chunk_heap->base, size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_debug("%s: base %pa size %zu\n", __func__,
		 &chunk_heap->base, size);

	return &chunk_heap->heap;

error_gen_pool_create:
	kfree(chunk_heap);
	return ERR_PTR(ret);
}
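
/*
 * Sketch of a typical caller (hypothetical names and values, not part of
 * this file): platform code would set aside a carveout at boot and register
 * it with something like
 *
 *	heap = ion_chunk_heap_create(carveout_base, carveout_size, SZ_64K);
 *	if (!IS_ERR(heap))
 *		ion_device_add_heap(heap);
 *
 * where carveout_base/carveout_size describe the reserved region.
 */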