/*
 * drivers/staging/android/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"
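/*
 * A carveout heap hands out physically contiguous buffers from a memory
 * region reserved ("carved out") at boot time. The region is managed
 * with a genalloc pool; @base is its physical start address.
 */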
struct ion_carveout_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
};
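/*
 * Carve @size bytes out of the pool. gen_pool_alloc() returns 0 on
 * failure, which is translated to ION_CARVEOUT_ALLOCATE_FAIL so callers
 * can test for it explicitly.
 */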
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);

	if (!offset)
		return ION_CARVEOUT_ALLOCATE_FAIL;

	return offset;
}
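/*
 * Return a range to the pool. Freeing ION_CARVEOUT_ALLOCATE_FAIL is a
 * no-op so callers may free unconditionally.
 */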
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
		return;
	gen_pool_free(carveout_heap->pool, addr, size);
}
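/*
 * Allocate a buffer and describe it with a single-entry sg_table: the
 * carveout is physically contiguous, so one scatterlist entry suffices.
 * Alignment beyond PAGE_SIZE is rejected; the pool is created with
 * PAGE_SHIFT as its minimum allocation order, so results are already
 * page-aligned.
 */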
static int ion_carveout_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	struct sg_table *table;
	ion_phys_addr_t paddr;
	int ret;

	if (align > PAGE_SIZE)
		return -EINVAL;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto err_free;

	paddr = ion_carveout_allocate(heap, size, align);
	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
		ret = -ENOMEM;
		goto err_free_table;
	}

	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
	buffer->sg_table = table;

	return 0;

err_free_table:
	sg_free_table(table);
err_free:
	kfree(table);
	return ret;
}
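/*
 * Zero the buffer before handing it back to the pool so stale contents
 * never leak to the next client. For cached buffers, write the zeroes
 * back past the CPU cache as well.
 */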
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct sg_table *table = buffer->sg_table;
	struct page *page = sg_page(table->sgl);
	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

	ion_heap_buffer_zero(buffer);

	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

	ion_carveout_free(heap, paddr, buffer->size);
	sg_free_table(table);
	kfree(table);
}
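/*
 * Kernel and user-space mappings reuse the generic ion_heap helpers,
 * which operate on the sg_table built in ion_carveout_heap_allocate().
 */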
static struct ion_heap_ops carveout_heap_ops = {
	.allocate = ion_carveout_heap_allocate,
	.free = ion_carveout_heap_free,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};
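/*
 * Create a heap covering [heap_data->base, heap_data->base +
 * heap_data->size). The region is zeroed and synced for device once up
 * front so the first allocation starts from a clean state.
 */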
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_carveout_heap *carveout_heap;
	struct page *page;
	size_t size;
	int ret;

	page = pfn_to_page(PFN_DOWN(heap_data->base));
	size = heap_data->size;

	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);

	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
	if (ret)
		return ERR_PTR(ret);

	carveout_heap = kzalloc(sizeof(*carveout_heap), GFP_KERNEL);
	if (!carveout_heap)
		return ERR_PTR(-ENOMEM);

	carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!carveout_heap->pool) {
		kfree(carveout_heap);
		return ERR_PTR(-ENOMEM);
	}
	carveout_heap->base = heap_data->base;
	gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
		     -1);
	carveout_heap->heap.ops = &carveout_heap_ops;
	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
	carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	return &carveout_heap->heap;
}
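/*
 * Tear down a heap created by ion_carveout_heap_create(). All buffers
 * must have been freed back to the pool first; gen_pool_destroy() will
 * BUG on a pool with outstanding allocations.
 */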
void ion_carveout_heap_destroy(struct ion_heap *heap)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	gen_pool_destroy(carveout_heap->pool);
	kfree(carveout_heap);
}
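/*
 * Usage sketch (illustrative only, not part of this driver): a board
 * file typically reserves the region early in boot (e.g. with
 * memblock_reserve()) and then registers the heap with the ion device.
 * The id, base, and size below are hypothetical values.
 *
 *	static struct ion_platform_heap carveout_data = {
 *		.type = ION_HEAP_TYPE_CARVEOUT,
 *		.id   = 1,
 *		.name = "carveout",
 *		.base = 0x80000000,
 *		.size = SZ_16M,
 *	};
 *
 *	heap = ion_carveout_heap_create(&carveout_data);
 *	if (!IS_ERR(heap))
 *		ion_device_add_heap(idev, heap);
 */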