drivers/staging/android/ion/ion_chunk_heap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * ION memory allocator chunk heap helper
 *
 * Copyright (C) 2012 Google, Inc.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"

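/*
 * struct ion_chunk_heap - carve-out heap backed by fixed-size chunks
 * @heap:	common ion_heap, embedded so container_of() recovers this struct
 * @pool:	gen_pool handing out chunk-sized blocks from the carve-out
 * @base:	physical base address of the carve-out
 * @chunk_size:	allocation granularity, in bytes
 * @size:	total size of the carve-out, in bytes
 * @allocated:	bytes currently handed out to buffers
 */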
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	phys_addr_t base;
	unsigned long chunk_size;
	unsigned long size;
	unsigned long allocated;
};

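/*
 * Round the request up to a whole number of chunks, refuse it if the heap
 * does not have that much capacity left, then pull one chunk from the
 * gen_pool per scatterlist entry.  If the pool runs dry part way through,
 * the chunks grabbed so far are returned before failing.
 */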
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
			    chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->sg_table = table;
	chunk_heap->allocated += allocated_size;
	return 0;
err:
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}

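/*
 * Zero the buffer before handing each chunk back to the gen_pool, so stale
 * data never leaks into a later allocation, then drop the sg_table.
 */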
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

	ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}

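/*
 * Only allocate/free are chunk-specific; user and kernel mappings are
 * handled by the generic ion_heap helpers.
 */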
static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

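/*
 * Create a chunk heap over the carve-out at [base, base + size).  The
 * backing pages are zeroed up front through a write-combined mapping, a
 * gen_pool with chunk_size granularity is built over the region, and the
 * heap is flagged for deferred free so buffers are released from ION's
 * free thread rather than in the caller's context.
 */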
struct ion_heap *ion_chunk_heap_create(phys_addr_t base, size_t size,
				       size_t chunk_size)
{
	struct ion_chunk_heap *chunk_heap;
	int ret;
	struct page *page;

	page = pfn_to_page(PFN_DOWN(base));
	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
	if (ret)
		return ERR_PTR(ret);

	chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	chunk_heap->chunk_size = chunk_size;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		ret = -ENOMEM;
		goto error_gen_pool_create;
	}
	chunk_heap->base = base;
	chunk_heap->size = size;
	chunk_heap->allocated = 0;

	gen_pool_add(chunk_heap->pool, chunk_heap->base, size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_debug("%s: base %pa size %zu\n", __func__,
		 &chunk_heap->base, size);

	return &chunk_heap->heap;

error_gen_pool_create:
	kfree(chunk_heap);
	return ERR_PTR(ret);
}
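
/*
 * Example registration (a minimal sketch, not part of this file): a board
 * or platform hook could expose a reserved region through this heap roughly
 * as below.  The base address, region size, chunk size and the
 * single-argument ion_device_add_heap() call are assumptions about the
 * surrounding ION code, not something this file provides.
 *
 *	struct ion_heap *heap;
 *
 *	hypothetical 16 MiB carve-out at 0x80000000, 64 KiB chunks:
 *
 *	heap = ion_chunk_heap_create(0x80000000, SZ_16M, SZ_64K);
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 *
 *	heap->name = "chunk";
 *	ion_device_add_heap(heap);
 */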