drivers/gpu/drm/udl/udl_dmabuf.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * udl_dmabuf.c
 *
 * Copyright (c) 2014 The Chromium OS Authors
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

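/*
 * Per-attachment state: a private copy of the exporter's scatter/gather
 * table plus the direction it is currently DMA-mapped in (DMA_NONE until
 * udl_map_dma_buf() has run).
 */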
struct udl_drm_dmabuf_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
        bool is_mapped;
};

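/* dma-buf attach callback: allocate per-attachment state, nothing mapped yet. */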
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attach)
{
        struct udl_drm_dmabuf_attachment *udl_attach;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
                        attach->dmabuf->size);

        udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
        if (!udl_attach)
                return -ENOMEM;

        udl_attach->dir = DMA_NONE;
        attach->priv = udl_attach;

        return 0;
}

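/*
 * dma-buf detach callback: unmap the attachment's sg table if it was
 * mapped, then free the per-attachment state.
 */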
static void udl_detach_dma_buf(struct dma_buf *dmabuf,
                               struct dma_buf_attachment *attach)
{
        struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
        struct sg_table *sgt;

        if (!udl_attach)
                return;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
                        attach->dmabuf->size);

        sgt = &udl_attach->sgt;

        if (udl_attach->dir != DMA_NONE)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
                                udl_attach->dir);

        sg_free_table(sgt);
        kfree(udl_attach);
        attach->priv = NULL;
}

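/*
 * dma-buf map callback: pin the GEM object's pages if needed, mirror the
 * object's sg table into the per-attachment copy and DMA-map it for the
 * importer's device.  Repeat calls with the same direction return the
 * cached mapping.
 */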
static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
                                        enum dma_data_direction dir)
{
        struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
        struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
        struct drm_device *dev = obj->base.dev;
        struct udl_device *udl = dev->dev_private;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt = NULL;
        unsigned int i;
        int page_count;
        int nents, ret;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
                        attach->dmabuf->size, dir);

        /* just return current sgt if already requested. */
        if (udl_attach->dir == dir && udl_attach->is_mapped)
                return &udl_attach->sgt;

        if (!obj->pages) {
                ret = udl_gem_get_pages(obj);
                if (ret) {
                        DRM_ERROR("failed to map pages.\n");
                        return ERR_PTR(ret);
                }
        }

        page_count = obj->base.size / PAGE_SIZE;
        obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
        if (IS_ERR(obj->sg)) {
                DRM_ERROR("failed to allocate sgt.\n");
                return ERR_CAST(obj->sg);
        }

        sgt = &udl_attach->sgt;

        ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
        if (ret) {
                DRM_ERROR("failed to alloc sgt.\n");
                return ERR_PTR(-ENOMEM);
        }

        mutex_lock(&udl->gem_lock);

        rd = obj->sg->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        if (dir != DMA_NONE) {
                nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
                if (!nents) {
                        DRM_ERROR("failed to map sgl with iommu.\n");
                        sg_free_table(sgt);
                        sgt = ERR_PTR(-EIO);
                        goto err_unlock;
                }
        }

        udl_attach->is_mapped = true;
        udl_attach->dir = dir;
        attach->priv = udl_attach;

err_unlock:
        mutex_unlock(&udl->gem_lock);
        return sgt;
}

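/*
 * dma-buf unmap callback: nothing to do here; the mapping is released in
 * udl_detach_dma_buf() instead.
 */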
static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
                              struct sg_table *sgt,
                              enum dma_data_direction dir)
{
        /* Nothing to do. */
        DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
                        attach->dmabuf->size, dir);
}

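/* kmap, kunmap and mmap of exported buffers are not implemented. */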
static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        /* TODO */

        return NULL;
}

static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
                              unsigned long page_num, void *addr)
{
        /* TODO */
}

static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
                           struct vm_area_struct *vma)
{
        /* TODO */

        return -EINVAL;
}

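/* dma-buf operations used for buffers exported by udl. */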
static const struct dma_buf_ops udl_dmabuf_ops = {
        .attach                 = udl_attach_dma_buf,
        .detach                 = udl_detach_dma_buf,
        .map_dma_buf            = udl_map_dma_buf,
        .unmap_dma_buf          = udl_unmap_dma_buf,
        .map                    = udl_dmabuf_kmap,
        .unmap                  = udl_dmabuf_kunmap,
        .mmap                   = udl_dmabuf_mmap,
        .release                = drm_gem_dmabuf_release,
};

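/* PRIME export: wrap a GEM object in a dma-buf using the ops above. */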
struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj, int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &udl_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = flags;
        exp_info.priv = obj;

        return drm_gem_dmabuf_export(dev, &exp_info);
}

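/*
 * Create a udl GEM object backed by an imported sg table and fill its
 * page array from that table.
 */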
static int udl_prime_create(struct drm_device *dev,
                            size_t size,
                            struct sg_table *sg,
                            struct udl_gem_object **obj_p)
{
        struct udl_gem_object *obj;
        int npages;

        npages = size / PAGE_SIZE;

        *obj_p = NULL;
        obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
        if (!obj)
                return -ENOMEM;

        obj->sg = sg;
        obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (obj->pages == NULL) {
                DRM_ERROR("obj pages is NULL %d\n", npages);
                return -ENOMEM;
        }

        drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

        *obj_p = obj;
        return 0;
}

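/*
 * PRIME import: attach to the foreign dma-buf, map it bidirectionally and
 * wrap the resulting sg table in a new udl GEM object.  All references
 * taken here are dropped again on failure.
 */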
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
                                struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct udl_gem_object *uobj;
        int ret;

        /* need to attach */
        get_device(dev->dev);
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach)) {
                put_device(dev->dev);
                return ERR_CAST(attach);
        }

        get_dma_buf(dma_buf);

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
        if (ret)
                goto fail_unmap;

        uobj->base.import_attach = attach;
        uobj->flags = UDL_BO_WC;

        return &uobj->base;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);
        put_device(dev->dev);
        return ERR_PTR(ret);
}