drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>	/* kmap()/kunmap() used by the dma-buf map ops */

#include "omap_drv.h"

/* -----------------------------------------------------------------------------
 * DMABUF Export
 */

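/*
 * Map the buffer into an importer's device address space. The object is
 * pinned to a physically contiguous DMA address first, so a single-entry
 * scatterlist is enough to describe the whole buffer.
 */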
static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	dma_addr_t dma_addr;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	/* Camera, etc. need physically contiguous buffers, but we need a
	 * better way to know this...
	 */
	ret = omap_gem_pin(obj, &dma_addr);
	if (ret)
		goto out_free;

	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto out_unpin;

	sg_init_table(sg->sgl, 1);
	sg_dma_len(sg->sgl) = obj->size;
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
	sg_dma_address(sg->sgl) = dma_addr;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	return sg;

out_unpin:
	omap_gem_unpin(obj);
out_free:
	kfree(sg);
	return ERR_PTR(ret);
}

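/*
 * Tear down a mapping created by omap_gem_map_dma_buf(): drop the pin and
 * free the scatterlist.
 */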
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;

	omap_gem_unpin(obj);
	sg_free_table(sg);
	kfree(sg);
}

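/*
 * CPU access bracket: make sure backing pages are attached before the
 * importer touches the buffer with the CPU. Tiled buffers are rejected,
 * since a de-tiled CPU view would require pinning (part of) the buffer.
 */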
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	if (omap_gem_flags(obj) & OMAP_BO_TILED) {
		/* TODO: we would need to pin at least part of the buffer to
		 * get a de-tiled view. For now just reject it.
		 */
		return -ENOMEM;
	}

	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}

static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;

	omap_gem_put_pages(obj);
	return 0;
}

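/*
 * Per-page kernel mappings. omap_gem_get_pages() is called with remap=false
 * here, which only looks up a pages array that is already attached; the
 * pages themselves are expected to have been pinned by begin_cpu_access().
 */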
static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	if (omap_gem_get_pages(obj, &pages, false))
		return NULL;

	omap_gem_cpu_sync_page(obj, page_num);
	return kmap(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	if (omap_gem_get_pages(obj, &pages, false))
		return;

	kunmap(pages[page_num]);
}

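/*
 * Userspace mmap of the exported dma-buf: drm_gem_mmap_obj() does the
 * generic GEM setup (vma flags, object reference), then omap_gem_mmap_obj()
 * applies the omapdrm-specific page protection and fault handling.
 */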
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret;

	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}

static const struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.map = omap_gem_dmabuf_kmap,
	.unmap = omap_gem_dmabuf_kunmap,
	.mmap = omap_gem_dmabuf_mmap,
};

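/*
 * Entry point for the driver's gem_prime_export hook, reached from userspace
 * via DRM_IOCTL_PRIME_HANDLE_TO_FD (e.g. drmPrimeHandleToFD() in libdrm):
 * wrap a GEM object in a dma-buf using the ops table above.
 */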
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}

/* -----------------------------------------------------------------------------
 * DMABUF Import
 */

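/*
 * Import a dma-buf as a GEM object. A buffer that was exported by this very
 * device is short-circuited back to its existing GEM object; anything else
 * is attached, mapped to a scatterlist, and wrapped in a new GEM object.
 */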
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}