/* drivers/gpu/drm/virtio/virtgpu_plane.c — drm/virtio: simplify cursor updates */
1 /*
2  * Copyright (C) 2015 Red Hat, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining
6  * a copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sublicense, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the
14  * next paragraph) shall be included in all copies or substantial
15  * portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20  * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24  */
25
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_fourcc.h>
28 #include <drm/drm_plane_helper.h>
29
30 #include "virtgpu_drv.h"
31
/* Pixel formats advertised for primary planes (opaque scanout). */
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

/* Pixel formats advertised for cursor planes (alpha needed for blending). */
static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};
39
40 uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
41 {
42         uint32_t format;
43
44         switch (drm_fourcc) {
45         case DRM_FORMAT_XRGB8888:
46                 format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
47                 break;
48         case DRM_FORMAT_ARGB8888:
49                 format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
50                 break;
51         case DRM_FORMAT_BGRX8888:
52                 format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
53                 break;
54         case DRM_FORMAT_BGRA8888:
55                 format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
56                 break;
57         default:
58                 /*
59                  * This should not happen, we handle everything listed
60                  * in virtio_gpu_formats[].
61                  */
62                 format = 0;
63                 break;
64         }
65         WARN_ON(format == 0);
66         return format;
67 }
68
/*
 * Tear down a plane allocated by virtio_gpu_plane_init().
 * drm_plane_cleanup() must run before the struct memory is freed,
 * since it unlinks the plane from the device's mode_config lists.
 */
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}
74
/* Plane ops shared by primary and cursor planes; all state handling is
 * delegated to the generic atomic helpers except destruction, which must
 * free our kzalloc'ed struct. */
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= virtio_gpu_plane_destroy,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};
83
84 static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
85                                          struct drm_plane_state *state)
86 {
87         bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
88         struct drm_crtc_state *crtc_state;
89         int ret;
90
91         if (!state->fb || !state->crtc)
92                 return 0;
93
94         crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
95         if (IS_ERR(crtc_state))
96                 return PTR_ERR(crtc_state);
97
98         ret = drm_atomic_helper_check_plane_state(state, crtc_state,
99                                                   DRM_PLANE_HELPER_NO_SCALING,
100                                                   DRM_PLANE_HELPER_NO_SCALING,
101                                                   is_cursor, true);
102         return ret;
103 }
104
/*
 * Commit a primary-plane update to the host: upload dirty pixels (dumb
 * BOs only), point the scanout at the resource, then flush the region.
 * The transfer -> set_scanout -> flush ordering is part of the virtio-gpu
 * protocol and must not be reordered.
 */
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;
	uint32_t handle;

	/* Prefer the old state's CRTC so a disable is sent to the output
	 * the plane is leaving (old_state deliberately wins below). */
	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb && output->enabled) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
		if (bo->dumb) {
			/* Dumb BOs live in guest memory: push the visible
			 * rectangle to the host-side resource first.
			 * src_* are 16.16 fixed point, hence the >> 16. */
			virtio_gpu_cmd_transfer_to_host_2d
				(vgdev, bo, 0,
				 cpu_to_le32(plane->state->src_w >> 16),
				 cpu_to_le32(plane->state->src_h >> 16),
				 cpu_to_le32(plane->state->src_x >> 16),
				 cpu_to_le32(plane->state->src_y >> 16), NULL);
		}
	} else {
		/* handle 0 disables the scanout on the host. */
		handle = 0;
	}

	DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
		  plane->state->crtc_w, plane->state->crtc_h,
		  plane->state->crtc_x, plane->state->crtc_y,
		  plane->state->src_w >> 16,
		  plane->state->src_h >> 16,
		  plane->state->src_x >> 16,
		  plane->state->src_y >> 16);
	virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
				   plane->state->src_w >> 16,
				   plane->state->src_h >> 16,
				   plane->state->src_x >> 16,
				   plane->state->src_y >> 16);
	if (handle)
		virtio_gpu_cmd_resource_flush(vgdev, handle,
					      plane->state->src_x >> 16,
					      plane->state->src_y >> 16,
					      plane->state->src_w >> 16,
					      plane->state->src_h >> 16);
}
157
158 static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
159                                         struct drm_plane_state *new_state)
160 {
161         struct drm_device *dev = plane->dev;
162         struct virtio_gpu_device *vgdev = dev->dev_private;
163         struct virtio_gpu_framebuffer *vgfb;
164         struct virtio_gpu_object *bo;
165
166         if (!new_state->fb)
167                 return 0;
168
169         vgfb = to_virtio_gpu_framebuffer(new_state->fb);
170         bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
171         if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
172                 vgfb->fence = virtio_gpu_fence_alloc(vgdev);
173                 if (!vgfb->fence)
174                         return -ENOMEM;
175         }
176
177         return 0;
178 }
179
180 static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
181                                          struct drm_plane_state *old_state)
182 {
183         struct virtio_gpu_framebuffer *vgfb;
184
185         if (!plane->state->fb)
186                 return;
187
188         vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
189         if (vgfb->fence) {
190                 dma_fence_put(&vgfb->fence->f);
191                 vgfb->fence = NULL;
192         }
193 }
194
/*
 * Commit a cursor update: for a new cursor image, synchronously upload
 * the pixels, then send either UPDATE_CURSOR (new image / hotspot) or
 * MOVE_CURSOR (position only) via the dedicated cursor virtqueue.
 */
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	/* old_state's CRTC deliberately wins, mirroring the primary plane,
	 * so a disable targets the output being left. */
	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		/* resource_id 0 hides the cursor on the host. */
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		/* NOTE(review): relies on vgfb->fence having been allocated
		 * in cursor_prepare_fb under a matching condition — confirm
		 * the prepare/update predicates cannot diverge. */
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, bo, 0,
			 cpu_to_le32(plane->state->crtc_w),
			 cpu_to_le32(plane->state->crtc_h),
			 0, 0, vgfb->fence);
		/* Block until the upload lands so the host never samples a
		 * half-written cursor image; fence is single-use. */
		dma_fence_wait(&vgfb->fence->f, true);
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}

	if (plane->state->fb != old_state->fb) {
		/* Image (or visibility) changed: full UPDATE_CURSOR with
		 * resource handle and hotspot. */
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		/* Same image: cheaper MOVE_CURSOR, position only. */
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}
261
/* Atomic helper hooks for primary planes: no fb prepare/cleanup needed
 * since primary updates do not allocate per-commit fences. */
static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

/* Atomic helper hooks for cursor planes: prepare_fb/cleanup_fb manage
 * the fence used to synchronize cursor image uploads. */
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_cursor_prepare_fb,
	.cleanup_fb		= virtio_gpu_cursor_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};
273
274 struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
275                                         enum drm_plane_type type,
276                                         int index)
277 {
278         struct drm_device *dev = vgdev->ddev;
279         const struct drm_plane_helper_funcs *funcs;
280         struct drm_plane *plane;
281         const uint32_t *formats;
282         int ret, nformats;
283
284         plane = kzalloc(sizeof(*plane), GFP_KERNEL);
285         if (!plane)
286                 return ERR_PTR(-ENOMEM);
287
288         if (type == DRM_PLANE_TYPE_CURSOR) {
289                 formats = virtio_gpu_cursor_formats;
290                 nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
291                 funcs = &virtio_gpu_cursor_helper_funcs;
292         } else {
293                 formats = virtio_gpu_formats;
294                 nformats = ARRAY_SIZE(virtio_gpu_formats);
295                 funcs = &virtio_gpu_primary_helper_funcs;
296         }
297         ret = drm_universal_plane_init(dev, plane, 1 << index,
298                                        &virtio_gpu_plane_funcs,
299                                        formats, nformats,
300                                        NULL, type, NULL);
301         if (ret)
302                 goto err_plane_init;
303
304         drm_plane_helper_add(plane, funcs);
305         return plane;
306
307 err_plane_init:
308         kfree(plane);
309         return ERR_PTR(ret);
310 }