/*
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/virtgpu_drm.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <linux/sync_file.h>

#include "virtgpu_drv.h"
static void convert_to_hw_box(struct virtio_gpu_box *dst,
                              const struct drm_virtgpu_3d_box *src)
{
        dst->x = cpu_to_le32(src->x);
        dst->y = cpu_to_le32(src->y);
        dst->z = cpu_to_le32(src->z);
        dst->w = cpu_to_le32(src->w);
        dst->h = cpu_to_le32(src->h);
        dst->d = cpu_to_le32(src->d);
}
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_map *virtio_gpu_map = data;

        return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
                                         virtio_gpu_map->handle,
                                         &virtio_gpu_map->offset);
}
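/*
 * Reserve and validate every buffer object on @head. If any validation
 * fails, the reservation of the whole list is backed off so the caller
 * never sees a partially reserved list.
 */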
int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
                                    struct list_head *head)
{
        struct ttm_operation_ctx ctx = { false, false };
        struct ttm_validate_buffer *buf;
        struct ttm_buffer_object *bo;
        struct virtio_gpu_object *qobj;
        int ret;

        ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
        if (ret != 0)
                return ret;

        list_for_each_entry(buf, head, head) {
                bo = buf->bo;
                qobj = container_of(bo, struct virtio_gpu_object, tbo);
                ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
                if (ret) {
                        ttm_eu_backoff_reservation(ticket, head);
                        return ret;
                }
        }
        return 0;
}
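/*
 * Drop the GEM reference that was taken when each buffer was looked up and
 * placed on the validate list.
 */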
void virtio_gpu_unref_list(struct list_head *head)
{
        struct ttm_validate_buffer *buf;
        struct ttm_buffer_object *bo;
        struct virtio_gpu_object *qobj;

        list_for_each_entry(buf, head, head) {
                bo = buf->bo;
                qobj = container_of(bo, struct virtio_gpu_object, tbo);

                drm_gem_object_put_unlocked(&qobj->gem_base);
        }
}
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
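/*
 * Illustration only (not part of the driver): a userspace caller would fill
 * struct drm_virtgpu_execbuffer roughly as sketched below. The field usage is
 * inferred from the handler that follows (command, size, bo_handles,
 * num_bo_handles, flags, fence_fd); cmd_buf, cmd_size, handles and
 * num_handles are hypothetical caller-side names.
 *
 *      struct drm_virtgpu_execbuffer exbuf = {
 *              .command        = (uintptr_t)cmd_buf,   // virgl command stream
 *              .size           = cmd_size,             // stream size in bytes
 *              .bo_handles     = (uintptr_t)handles,   // array of uint32_t GEM handles
 *              .num_bo_handles = num_handles,
 *              .flags          = VIRTGPU_EXECBUF_FENCE_FD_OUT, // request an out-fence fd
 *              .fence_fd       = -1,
 *      };
 *      drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exbuf);
 */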
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *drm_file)
{
        struct drm_virtgpu_execbuffer *exbuf = data;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
        struct drm_gem_object *gobj;
        struct virtio_gpu_fence *out_fence;
        struct virtio_gpu_object *qobj;
        int ret;
        uint32_t *bo_handles = NULL;
        void __user *user_bo_handles = NULL;
        struct list_head validate_list;
        struct ttm_validate_buffer *buflist = NULL;
        int i;
        struct ww_acquire_ctx ticket;
        struct sync_file *sync_file;
        int in_fence_fd = exbuf->fence_fd;
        int out_fence_fd = -1;
        void *buf;

        if (vgdev->has_virgl_3d == false)
                return -ENOSYS;
        if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
                return -EINVAL;

        exbuf->fence_fd = -1;

        if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
                struct dma_fence *in_fence;

                in_fence = sync_file_get_fence(in_fence_fd);
                if (!in_fence)
                        return -EINVAL;

                /*
                 * Wait if the fence is from a foreign context, or if the fence
                 * array contains any fence from a foreign context.
                 */
                ret = 0;
                if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
                        ret = dma_fence_wait(in_fence, true);

                dma_fence_put(in_fence);
                if (ret)
                        return ret;
        }

        if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
                if (out_fence_fd < 0)
                        return out_fence_fd;
        }

        INIT_LIST_HEAD(&validate_list);
        if (exbuf->num_bo_handles) {
                bo_handles = kvmalloc_array(exbuf->num_bo_handles,
                                            sizeof(uint32_t), GFP_KERNEL);
                buflist = kvmalloc_array(exbuf->num_bo_handles,
                                         sizeof(struct ttm_validate_buffer),
                                         GFP_KERNEL | __GFP_ZERO);
                if (!bo_handles || !buflist) {
                        ret = -ENOMEM;
                        goto out_unused_fd;
                }

                user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
                if (copy_from_user(bo_handles, user_bo_handles,
                                   exbuf->num_bo_handles * sizeof(uint32_t))) {
                        ret = -EFAULT;
                        goto out_unused_fd;
                }

                for (i = 0; i < exbuf->num_bo_handles; i++) {
                        gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
                        if (!gobj) {
                                ret = -ENOENT;
                                goto out_unused_fd;
                        }
                        qobj = gem_to_virtio_gpu_obj(gobj);
                        buflist[i].bo = &qobj->tbo;
                        list_add(&buflist[i].head, &validate_list);
                }
                kvfree(bo_handles);
                bo_handles = NULL;
        }

        ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
        if (ret)
                goto out_free;

        buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
                          exbuf->size);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto out_unresv;
        }

        out_fence = virtio_gpu_fence_alloc(vgdev);
        if (!out_fence) {
                ret = -ENOMEM;
                goto out_memdup;
        }

        if (out_fence_fd >= 0) {
                sync_file = sync_file_create(&out_fence->f);
                if (!sync_file) {
                        dma_fence_put(&out_fence->f);
                        ret = -ENOMEM;
                        goto out_memdup;
                }
                exbuf->fence_fd = out_fence_fd;
                fd_install(out_fence_fd, sync_file->file);
        }

        virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
                              vfpriv->ctx_id, out_fence);
        ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);

        /* fence the command bo */
        virtio_gpu_unref_list(&validate_list);
        kvfree(buflist);
        return 0;

out_memdup:
        kfree(buf);
out_unresv:
        ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
        virtio_gpu_unref_list(&validate_list);
out_unused_fd:
        kvfree(bo_handles);
        kvfree(buflist);
        if (out_fence_fd >= 0)
                put_unused_fd(out_fence_fd);
        return ret;
}
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_getparam *param = data;
        int value;

        switch (param->param) {
        case VIRTGPU_PARAM_3D_FEATURES:
                value = vgdev->has_virgl_3d == true ? 1 : 0;
                break;
        case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
                value = 1;
                break;
        default:
                return -EINVAL;
        }
        if (copy_to_user((void __user *)(unsigned long)param->value,
                         &value, sizeof(int))) {
                return -EFAULT;
        }
        return 0;
}
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                                            struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_resource_create *rc = data;
        struct virtio_gpu_fence *fence;
        int ret;
        struct virtio_gpu_object *qobj;
        struct drm_gem_object *obj;
        uint32_t handle = 0;
        struct virtio_gpu_object_params params = { 0 };

        if (vgdev->has_virgl_3d == false) {
                if (rc->depth > 1)
                        return -EINVAL;
                if (rc->nr_samples > 1)
                        return -EINVAL;
                if (rc->last_level > 1)
                        return -EINVAL;
                if (rc->target != 2)
                        return -EINVAL;
                if (rc->array_size > 1)
                        return -EINVAL;
        }

        params.format = rc->format;
        params.width = rc->width;
        params.height = rc->height;
        params.size = rc->size;
        if (vgdev->has_virgl_3d) {
                params.virgl = true;
                params.target = rc->target;
                params.bind = rc->bind;
                params.depth = rc->depth;
                params.array_size = rc->array_size;
                params.last_level = rc->last_level;
                params.nr_samples = rc->nr_samples;
                params.flags = rc->flags;
        }
        /* allocate a single page size object */
        if (params.size == 0)
                params.size = PAGE_SIZE;

        fence = virtio_gpu_fence_alloc(vgdev);
        if (!fence)
                return -ENOMEM;
        qobj = virtio_gpu_alloc_object(dev, &params, fence);
        dma_fence_put(&fence->f);
        if (IS_ERR(qobj))
                return PTR_ERR(qobj);
        obj = &qobj->gem_base;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        if (ret) {
                drm_gem_object_release(obj);
                return ret;
        }
        drm_gem_object_put_unlocked(obj);

        rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
        rc->bo_handle = handle;
        return 0;
}
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
                                          struct drm_file *file_priv)
{
        struct drm_virtgpu_resource_info *ri = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;

        gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ri->size = qobj->gem_base.size;
        ri->res_handle = qobj->hw_res_handle;
        drm_gem_object_put_unlocked(gobj);
        return 0;
}
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
                                               void *data,
                                               struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_from_host *args = data;
        struct ttm_operation_ctx ctx = { true, false };
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        struct virtio_gpu_fence *fence;
        int ret;
        u32 offset = args->offset;
        struct virtio_gpu_box box;

        if (vgdev->has_virgl_3d == false)
                return -ENOSYS;

        gobj = drm_gem_object_lookup(file, args->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ret = virtio_gpu_object_reserve(qobj, false);
        if (ret)
                goto out;

        ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
        if (ret)
                goto out_unres;

        convert_to_hw_box(&box, &args->box);
        fence = virtio_gpu_fence_alloc(vgdev);
        if (!fence) {
                ret = -ENOMEM;
                goto out_unres;
        }
        virtio_gpu_cmd_transfer_from_host_3d
                (vgdev, qobj->hw_res_handle,
                 vfpriv->ctx_id, offset, args->level,
                 &box, fence);
        reservation_object_add_excl_fence(qobj->tbo.resv,
                                          &fence->f);
        dma_fence_put(&fence->f);
out_unres:
        virtio_gpu_object_unreserve(qobj);
out:
        drm_gem_object_put_unlocked(gobj);
        return ret;
}
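/*
 * To-host transfers differ by mode: without virgl the update is sent as a 2D
 * transfer with no fence; with virgl a 3D transfer is issued and an exclusive
 * fence is attached to the object's reservation before the local fence
 * reference is dropped.
 */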
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
                                             struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_to_host *args = data;
        struct ttm_operation_ctx ctx = { true, false };
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        struct virtio_gpu_fence *fence;
        struct virtio_gpu_box box;
        int ret;
        u32 offset = args->offset;

        gobj = drm_gem_object_lookup(file, args->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ret = virtio_gpu_object_reserve(qobj, false);
        if (ret)
                goto out;

        ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
        if (ret)
                goto out_unres;

        convert_to_hw_box(&box, &args->box);
        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, qobj, offset,
                         box.w, box.h, box.x, box.y, NULL);
        } else {
                fence = virtio_gpu_fence_alloc(vgdev);
                if (!fence) {
                        ret = -ENOMEM;
                        goto out_unres;
                }
                virtio_gpu_cmd_transfer_to_host_3d
                        (vgdev, qobj,
                         vfpriv ? vfpriv->ctx_id : 0, offset,
                         args->level, &box, fence);
                reservation_object_add_excl_fence(qobj->tbo.resv,
                                                  &fence->f);
                dma_fence_put(&fence->f);
        }

out_unres:
        virtio_gpu_object_unreserve(qobj);
out:
        drm_gem_object_put_unlocked(gobj);
        return ret;
}
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file)
{
        struct drm_virtgpu_3d_wait *args = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        int ret;
        bool nowait = false;

        gobj = drm_gem_object_lookup(file, args->handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        if (args->flags & VIRTGPU_WAIT_NOWAIT)
                nowait = true;
        ret = virtio_gpu_object_wait(qobj, nowait);

        drm_gem_object_put_unlocked(gobj);
        return ret;
}
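/*
 * Capset lookup: the requested id/version pair is matched against the
 * host-advertised capsets under display_info_lock. Cached replies are served
 * directly; otherwise the capset is requested from the host and the ioctl
 * waits up to five seconds for the response before copying it to userspace.
 */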
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
                                     void *data, struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_get_caps *args = data;
        unsigned size, host_caps_size;
        int i;
        int found_valid = -1;
        int ret;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *ptr;

        if (vgdev->num_capsets == 0)
                return -ENOSYS;

        /* don't allow userspace to pass 0 */
        if (args->size == 0)
                return -EINVAL;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_capsets; i++) {
                if (vgdev->capsets[i].id == args->cap_set_id) {
                        if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
                                found_valid = i;
                                break;
                        }
                }
        }

        if (found_valid == -1) {
                spin_unlock(&vgdev->display_info_lock);
                return -EINVAL;
        }

        host_caps_size = vgdev->capsets[found_valid].max_size;
        /* only copy to user the minimum of the host caps size or the guest caps size */
        size = min(args->size, host_caps_size);

        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->id == args->cap_set_id &&
                    cache_ent->version == args->cap_set_ver) {
                        ptr = cache_ent->caps_cache;
                        spin_unlock(&vgdev->display_info_lock);
                        goto copy_exit;
                }
        }
        spin_unlock(&vgdev->display_info_lock);

        /* not in cache - need to talk to hw */
        virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
                                  &cache_ent);

        ret = wait_event_timeout(vgdev->resp_wq,
                                 atomic_read(&cache_ent->is_valid), 5 * HZ);
        if (!ret)
                return -EBUSY;

        ptr = cache_ent->caps_cache;

copy_exit:
        if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
                return -EFAULT;

        return 0;
}
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
        DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
                          virtio_gpu_resource_create_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        /* make transfer async to the main ring? - not sure, can we
         * thread these in the underlying GL
         */
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
                          virtio_gpu_transfer_from_host_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
                          virtio_gpu_transfer_to_host_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
};