// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

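/*
 * VMW_RES_HT_ORDER - Order (log2 of the number of buckets) of the hash
 * table the validation code uses to detect duplicate resources and buffer
 * objects on the validation list.
 */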
#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
        struct list_head head;
        struct vmw_buffer_object *vbo;
        union {
                SVGAMobId *mob_loc;
                SVGAGuestPtr *location;
        };
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action.
 */
enum vmw_resource_relocation_type {
        vmw_res_rel_normal,
        vmw_res_rel_nop,
        vmw_res_rel_cond_nop,
        vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the
 * id that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        u32 offset:29;
        enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
        struct list_head head;
        struct vmw_resource *ctx;
        struct vmw_ctx_binding_state *cur;
        struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command, for debug purposes.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
        const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)  \
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable), #_cmd}

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_buffer_object **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
        return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state
 * changes should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
                                        bool backoff)
{
        struct vmw_ctx_validation_info *entry;

        list_for_each_entry(entry, &sw_context->ctx_list, head) {
                if (!backoff)
                        vmw_binding_state_commit(entry->cur, entry->staged);
                if (entry->staged != sw_context->staged_bindings)
                        vmw_binding_state_free(entry->staged);
                else
                        sw_context->staged_bindings_inuse = false;
        }

        /* List entries are freed with the validation context */
        INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
        if (sw_context->dx_query_mob)
                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
                                          sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context.
 * @res: Pointer to the context resource.
 * @node: The validation node holding the context resource metadata.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   struct vmw_resource *res,
                                   struct vmw_ctx_validation_info *node)
{
        int ret;

        ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
        if (unlikely(ret != 0))
                goto out_err;

        if (!sw_context->staged_bindings) {
                sw_context->staged_bindings =
                        vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(sw_context->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(sw_context->staged_bindings);
                        sw_context->staged_bindings = NULL;
                        goto out_err;
                }
        }

        if (sw_context->staged_bindings_inuse) {
                node->staged = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(node->staged)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(node->staged);
                        node->staged = NULL;
                        goto out_err;
                }
        } else {
                node->staged = sw_context->staged_bindings;
                sw_context->staged_bindings_inuse = true;
        }

        node->ctx = res;
        node->cur = vmw_context_binding_state(res);
        list_add_tail(&node->head, &sw_context->ctx_list);

        return 0;
out_err:
        return ret;
}

/**
 * vmw_execbuf_res_size - Calculate extra size for the resource validation
 * node
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store
 * execbuf private information in the validation node, typically the
 * binding manager associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
                                         enum vmw_res_type res_type)
{
        return (res_type == vmw_res_dx_context ||
                (res_type == vmw_res_context && dev_priv->has_mob)) ?
                sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource
 * validation node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
                                      struct vmw_resource *res,
                                      void *private)
{
        rcache->res = res;
        rcache->private = private;
        rcache->valid = 1;
        rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an
 * unreferenced rcu-protected pointer to the validation list.
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error
 * codes are %-EINVAL on inconsistency and %-ESRCH if the resource was
 * doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
                                         struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        enum vmw_res_type res_type = vmw_res_type(res);
        struct vmw_res_cache_entry *rcache;
        struct vmw_ctx_validation_info *ctx_info;
        bool first_usage;
        unsigned int priv_size;

        rcache = &sw_context->res_cache[res_type];
        if (likely(rcache->valid && rcache->res == res)) {
                vmw_user_resource_noref_release();
                return 0;
        }

        priv_size = vmw_execbuf_res_size(dev_priv, res_type);
        ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
                                          (void **)&ctx_info, &first_usage);
        vmw_user_resource_noref_release();
        if (ret)
                return ret;

        if (priv_size && first_usage) {
                ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
                                              ctx_info);
                if (ret)
                        return ret;
        }

        vmw_execbuf_rcache_update(rcache, res, ctx_info);
        return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
                                         struct vmw_resource *res)
{
        struct vmw_res_cache_entry *rcache;
        enum vmw_res_type res_type = vmw_res_type(res);
        void *ptr;
        int ret;

        rcache = &sw_context->res_cache[res_type];
        if (likely(rcache->valid && rcache->res == res))
                return 0;

        ret = vmw_validation_add_resource(sw_context->ctx, res, 0, &ptr, NULL);
        if (ret)
                return ret;

        vmw_execbuf_rcache_update(rcache, res, ptr);

        return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *view)
{
        int ret;

        /*
         * First add the resource the view is pointing to, otherwise
         * it may be swapped out when the view is validated.
         */
        ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view));
        if (ret)
                return ret;

        return vmw_execbuf_res_noctx_val_add(sw_context, view);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, together with the surface
 * it's pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
                    enum vmw_view_type view_type, u32 id)
{
        struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *view;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return ERR_PTR(-EINVAL);
        }

        view = vmw_view_lookup(sw_context->man, view_type, id);
        if (IS_ERR(view))
                return view;

        ret = vmw_view_res_val_add(sw_context, view);
        if (ret)
                return ERR_PTR(ret);

        return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_bindinfo *entry;
        int ret = 0;
        struct vmw_resource *res;
        u32 i;

        /* Add all cotables to the validation list. */
        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        res = vmw_context_cotable(ctx, i);
                        if (IS_ERR(res))
                                continue;

                        ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }


        /* Add all resources bound to the context to the validation list */
        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
                        ret = vmw_execbuf_res_noctx_val_add(sw_context,
                                                            entry->res);
                if (unlikely(ret != 0))
                        break;
        }

        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                struct vmw_buffer_object *dx_query_mob;

                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob)
                        ret = vmw_validation_add_bo(sw_context->ctx,
                                                    dx_query_mob, true, false);
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context holding the relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 *
 * Returns: Zero on success, negative error code on failure.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
                                       const struct vmw_resource *res,
                                       unsigned long offset,
                                       enum vmw_resource_relocation_type
                                       rel_type)
{
        struct vmw_resource_relocation *rel;

        rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
        if (unlikely(!rel)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        rel->rel_type = rel_type;
        list_add_tail(&rel->head, &sw_context->res_relocations);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        /* Memory is validation context memory, so no need to free it */

        INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        /* Validate the struct vmw_resource_relocation member size */
        BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
        BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

        list_for_each_entry(rel, list, head) {
                u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
                switch (rel->rel_type) {
                case vmw_res_rel_normal:
                        *addr = rel->res->id;
                        break;
                case vmw_res_rel_nop:
                        *addr = SVGA_3D_CMD_NOP;
                        break;
                default:
                        if (rel->res->id == -1)
                                *addr = SVGA_3D_CMD_NOP;
                        break;
                }
        }
}

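/*
 * Generic command verifier callback for commands that are not allowed in a
 * user-space command stream; always returns -EINVAL.
 */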
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return -EINVAL;
}

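/*
 * Generic command verifier callback for commands that need no further
 * checking; always returns 0.
 */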
static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at a time will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        int ret;

        ret = vmw_validation_res_reserve(sw_context->ctx, true);
        if (ret)
                return ret;

        if (sw_context->dx_query_mob) {
                struct vmw_buffer_object *expected_dx_query_mob;

                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
                if (expected_dx_query_mob &&
                    expected_dx_query_mob != sw_context->dx_query_mob) {
                        ret = -EINVAL;
                }
        }

        return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource. Populated on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource **p_res)
{
        struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        int ret;

        if (p_res)
                *p_res = NULL;

        if (*id_loc == SVGA3D_INVALID_ID) {
                if (res_type == vmw_res_context) {
                        DRM_ERROR("Illegal context invalid id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
                res = rcache->res;
        } else {
                unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

                ret = vmw_validation_preload_res(sw_context->ctx, size);
                if (ret)
                        return ret;

                res = vmw_user_resource_noref_lookup_handle
                        (dev_priv, sw_context->fp->tfile, *id_loc, converter);
                if (unlikely(IS_ERR(res))) {
                        DRM_ERROR("Could not find or use resource 0x%08x.\n",
                                  (unsigned int) *id_loc);
                        return PTR_ERR(res);
                }

                ret = vmw_execbuf_res_noref_val_add(sw_context, res);
                if (unlikely(ret != 0))
                        return ret;

                if (rcache->valid && rcache->res == res) {
                        rcache->valid_handle = true;
                        rcache->handle = *id_loc;
                }
        }

        ret = vmw_resource_relocation_add(sw_context, res,
                                          vmw_ptr_diff(sw_context->buf_start,
                                                       id_loc),
                                          vmw_res_rel_normal);
        if (p_res)
                *p_res = res;

        return 0;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query MOB associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
        struct vmw_private *dev_priv = ctx_res->dev_priv;
        struct vmw_buffer_object *dx_query_mob;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindAllQuery body;
        } *cmd;


        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
                return 0;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

        if (cmd == NULL) {
                DRM_ERROR("Failed to rebind queries.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = ctx_res->id;
        cmd->body.mobid = dx_query_mob->base.mem.start;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        vmw_context_bind_dx_query(ctx_res, dx_query_mob);

        return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_ctx_validation_info *val;
        int ret;

        list_for_each_entry(val, &sw_context->ctx_list, head) {
                ret = vmw_binding_rebind_all(val->cur);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to rebind context.\n");
                        return ret;
                }

                ret = vmw_rebind_all_dx_query(val->ctx);
                if (ret != 0)
                        return ret;
        }

        return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                 enum vmw_view_type view_type,
                                 enum vmw_ctx_binding_type binding_type,
                                 uint32 shader_slot,
                                 uint32 view_ids[], u32 num_views,
                                 u32 first_slot)
{
        struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        u32 i;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        for (i = 0; i < num_views; ++i) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_resource *view = NULL;

                if (view_ids[i] != SVGA3D_INVALID_ID) {
                        view = vmw_view_id_val_add(sw_context, view_type,
                                                   view_ids[i]);
                        if (IS_ERR(view)) {
                                DRM_ERROR("View not found.\n");
                                return PTR_ERR(view);
                        }
                }
                binding.bi.ctx = ctx_node->ctx;
                binding.bi.res = view;
                binding.bi.bt = binding_type;
                binding.shader_slot = shader_slot;
                binding.slot = first_slot + i;
                vmw_binding_add(ctx_node->staged, &binding.bi,
                                shader_slot, binding.slot);
        }

        return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                uint32_t cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command
 * submission context's resource cache, and hence be the last resource of
 * that type to have been processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL
 * if it wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
                          struct vmw_resource *res)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[vmw_res_type(res)];

        if (rcache->valid && rcache->res == res)
                return rcache->private;

        WARN_ON_ONCE(true);
        return NULL;
}


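/**
 * vmw_cmd_set_render_target_check - validate a SVGA_3D_CMD_SETRENDERTARGET
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the render target type, adds the context and the target surface to
 * the validation list and, if guest-backed objects are in use, tracks the
 * new render target binding in the context's staged binding state.
 */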
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        struct vmw_resource *ctx;
        struct vmw_resource *res;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (cmd->body.type >= SVGA3D_RT_MAX) {
                DRM_ERROR("Illegal render target type %u.\n",
                          (unsigned) cmd->body.type);
                return -EINVAL;
        }

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter, &cmd->body.target.sid,
                                &res);
        if (unlikely(ret))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_ctx_validation_info *node;

                node = vmw_execbuf_info_from_res(sw_context, ctx);
                if (!node)
                        return -EINVAL;

                binding.bi.ctx = ctx;
                binding.bi.res = res;
                binding.bi.bt = vmw_ctx_binding_rt;
                binding.slot = cmd->body.type;
                vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
        }

        return 0;
}

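/**
 * vmw_cmd_surface_copy_check - validate a SVGA_3D_CMD_SURFACE_COPY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Adds the source and destination surfaces to the validation list.
 */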
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (ret)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

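/**
 * vmw_cmd_buffer_copy_check - validate a SVGA_3D_CMD_DX_BUFFER_COPY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Adds the source and destination buffers to the validation list.
 */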
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBufferCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest, NULL);
}

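/**
 * vmw_cmd_pred_copy_check - validate a SVGA_3D_CMD_DX_PRED_COPY_REGION
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Adds the source and destination surfaces to the validation list.
 */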
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXPredCopyRegion body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.srcSid, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dstSid, NULL);
}

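/**
 * vmw_cmd_stretch_blt_check - validate a SVGA_3D_CMD_SURFACE_STRETCHBLT
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Adds the source and destination surfaces to the validation list.
 */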
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

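/**
 * vmw_cmd_blt_surf_screen_check - validate a
 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Adds the source surface to the validation list.
 */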
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

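/**
 * vmw_cmd_present_check - validate a SVGA_3D_CMD_PRESENT command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Adds the surface to be presented to the validation list.
 */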
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;


        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and whether another buffer is currently pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct vmw_buffer_object *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(new_query_bo->base.num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_validation_add_bo(sw_context->ctx,
                                                    sw_context->cur_query_bo,
                                                    dev_priv->has_mob, false);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_validation_add_bo(sw_context->ctx,
                                            dev_priv->dummy_query_bo,
                                            dev_priv->has_mob, false);
                if (unlikely(ret != 0))
                        return ret;

        }

        return 0;
}


/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */

        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
                        vmw_bo_unreference(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

                        /*
                         * We also pin the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */

                        if (!dev_priv->dummy_query_bo_pinned) {
                                vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
                                                    true);
                                dev_priv->dummy_query_bo_pinned = true;
                        }

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                vmw_bo_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_buffer_object **vmw_bo_p)
{
        struct vmw_buffer_object *vmw_bo;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        vmw_validation_preload_bo(sw_context->ctx);
        vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
        if (IS_ERR(vmw_bo)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                return PTR_ERR(vmw_bo);
        }

        ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
        vmw_user_bo_noref_release();
        if (unlikely(ret != 0))
                return ret;

        reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
        if (!reloc)
                return -ENOMEM;

        reloc->mob_loc = id;
        reloc->vbo = vmw_bo;

        *vmw_bo_p = vmw_bo;
        list_add_tail(&reloc->head, &sw_context->bo_relocations);

        return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
1205 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1206                                    struct vmw_sw_context *sw_context,
1207                                    SVGAGuestPtr *ptr,
1208                                    struct vmw_buffer_object **vmw_bo_p)
1209 {
1210         struct vmw_buffer_object *vmw_bo;
1211         uint32_t handle = ptr->gmrId;
1212         struct vmw_relocation *reloc;
1213         int ret;
1214
1215         vmw_validation_preload_bo(sw_context->ctx);
1216         vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1217         if (IS_ERR(vmw_bo)) {
1218                 DRM_ERROR("Could not find or use GMR region.\n");
1219                 return PTR_ERR(vmw_bo);
1220         }
1221
1222         ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
1223         vmw_user_bo_noref_release();
1224         if (unlikely(ret != 0))
1225                 return ret;
1226
1227         reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1228         if (!reloc)
1229                 return -ENOMEM;
1230
1231         reloc->location = ptr;
1232         reloc->vbo = vmw_bo;
1233         *vmw_bo_p = vmw_bo;
1234         list_add_tail(&reloc->head, &sw_context->bo_relocations);
1235
1236         return 0;
1237 }
1238
1239
1240
1241 /**
1242  * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
1243  *
1244  * @dev_priv: Pointer to a device private struct.
1245  * @sw_context: The software context used for this command submission.
1246  * @header: Pointer to the command header in the command stream.
1247  *
1248  * This function adds the new query into the query COTABLE
1249  */
1250 static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1251                                    struct vmw_sw_context *sw_context,
1252                                    SVGA3dCmdHeader *header)
1253 {
1254         struct vmw_dx_define_query_cmd {
1255                 SVGA3dCmdHeader header;
1256                 SVGA3dCmdDXDefineQuery q;
1257         } *cmd;
1258
1259         int    ret;
1260         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
1261         struct vmw_resource *cotable_res;
1262
1263
1264         if (ctx_node == NULL) {
1265                 DRM_ERROR("DX Context not set for query.\n");
1266                 return -EINVAL;
1267         }
1268
1269         cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
1270
1271         if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
1272             cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
1273                 return -EINVAL;
1274
1275         cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
1276         ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
1277
1278         return ret;
1279 }
1280
1281
1282
1283 /**
1284  * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
1285  *
1286  * @dev_priv: Pointer to a device private struct.
1287  * @sw_context: The software context used for this command submission.
1288  * @header: Pointer to the command header in the command stream.
1289  *
1290  * The query bind operation will eventually associate the query ID
1291  * with its backing MOB.  In this function, we take the user mode
1292  * MOB ID and use vmw_translate_mob_ptr() to translate it to its
1293  * kernel mode equivalent.
1294  */
1295 static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1296                                  struct vmw_sw_context *sw_context,
1297                                  SVGA3dCmdHeader *header)
1298 {
1299         struct vmw_dx_bind_query_cmd {
1300                 SVGA3dCmdHeader header;
1301                 SVGA3dCmdDXBindQuery q;
1302         } *cmd;
1303
1304         struct vmw_buffer_object *vmw_bo;
1305         int    ret;
1306
1307
1308         cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
1309
1310         /*
1311          * Look up the buffer pointed to by q.mobid, put it on the relocation
1312          * list so its kernel mode MOB ID can be filled in later
1313          */
1314         ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
1315                                     &vmw_bo);
1316
1317         if (ret != 0)
1318                 return ret;
1319
1320         sw_context->dx_query_mob = vmw_bo;
1321         sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
1322         return 0;
1323 }
1324
1325
1326
1327 /**
1328  * vmw_cmd_begin_gb_query - validate a  SVGA_3D_CMD_BEGIN_GB_QUERY command.
1329  *
1330  * @dev_priv: Pointer to a device private struct.
1331  * @sw_context: The software context used for this command submission.
1332  * @header: Pointer to the command header in the command stream.
1333  */
1334 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1335                                   struct vmw_sw_context *sw_context,
1336                                   SVGA3dCmdHeader *header)
1337 {
1338         struct vmw_begin_gb_query_cmd {
1339                 SVGA3dCmdHeader header;
1340                 SVGA3dCmdBeginGBQuery q;
1341         } *cmd;
1342
1343         cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1344                            header);
1345
1346         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1347                                  user_context_converter, &cmd->q.cid,
1348                                  NULL);
1349 }
1350
1351 /**
1352  * vmw_cmd_begin_query - validate a  SVGA_3D_CMD_BEGIN_QUERY command.
1353  *
1354  * @dev_priv: Pointer to a device private struct.
1355  * @sw_context: The software context used for this command submission.
1356  * @header: Pointer to the command header in the command stream.
1357  */
1358 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1359                                struct vmw_sw_context *sw_context,
1360                                SVGA3dCmdHeader *header)
1361 {
1362         struct vmw_begin_query_cmd {
1363                 SVGA3dCmdHeader header;
1364                 SVGA3dCmdBeginQuery q;
1365         } *cmd;
1366
1367         cmd = container_of(header, struct vmw_begin_query_cmd,
1368                            header);
1369
1370         if (unlikely(dev_priv->has_mob)) {
1371                 struct {
1372                         SVGA3dCmdHeader header;
1373                         SVGA3dCmdBeginGBQuery q;
1374                 } gb_cmd;
1375
1376                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1377
1378                 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1379                 gb_cmd.header.size = cmd->header.size;
1380                 gb_cmd.q.cid = cmd->q.cid;
1381                 gb_cmd.q.type = cmd->q.type;
1382
1383                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1384                 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1385         }
1386
1387         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1388                                  user_context_converter, &cmd->q.cid,
1389                                  NULL);
1390 }
1391
1392 /**
1393  * vmw_cmd_end_gb_query - validate a  SVGA_3D_CMD_END_GB_QUERY command.
1394  *
1395  * @dev_priv: Pointer to a device private struct.
1396  * @sw_context: The software context used for this command submission.
1397  * @header: Pointer to the command header in the command stream.
1398  */
1399 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1400                                 struct vmw_sw_context *sw_context,
1401                                 SVGA3dCmdHeader *header)
1402 {
1403         struct vmw_buffer_object *vmw_bo;
1404         struct vmw_query_cmd {
1405                 SVGA3dCmdHeader header;
1406                 SVGA3dCmdEndGBQuery q;
1407         } *cmd;
1408         int ret;
1409
1410         cmd = container_of(header, struct vmw_query_cmd, header);
1411         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1412         if (unlikely(ret != 0))
1413                 return ret;
1414
1415         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1416                                     &cmd->q.mobid,
1417                                     &vmw_bo);
1418         if (unlikely(ret != 0))
1419                 return ret;
1420
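             /* Register vmw_bo as the buffer that receives the query result;
              * if it differs from the currently pinned query BO, the switch
              * is carried out at command submission. */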
1421         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1422
1423         return ret;
1424 }
1425
1426 /**
1427  * vmw_cmd_end_query - validate an SVGA_3D_CMD_END_QUERY command.
1428  *
1429  * @dev_priv: Pointer to a device private struct.
1430  * @sw_context: The software context used for this command submission.
1431  * @header: Pointer to the command header in the command stream.
1432  */
1433 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1434                              struct vmw_sw_context *sw_context,
1435                              SVGA3dCmdHeader *header)
1436 {
1437         struct vmw_buffer_object *vmw_bo;
1438         struct vmw_query_cmd {
1439                 SVGA3dCmdHeader header;
1440                 SVGA3dCmdEndQuery q;
1441         } *cmd;
1442         int ret;
1443
1444         cmd = container_of(header, struct vmw_query_cmd, header);
1445         if (dev_priv->has_mob) {
1446                 struct {
1447                         SVGA3dCmdHeader header;
1448                         SVGA3dCmdEndGBQuery q;
1449                 } gb_cmd;
1450
1451                 BUILD_BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1452
1453                 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1454                 gb_cmd.header.size = cmd->header.size;
1455                 gb_cmd.q.cid = cmd->q.cid;
1456                 gb_cmd.q.type = cmd->q.type;
1457                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1458                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1459
1460                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1461                 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1462         }
1463
1464         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1465         if (unlikely(ret != 0))
1466                 return ret;
1467
1468         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1469                                       &cmd->q.guestResult,
1470                                       &vmw_bo);
1471         if (unlikely(ret != 0))
1472                 return ret;
1473
1474         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1475
1476         return ret;
1477 }
1478
1479 /**
1480  * vmw_cmd_wait_gb_query - validate an SVGA_3D_CMD_WAIT_GB_QUERY command.
1481  *
1482  * @dev_priv: Pointer to a device private struct.
1483  * @sw_context: The software context used for this command submission.
1484  * @header: Pointer to the command header in the command stream.
1485  */
1486 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1487                                  struct vmw_sw_context *sw_context,
1488                                  SVGA3dCmdHeader *header)
1489 {
1490         struct vmw_buffer_object *vmw_bo;
1491         struct vmw_query_cmd {
1492                 SVGA3dCmdHeader header;
1493                 SVGA3dCmdWaitForGBQuery q;
1494         } *cmd;
1495         int ret;
1496
1497         cmd = container_of(header, struct vmw_query_cmd, header);
1498         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1499         if (unlikely(ret != 0))
1500                 return ret;
1501
1502         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1503                                     &cmd->q.mobid,
1504                                     &vmw_bo);
1505         if (unlikely(ret != 0))
1506                 return ret;
1507
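             /* Note that unlike the END variants, the wait path doesn't go
              * through vmw_query_bo_switch_prepare(); translating the result
              * MOB pointer is sufficient here. */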
1508         return 0;
1509 }
1510
1511 /**
1512  * vmw_cmd_wait_query - validate an SVGA_3D_CMD_WAIT_QUERY command.
1513  *
1514  * @dev_priv: Pointer to a device private struct.
1515  * @sw_context: The software context used for this command submission.
1516  * @header: Pointer to the command header in the command stream.
1517  */
1518 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1519                               struct vmw_sw_context *sw_context,
1520                               SVGA3dCmdHeader *header)
1521 {
1522         struct vmw_buffer_object *vmw_bo;
1523         struct vmw_query_cmd {
1524                 SVGA3dCmdHeader header;
1525                 SVGA3dCmdWaitForQuery q;
1526         } *cmd;
1527         int ret;
1528
1529         cmd = container_of(header, struct vmw_query_cmd, header);
1530         if (dev_priv->has_mob) {
1531                 struct {
1532                         SVGA3dCmdHeader header;
1533                         SVGA3dCmdWaitForGBQuery q;
1534                 } gb_cmd;
1535
1536                 BUILD_BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1537
1538                 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1539                 gb_cmd.header.size = cmd->header.size;
1540                 gb_cmd.q.cid = cmd->q.cid;
1541                 gb_cmd.q.type = cmd->q.type;
1542                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1543                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1544
1545                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1546                 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1547         }
1548
1549         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1550         if (unlikely(ret != 0))
1551                 return ret;
1552
1553         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1554                                       &cmd->q.guestResult,
1555                                       &vmw_bo);
1556         if (unlikely(ret != 0))
1557                 return ret;
1558
1559         return 0;
1560 }
1561
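     /**
      * vmw_cmd_dma - Validate an SVGA_3D_CMD_SURFACE_DMA command.
      *
      * @dev_priv: Pointer to a device private struct.
      * @sw_context: The software context used for this command submission.
      * @header: Pointer to the command header in the command stream.
      */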
1562 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1563                        struct vmw_sw_context *sw_context,
1564                        SVGA3dCmdHeader *header)
1565 {
1566         struct vmw_buffer_object *vmw_bo = NULL;
1567         struct vmw_surface *srf = NULL;
1568         struct vmw_dma_cmd {
1569                 SVGA3dCmdHeader header;
1570                 SVGA3dCmdSurfaceDMA dma;
1571         } *cmd;
1572         int ret;
1573         SVGA3dCmdSurfaceDMASuffix *suffix;
1574         uint32_t bo_size;
1575
1576         cmd = container_of(header, struct vmw_dma_cmd, header);
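             /* The DMA suffix trails the variable-size copy-box array, so it
              * is located relative to the end of the command body. */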
1577         suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1578                                                header->size - sizeof(*suffix));
1579
1580         /* Make sure device and verifier stay in sync. */
1581         if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1582                 DRM_ERROR("Invalid DMA suffix size.\n");
1583                 return -EINVAL;
1584         }
1585
1586         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1587                                       &cmd->dma.guest.ptr,
1588                                       &vmw_bo);
1589         if (unlikely(ret != 0))
1590                 return ret;
1591
1592         /* Make sure DMA doesn't cross BO boundaries. */
1593         bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1594         if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1595                 DRM_ERROR("Invalid DMA offset.\n");
1596                 return -EINVAL;
1597         }
1598
1599         bo_size -= cmd->dma.guest.ptr.offset;
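             /* Clamp rather than reject: maximumOffset only bounds how far
              * into the BO the device may access. */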
1600         if (unlikely(suffix->maximumOffset > bo_size))
1601                 suffix->maximumOffset = bo_size;
1602
1603         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1604                                 user_surface_converter, &cmd->dma.host.sid,
1605                                 NULL);
1606         if (unlikely(ret != 0)) {
1607                 if (unlikely(ret != -ERESTARTSYS))
1608                         DRM_ERROR("could not find surface for DMA.\n");
1609                 return ret;
1610         }
1611
1612         srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1613
1614         vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1615                              header);
1616
1617         return 0;
1618 }
1619
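     /**
      * vmw_cmd_draw - Validate an SVGA_3D_CMD_DRAW_PRIMITIVES command.
      *
      * @dev_priv: Pointer to a device private struct.
      * @sw_context: The software context used for this command submission.
      * @header: Pointer to the command header in the command stream.
      */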
1620 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1621                         struct vmw_sw_context *sw_context,
1622                         SVGA3dCmdHeader *header)
1623 {
1624         struct vmw_draw_cmd {
1625                 SVGA3dCmdHeader header;
1626                 SVGA3dCmdDrawPrimitives body;
1627         } *cmd;
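             /*
              * Command layout: the fixed body is followed by
              * body.numVertexDecls SVGA3dVertexDecl entries and then by
              * body.numRanges SVGA3dPrimitiveRange entries; both counts are
              * bounded against header->size below.
              */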
1628         SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1629                 (unsigned long)header + sizeof(*cmd));
1630         SVGA3dPrimitiveRange *range;
1631         uint32_t i;
1632         uint32_t maxnum;
1633         int ret;
1634
1635         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1636         if (unlikely(ret != 0))
1637                 return ret;
1638
1639         cmd = container_of(header, struct vmw_draw_cmd, header);
1640         maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1641
1642         if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1643                 DRM_ERROR("Illegal number of vertex declarations.\n");
1644                 return -EINVAL;
1645         }
1646
1647         for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1648                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1649                                         user_surface_converter,
1650                                         &decl->array.surfaceId, NULL);
1651                 if (unlikely(ret != 0))
1652                         return ret;
1653         }
1654
1655         maxnum = (header->size - sizeof(cmd->body) -
1656                   cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1657         if (unlikely(cmd->body.numRanges > maxnum)) {
1658                 DRM_ERROR("Illegal number of index ranges.\n");
1659                 return -EINVAL;
1660         }
1661
1662         range = (SVGA3dPrimitiveRange *) decl;
1663         for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1664                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1665                                         user_surface_converter,
1666                                         &range->indexArray.surfaceId, NULL);
1667                 if (unlikely(ret != 0))
1668                         return ret;
1669         }
1670         return 0;
1671 }
1672
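     /**
      * vmw_cmd_tex_state - Validate an SVGA_3D_CMD_SETTEXTURESTATE command.
      *
      * @dev_priv: Pointer to a device private struct.
      * @sw_context: The software context used for this command submission.
      * @header: Pointer to the command header in the command stream.
      */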
1674 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1675                              struct vmw_sw_context *sw_context,
1676                              SVGA3dCmdHeader *header)
1677 {
1678         struct vmw_tex_state_cmd {
1679                 SVGA3dCmdHeader header;
1680                 SVGA3dCmdSetTextureState state;
1681         } *cmd;
1682
1683         SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1684           ((unsigned long) header + header->size + sizeof(*header));
1685         SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1686                 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1687         struct vmw_resource *ctx;
1688         struct vmw_resource *res;
1689         int ret;
1690
1691         cmd = container_of(header, struct vmw_tex_state_cmd,
1692                            header);
1693
1694         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1695                                 user_context_converter, &cmd->state.cid,
1696                                 &ctx);
1697         if (unlikely(ret != 0))
1698                 return ret;
1699
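             /* Walk the SVGA3dTextureState array that follows the fixed
              * body. Only BIND_TEXTURE states carry a surface id that needs
              * validation and, on guest-backed devices, a context binding. */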
1700         for (; cur_state < last_state; ++cur_state) {
1701                 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1702                         continue;
1703
1704                 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1705                         DRM_ERROR("Illegal texture/sampler unit %u.\n",
1706                                   (unsigned) cur_state->stage);
1707                         return -EINVAL;
1708                 }
1709
1710                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1711                                         user_surface_converter,
1712                                         &cur_state->value, &res);
1713                 if (unlikely(ret != 0))
1714                         return ret;
1715
1716                 if (dev_priv->has_mob) {
1717                         struct vmw_ctx_bindinfo_tex binding;
1718                         struct vmw_ctx_validation_info *node;
1719
1720                         node = vmw_execbuf_info_from_res(sw_context, ctx);
1721                         if (!node)
1722                                 return -EINVAL;
1723
1724                         binding.bi.ctx = ctx;
1725                         binding.bi.res = res;
1726                         binding.bi.bt = vmw_ctx_binding_tex;
1727                         binding.texture_stage = cur_state->stage;
1728                         vmw_binding_add(node->staged, &binding.bi, 0,
1729                                         binding.texture_stage);
1730                 }
1731         }
1732
1733         return 0;
1734 }
1735
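     /**
      * vmw_cmd_check_define_gmrfb - Validate an SVGA_CMD_DEFINE_GMRFB command.
      *
      * @dev_priv: Pointer to a device private struct.
      * @sw_context: The software context used for this command submission.
      * @buf: Pointer to the command in the command stream.
      */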
1736 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1737                                       struct vmw_sw_context *sw_context,
1738                                       void *buf)
1739 {
1740         struct vmw_buffer_object *vmw_bo;
1742
1743         struct {
1744                 uint32_t header;
1745                 SVGAFifoCmdDefineGMRFB body;
1746         } *cmd = buf;
1747
1748         return vmw_translate_guest_ptr(dev_priv, sw_context,
1749                                        &cmd->body.ptr,
1750                                        &vmw_bo);
1752 }
1753
1755 /**
1756  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1757  * switching
1758  *
1759  * @dev_priv: Pointer to a device private struct.
1760  * @sw_context: The software context being used for this batch.
1761  * @res: Pointer to the resource whose backup buffer is switched.
1762  * @buf_id: Pointer to the user-space backup buffer handle in the command
1763  * stream.
1764  * @backup_offset: Offset of backup into MOB.
1765  *
1766  * This function prepares for registering a switch of backup buffers
1767  * in the resource metadata just prior to unreserving.
1769  */
1770 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1771                                      struct vmw_sw_context *sw_context,
1772                                      struct vmw_resource *res,
1773                                      uint32_t *buf_id,
1774                                      unsigned long backup_offset)
1775 {
1776         struct vmw_buffer_object *vbo;
1777         void *info;
1778         int ret;
1779
1780         info = vmw_execbuf_info_from_res(sw_context, res);
1781         if (!info)
1782                 return -EINVAL;
1783
1784         ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1785         if (ret)
1786                 return ret;
1787
1788         vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1789                                          backup_offset);
1790         return 0;
1791 }
1792
1794 /**
1795  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1796  *
1797  * @dev_priv: Pointer to a device private struct.
1798  * @sw_context: The software context being used for this batch.
1799  * @res_type: The resource type.
1800  * @converter: Information about user-space binding for this resource type.
1801  * @res_id: Pointer to the user-space resource handle in the command stream.
1802  * @buf_id: Pointer to the user-space backup buffer handle in the command
1803  * stream.
1804  * @backup_offset: Offset of backup into MOB.
1805  *
1806  * This function prepares for registering a switch of backup buffers
1807  * in the resource metadata just prior to unreserving. It's basically a wrapper
1808  * around vmw_cmd_res_switch_backup with a different interface.
1809  */
1810 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1811                                  struct vmw_sw_context *sw_context,
1812                                  enum vmw_res_type res_type,
1813                                  const struct vmw_user_resource_conv
1814                                  *converter,
1815                                  uint32_t *res_id,
1816                                  uint32_t *buf_id,
1817                                  unsigned long backup_offset)
1818 {
1819         struct vmw_resource *res;
1820         int ret;
1821
1822         ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1823                                 converter, res_id, &res);
1824         if (ret)
1825                 return ret;
1826
1827         return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
1828                                          buf_id, backup_offset);
1829 }
1830
1831 /**
1832  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1833  * command
1834  *
1835  * @dev_priv: Pointer to a device private struct.
1836  * @sw_context: The software context being used for this batch.
1837  * @header: Pointer to the command header in the command stream.
1838  */
1839 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1840                                    struct vmw_sw_context *sw_context,
1841                                    SVGA3dCmdHeader *header)
1842 {
1843         struct vmw_bind_gb_surface_cmd {
1844                 SVGA3dCmdHeader header;
1845                 SVGA3dCmdBindGBSurface body;
1846         } *cmd;
1847
1848         cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1849
1850         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1851                                      user_surface_converter,
1852                                      &cmd->body.sid, &cmd->body.mobid,
1853                                      0);
1854 }
1855
1856 /**
1857  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1858  * command
1859  *
1860  * @dev_priv: Pointer to a device private struct.
1861  * @sw_context: The software context being used for this batch.
1862  * @header: Pointer to the command header in the command stream.
1863  */
1864 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1865                                    struct vmw_sw_context *sw_context,
1866                                    SVGA3dCmdHeader *header)
1867 {
1868         struct vmw_gb_surface_cmd {
1869                 SVGA3dCmdHeader header;
1870                 SVGA3dCmdUpdateGBImage body;
1871         } *cmd;
1872
1873         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1874
1875         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1876                                  user_surface_converter,
1877                                  &cmd->body.image.sid, NULL);
1878 }
1879
1880 /**
1881  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1882  * command
1883  *
1884  * @dev_priv: Pointer to a device private struct.
1885  * @sw_context: The software context being used for this batch.
1886  * @header: Pointer to the command header in the command stream.
1887  */
1888 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1889                                      struct vmw_sw_context *sw_context,
1890                                      SVGA3dCmdHeader *header)
1891 {
1892         struct vmw_gb_surface_cmd {
1893                 SVGA3dCmdHeader header;
1894                 SVGA3dCmdUpdateGBSurface body;
1895         } *cmd;
1896
1897         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1898
1899         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1900                                  user_surface_converter,
1901                                  &cmd->body.sid, NULL);
1902 }
1903
1904 /**
1905  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1906  * command
1907  *
1908  * @dev_priv: Pointer to a device private struct.
1909  * @sw_context: The software context being used for this batch.
1910  * @header: Pointer to the command header in the command stream.
1911  */
1912 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1913                                      struct vmw_sw_context *sw_context,
1914                                      SVGA3dCmdHeader *header)
1915 {
1916         struct vmw_gb_surface_cmd {
1917                 SVGA3dCmdHeader header;
1918                 SVGA3dCmdReadbackGBImage body;
1919         } *cmd;
1920
1921         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1922
1923         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1924                                  user_surface_converter,
1925                                  &cmd->body.image.sid, NULL);
1926 }
1927
1928 /**
1929  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1930  * command
1931  *
1932  * @dev_priv: Pointer to a device private struct.
1933  * @sw_context: The software context being used for this batch.
1934  * @header: Pointer to the command header in the command stream.
1935  */
1936 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1937                                        struct vmw_sw_context *sw_context,
1938                                        SVGA3dCmdHeader *header)
1939 {
1940         struct vmw_gb_surface_cmd {
1941                 SVGA3dCmdHeader header;
1942                 SVGA3dCmdReadbackGBSurface body;
1943         } *cmd;
1944
1945         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1946
1947         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1948                                  user_surface_converter,
1949                                  &cmd->body.sid, NULL);
1950 }
1951
1952 /**
1953  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1954  * command
1955  *
1956  * @dev_priv: Pointer to a device private struct.
1957  * @sw_context: The software context being used for this batch.
1958  * @header: Pointer to the command header in the command stream.
1959  */
1960 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1961                                        struct vmw_sw_context *sw_context,
1962                                        SVGA3dCmdHeader *header)
1963 {
1964         struct vmw_gb_surface_cmd {
1965                 SVGA3dCmdHeader header;
1966                 SVGA3dCmdInvalidateGBImage body;
1967         } *cmd;
1968
1969         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1970
1971         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1972                                  user_surface_converter,
1973                                  &cmd->body.image.sid, NULL);
1974 }
1975
1976 /**
1977  * vmw_cmd_invalidate_gb_surface - Validate an
1978  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1979  *
1980  * @dev_priv: Pointer to a device private struct.
1981  * @sw_context: The software context being used for this batch.
1982  * @header: Pointer to the command header in the command stream.
1983  */
1984 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1985                                          struct vmw_sw_context *sw_context,
1986                                          SVGA3dCmdHeader *header)
1987 {
1988         struct vmw_gb_surface_cmd {
1989                 SVGA3dCmdHeader header;
1990                 SVGA3dCmdInvalidateGBSurface body;
1991         } *cmd;
1992
1993         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1994
1995         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1996                                  user_surface_converter,
1997                                  &cmd->body.sid, NULL);
1998 }
1999
2001 /**
2002  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2003  * command
2004  *
2005  * @dev_priv: Pointer to a device private struct.
2006  * @sw_context: The software context being used for this batch.
2007  * @header: Pointer to the command header in the command stream.
2008  */
2009 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2010                                  struct vmw_sw_context *sw_context,
2011                                  SVGA3dCmdHeader *header)
2012 {
2013         struct vmw_shader_define_cmd {
2014                 SVGA3dCmdHeader header;
2015                 SVGA3dCmdDefineShader body;
2016         } *cmd;
2017         int ret;
2018         size_t size;
2019         struct vmw_resource *ctx;
2020
2021         cmd = container_of(header, struct vmw_shader_define_cmd,
2022                            header);
2023
2024         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2025                                 user_context_converter, &cmd->body.cid,
2026                                 &ctx);
2027         if (unlikely(ret != 0))
2028                 return ret;
2029
2030         if (unlikely(!dev_priv->has_mob))
2031                 return 0;
2032
2033         size = cmd->header.size - sizeof(cmd->body);
2034         ret = vmw_compat_shader_add(dev_priv,
2035                                     vmw_context_res_man(ctx),
2036                                     cmd->body.shid, cmd + 1,
2037                                     cmd->body.type, size,
2038                                     &sw_context->staged_cmd_res);
2039         if (unlikely(ret != 0))
2040                 return ret;
2041
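             /*
              * The shader body has been transferred to a guest-backed shader
              * above, so register a relocation that replaces this legacy
              * command with a NOP before the stream reaches the device.
              */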
2042         return vmw_resource_relocation_add(sw_context,
2043                                            NULL,
2044                                            vmw_ptr_diff(sw_context->buf_start,
2045                                                         &cmd->header.id),
2046                                            vmw_res_rel_nop);
2047 }
2048
2049 /**
2050  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2051  * command
2052  *
2053  * @dev_priv: Pointer to a device private struct.
2054  * @sw_context: The software context being used for this batch.
2055  * @header: Pointer to the command header in the command stream.
2056  */
2057 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2058                                   struct vmw_sw_context *sw_context,
2059                                   SVGA3dCmdHeader *header)
2060 {
2061         struct vmw_shader_destroy_cmd {
2062                 SVGA3dCmdHeader header;
2063                 SVGA3dCmdDestroyShader body;
2064         } *cmd;
2065         int ret;
2066         struct vmw_resource *ctx;
2067
2068         cmd = container_of(header, struct vmw_shader_destroy_cmd,
2069                            header);
2070
2071         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2072                                 user_context_converter, &cmd->body.cid,
2073                                 &ctx);
2074         if (unlikely(ret != 0))
2075                 return ret;
2076
2077         if (unlikely(!dev_priv->has_mob))
2078                 return 0;
2079
2080         ret = vmw_shader_remove(vmw_context_res_man(ctx),
2081                                 cmd->body.shid,
2082                                 cmd->body.type,
2083                                 &sw_context->staged_cmd_res);
2084         if (unlikely(ret != 0))
2085                 return ret;
2086
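             /* As with SHADER_DEFINE, the device must not see the legacy
              * command once the compat shader is gone; NOP it out as well. */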
2087         return vmw_resource_relocation_add(sw_context,
2088                                            NULL,
2089                                            vmw_ptr_diff(sw_context->buf_start,
2090                                                         &cmd->header.id),
2091                                            vmw_res_rel_nop);
2092 }
2093
2094 /**
2095  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2096  * command
2097  *
2098  * @dev_priv: Pointer to a device private struct.
2099  * @sw_context: The software context being used for this batch.
2100  * @header: Pointer to the command header in the command stream.
2101  */
2102 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2103                               struct vmw_sw_context *sw_context,
2104                               SVGA3dCmdHeader *header)
2105 {
2106         struct vmw_set_shader_cmd {
2107                 SVGA3dCmdHeader header;
2108                 SVGA3dCmdSetShader body;
2109         } *cmd;
2110         struct vmw_ctx_bindinfo_shader binding;
2111         struct vmw_resource *ctx, *res = NULL;
2112         struct vmw_ctx_validation_info *ctx_info;
2113         int ret;
2114
2115         cmd = container_of(header, struct vmw_set_shader_cmd,
2116                            header);
2117
2118         if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2119                 DRM_ERROR("Illegal shader type %u.\n",
2120                           (unsigned) cmd->body.type);
2121                 return -EINVAL;
2122         }
2123
2124         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2125                                 user_context_converter, &cmd->body.cid,
2126                                 &ctx);
2127         if (unlikely(ret != 0))
2128                 return ret;
2129
2130         if (!dev_priv->has_mob)
2131                 return 0;
2132
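             /*
              * Shaders created with SHADER_DEFINE live in the context's
              * command-submission resource manager (see vmw_compat_shader_add
              * above), so look there first and fall back to a user-space
              * shader handle only if nothing is found.
              */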
2133         if (cmd->body.shid != SVGA3D_INVALID_ID) {
2134                 res = vmw_shader_lookup(vmw_context_res_man(ctx),
2135                                         cmd->body.shid,
2136                                         cmd->body.type);
2137
2138                 if (!IS_ERR(res)) {
2139                         ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
2140                         if (unlikely(ret != 0))
2141                                 return ret;
2142                 }
2143         }
2144
2145         if (IS_ERR_OR_NULL(res)) {
2146                 ret = vmw_cmd_res_check(dev_priv, sw_context,
2147                                         vmw_res_shader,
2148                                         user_shader_converter,
2149                                         &cmd->body.shid, &res);
2150                 if (unlikely(ret != 0))
2151                         return ret;
2152         }
2153
2154         ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2155         if (!ctx_info)
2156                 return -EINVAL;
2157
2158         binding.bi.ctx = ctx;
2159         binding.bi.res = res;
2160         binding.bi.bt = vmw_ctx_binding_shader;
2161         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2162         vmw_binding_add(ctx_info->staged, &binding.bi,
2163                         binding.shader_slot, 0);
2164         return 0;
2165 }
2166
2167 /**
2168  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2169  * command
2170  *
2171  * @dev_priv: Pointer to a device private struct.
2172  * @sw_context: The software context being used for this batch.
2173  * @header: Pointer to the command header in the command stream.
2174  */
2175 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2176                                     struct vmw_sw_context *sw_context,
2177                                     SVGA3dCmdHeader *header)
2178 {
2179         struct vmw_set_shader_const_cmd {
2180                 SVGA3dCmdHeader header;
2181                 SVGA3dCmdSetShaderConst body;
2182         } *cmd;
2183         int ret;
2184
2185         cmd = container_of(header, struct vmw_set_shader_const_cmd,
2186                            header);
2187
2188         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2189                                 user_context_converter, &cmd->body.cid,
2190                                 NULL);
2191         if (unlikely(ret != 0))
2192                 return ret;
2193
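             /* The guest-backed inline variant shares the legacy command's
              * body layout, so promoting the command only requires rewriting
              * the header id. */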
2194         if (dev_priv->has_mob)
2195                 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2196
2197         return 0;
2198 }
2199
2200 /**
2201  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2202  * command
2203  *
2204  * @dev_priv: Pointer to a device private struct.
2205  * @sw_context: The software context being used for this batch.
2206  * @header: Pointer to the command header in the command stream.
2207  */
2208 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2209                                   struct vmw_sw_context *sw_context,
2210                                   SVGA3dCmdHeader *header)
2211 {
2212         struct vmw_bind_gb_shader_cmd {
2213                 SVGA3dCmdHeader header;
2214                 SVGA3dCmdBindGBShader body;
2215         } *cmd;
2216
2217         cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2218                            header);
2219
2220         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2221                                      user_shader_converter,
2222                                      &cmd->body.shid, &cmd->body.mobid,
2223                                      cmd->body.offsetInBytes);
2224 }
2225
2226 /**
2227  * vmw_cmd_dx_set_single_constant_buffer - Validate an
2228  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2229  *
2230  * @dev_priv: Pointer to a device private struct.
2231  * @sw_context: The software context being used for this batch.
2232  * @header: Pointer to the command header in the command stream.
2233  */
2234 static int
2235 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2236                                       struct vmw_sw_context *sw_context,
2237                                       SVGA3dCmdHeader *header)
2238 {
2239         struct {
2240                 SVGA3dCmdHeader header;
2241                 SVGA3dCmdDXSetSingleConstantBuffer body;
2242         } *cmd;
2243         struct vmw_resource *res = NULL;
2244         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2245         struct vmw_ctx_bindinfo_cb binding;
2246         int ret;
2247
2248         if (unlikely(ctx_node == NULL)) {
2249                 DRM_ERROR("DX Context not set.\n");
2250                 return -EINVAL;
2251         }
2252
2253         cmd = container_of(header, typeof(*cmd), header);
2254         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2255                                 user_surface_converter,
2256                                 &cmd->body.sid, &res);
2257         if (unlikely(ret != 0))
2258                 return ret;
2259
2260         binding.bi.ctx = ctx_node->ctx;
2261         binding.bi.res = res;
2262         binding.bi.bt = vmw_ctx_binding_cb;
2263         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2264         binding.offset = cmd->body.offsetInBytes;
2265         binding.size = cmd->body.sizeInBytes;
2266         binding.slot = cmd->body.slot;
2267
2268         if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2269             binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2270                 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2271                           (unsigned) cmd->body.type,
2272                           (unsigned) binding.slot);
2273                 return -EINVAL;
2274         }
2275
2276         vmw_binding_add(ctx_node->staged, &binding.bi,
2277                         binding.shader_slot, binding.slot);
2278
2279         return 0;
2280 }
2281
2282 /**
2283  * vmw_cmd_dx_set_shader_res - Validate an
2284  * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2285  *
2286  * @dev_priv: Pointer to a device private struct.
2287  * @sw_context: The software context being used for this batch.
2288  * @header: Pointer to the command header in the command stream.
2289  */
2290 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2291                                      struct vmw_sw_context *sw_context,
2292                                      SVGA3dCmdHeader *header)
2293 {
2294         struct {
2295                 SVGA3dCmdHeader header;
2296                 SVGA3dCmdDXSetShaderResources body;
2297         } *cmd = container_of(header, typeof(*cmd), header);
2298         u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2299                 sizeof(SVGA3dShaderResourceViewId);
2300
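             /* Do the range check in 64 bits so that startView + num_sr_view
              * cannot wrap before the comparison. */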
2301         if ((u64) cmd->body.startView + (u64) num_sr_view >
2302             (u64) SVGA3D_DX_MAX_SRVIEWS ||
2303             cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2304                 DRM_ERROR("Invalid shader binding.\n");
2305                 return -EINVAL;
2306         }
2307
2308         return vmw_view_bindings_add(sw_context, vmw_view_sr,
2309                                      vmw_ctx_binding_sr,
2310                                      cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2311                                      (void *) &cmd[1], num_sr_view,
2312                                      cmd->body.startView);
2313 }
2314
2315 /**
2316  * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2317  * command
2318  *
2319  * @dev_priv: Pointer to a device private struct.
2320  * @sw_context: The software context being used for this batch.
2321  * @header: Pointer to the command header in the command stream.
2322  */
2323 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2324                                  struct vmw_sw_context *sw_context,
2325                                  SVGA3dCmdHeader *header)
2326 {
2327         struct {
2328                 SVGA3dCmdHeader header;
2329                 SVGA3dCmdDXSetShader body;
2330         } *cmd;
2331         struct vmw_resource *res = NULL;
2332         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2333         struct vmw_ctx_bindinfo_shader binding;
2334         int ret = 0;
2335
2336         if (unlikely(ctx_node == NULL)) {
2337                 DRM_ERROR("DX Context not set.\n");
2338                 return -EINVAL;
2339         }
2340
2341         cmd = container_of(header, typeof(*cmd), header);
2342
2343         if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2344                 DRM_ERROR("Illegal shader type %u.\n",
2345                           (unsigned) cmd->body.type);
2346                 return -EINVAL;
2347         }
2348
2349         if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2350                 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2351                 if (IS_ERR(res)) {
2352                         DRM_ERROR("Could not find shader for binding.\n");
2353                         return PTR_ERR(res);
2354                 }
2355
2356                 ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
2357                 if (ret)
2358                         return ret;
2359         }
2360
2361         binding.bi.ctx = ctx_node->ctx;
2362         binding.bi.res = res;
2363         binding.bi.bt = vmw_ctx_binding_dx_shader;
2364         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2365
2366         vmw_binding_add(ctx_node->staged, &binding.bi,
2367                         binding.shader_slot, 0);
2368
2369         return 0;
2370 }
2371
2372 /**
2373  * vmw_cmd_dx_set_vertex_buffers - Validate an
2374  * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2375  *
2376  * @dev_priv: Pointer to a device private struct.
2377  * @sw_context: The software context being used for this batch.
2378  * @header: Pointer to the command header in the command stream.
2379  */
2380 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2381                                          struct vmw_sw_context *sw_context,
2382                                          SVGA3dCmdHeader *header)
2383 {
2384         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2385         struct vmw_ctx_bindinfo_vb binding;
2386         struct vmw_resource *res;
2387         struct {
2388                 SVGA3dCmdHeader header;
2389                 SVGA3dCmdDXSetVertexBuffers body;
2390                 SVGA3dVertexBuffer buf[];
2391         } *cmd;
2392         int i, ret, num;
2393
2394         if (unlikely(ctx_node == NULL)) {
2395                 DRM_ERROR("DX Context not set.\n");
2396                 return -EINVAL;
2397         }
2398
2399         cmd = container_of(header, typeof(*cmd), header);
2400         num = (cmd->header.size - sizeof(cmd->body)) /
2401                 sizeof(SVGA3dVertexBuffer);
2402         if ((u64)num + (u64)cmd->body.startBuffer >
2403             (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2404                 DRM_ERROR("Invalid number of vertex buffers.\n");
2405                 return -EINVAL;
2406         }
2407
2408         for (i = 0; i < num; i++) {
2409                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2410                                         user_surface_converter,
2411                                         &cmd->buf[i].sid, &res);
2412                 if (unlikely(ret != 0))
2413                         return ret;
2414
2415                 binding.bi.ctx = ctx_node->ctx;
2416                 binding.bi.bt = vmw_ctx_binding_vb;
2417                 binding.bi.res = res;
2418                 binding.offset = cmd->buf[i].offset;
2419                 binding.stride = cmd->buf[i].stride;
2420                 binding.slot = i + cmd->body.startBuffer;
2421
2422                 vmw_binding_add(ctx_node->staged, &binding.bi,
2423                                 0, binding.slot);
2424         }
2425
2426         return 0;
2427 }
2428
2429 /**
2430  * vmw_cmd_dx_set_index_buffer - Validate an
2431  * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2432  *
2433  * @dev_priv: Pointer to a device private struct.
2434  * @sw_context: The software context being used for this batch.
2435  * @header: Pointer to the command header in the command stream.
2436  */
2437 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2438                                        struct vmw_sw_context *sw_context,
2439                                        SVGA3dCmdHeader *header)
2440 {
2441         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2442         struct vmw_ctx_bindinfo_ib binding;
2443         struct vmw_resource *res;
2444         struct {
2445                 SVGA3dCmdHeader header;
2446                 SVGA3dCmdDXSetIndexBuffer body;
2447         } *cmd;
2448         int ret;
2449
2450         if (unlikely(ctx_node == NULL)) {
2451                 DRM_ERROR("DX Context not set.\n");
2452                 return -EINVAL;
2453         }
2454
2455         cmd = container_of(header, typeof(*cmd), header);
2456         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2457                                 user_surface_converter,
2458                                 &cmd->body.sid, &res);
2459         if (unlikely(ret != 0))
2460                 return ret;
2461
2462         binding.bi.ctx = ctx_node->ctx;
2463         binding.bi.res = res;
2464         binding.bi.bt = vmw_ctx_binding_ib;
2465         binding.offset = cmd->body.offset;
2466         binding.format = cmd->body.format;
2467
2468         vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2469
2470         return 0;
2471 }
2472
2473 /**
2474  * vmw_cmd_dx_set_rendertargets - Validate an
2475  * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2476  *
2477  * @dev_priv: Pointer to a device private struct.
2478  * @sw_context: The software context being used for this batch.
2479  * @header: Pointer to the command header in the command stream.
2480  */
2481 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2482                                         struct vmw_sw_context *sw_context,
2483                                         SVGA3dCmdHeader *header)
2484 {
2485         struct {
2486                 SVGA3dCmdHeader header;
2487                 SVGA3dCmdDXSetRenderTargets body;
2488         } *cmd = container_of(header, typeof(*cmd), header);
2489         int ret;
2490         u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2491                 sizeof(SVGA3dRenderTargetViewId);
2492
2493         if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2494                 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2495                 return -EINVAL;
2496         }
2497
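             /* The depth/stencil view is bound as a single-entry list; the
              * render target view ids follow the fixed command body. */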
2498         ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2499                                     vmw_ctx_binding_ds, 0,
2500                                     &cmd->body.depthStencilViewId, 1, 0);
2501         if (ret)
2502                 return ret;
2503
2504         return vmw_view_bindings_add(sw_context, vmw_view_rt,
2505                                      vmw_ctx_binding_dx_rt, 0,
2506                                      (void *)&cmd[1], num_rt_view, 0);
2507 }
2508
2509 /**
2510  * vmw_cmd_dx_clear_rendertarget_view - Validate an
2511  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2512  *
2513  * @dev_priv: Pointer to a device private struct.
2514  * @sw_context: The software context being used for this batch.
2515  * @header: Pointer to the command header in the command stream.
2516  */
2517 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2518                                               struct vmw_sw_context *sw_context,
2519                                               SVGA3dCmdHeader *header)
2520 {
2521         struct {
2522                 SVGA3dCmdHeader header;
2523                 SVGA3dCmdDXClearRenderTargetView body;
2524         } *cmd = container_of(header, typeof(*cmd), header);
2525
2526         return PTR_ERR_OR_ZERO(vmw_view_id_val_add(sw_context, vmw_view_rt,
2527                                                     cmd->body.renderTargetViewId));
2528 }
2529
2530 /**
2531  * vmw_cmd_dx_clear_depthstencil_view - Validate an
2532  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2533  *
2534  * @dev_priv: Pointer to a device private struct.
2535  * @sw_context: The software context being used for this batch.
2536  * @header: Pointer to the command header in the command stream.
2537  */
2538 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2539                                               struct vmw_sw_context *sw_context,
2540                                               SVGA3dCmdHeader *header)
2541 {
2542         struct {
2543                 SVGA3dCmdHeader header;
2544                 SVGA3dCmdDXClearDepthStencilView body;
2545         } *cmd = container_of(header, typeof(*cmd), header);
2546
2547         return PTR_ERR_OR_ZERO(vmw_view_id_val_add(sw_context, vmw_view_ds,
2548                                                     cmd->body.depthStencilViewId));
2549 }
2550
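     /**
      * vmw_cmd_dx_view_define - Validate an SVGA_3D_CMD_DX_DEFINE_*_VIEW
      * command
      *
      * @dev_priv: Pointer to a device private struct.
      * @sw_context: The software context being used for this batch.
      * @header: Pointer to the command header in the command stream.
      */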
2551 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2552                                   struct vmw_sw_context *sw_context,
2553                                   SVGA3dCmdHeader *header)
2554 {
2555         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2556         struct vmw_resource *srf;
2557         struct vmw_resource *res;
2558         enum vmw_view_type view_type;
2559         int ret;
2560         /*
2561          * This is based on the fact that all affected define commands have
2562          * the same initial command body layout.
2563          */
2564         struct {
2565                 SVGA3dCmdHeader header;
2566                 uint32 defined_id;
2567                 uint32 sid;
2568         } *cmd;
2569
2570         if (unlikely(ctx_node == NULL)) {
2571                 DRM_ERROR("DX Context not set.\n");
2572                 return -EINVAL;
2573         }
2574
2575         view_type = vmw_view_cmd_to_type(header->id);
2576         if (view_type == vmw_view_max)
2577                 return -EINVAL;
2578         cmd = container_of(header, typeof(*cmd), header);
2579         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2580                                 user_surface_converter,
2581                                 &cmd->sid, &srf);
2582         if (unlikely(ret != 0))
2583                 return ret;
2584
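             /* Notify the cotable of the view id about to be defined so that
              * it can be resized before the define command executes. */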
2585         res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2586         ret = vmw_cotable_notify(res, cmd->defined_id);
2587         if (unlikely(ret != 0))
2588                 return ret;
2589
2590         return vmw_view_add(sw_context->man,
2591                             ctx_node->ctx,
2592                             srf,
2593                             view_type,
2594                             cmd->defined_id,
2595                             header,
2596                             header->size + sizeof(*header),
2597                             &sw_context->staged_cmd_res);
2598 }
2599
2600 /**
2601  * vmw_cmd_dx_set_so_targets - Validate an
2602  * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2603  *
2604  * @dev_priv: Pointer to a device private struct.
2605  * @sw_context: The software context being used for this batch.
2606  * @header: Pointer to the command header in the command stream.
2607  */
2608 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2609                                      struct vmw_sw_context *sw_context,
2610                                      SVGA3dCmdHeader *header)
2611 {
2612         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2613         struct vmw_ctx_bindinfo_so binding;
2614         struct vmw_resource *res;
2615         struct {
2616                 SVGA3dCmdHeader header;
2617                 SVGA3dCmdDXSetSOTargets body;
2618                 SVGA3dSoTarget targets[];
2619         } *cmd;
2620         int i, ret, num;
2621
2622         if (unlikely(ctx_node == NULL)) {
2623                 DRM_ERROR("DX Context not set.\n");
2624                 return -EINVAL;
2625         }
2626
2627         cmd = container_of(header, typeof(*cmd), header);
2628         num = (cmd->header.size - sizeof(cmd->body)) /
2629                 sizeof(SVGA3dSoTarget);
2630
2631         if (num > SVGA3D_DX_MAX_SOTARGETS) {
2632                 DRM_ERROR("Invalid DX SO binding.\n");
2633                 return -EINVAL;
2634         }
2635
2636         for (i = 0; i < num; i++) {
2637                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2638                                         user_surface_converter,
2639                                         &cmd->targets[i].sid, &res);
2640                 if (unlikely(ret != 0))
2641                         return ret;
2642
2643                 binding.bi.ctx = ctx_node->ctx;
2644                 binding.bi.res = res;
2645                 binding.bi.bt = vmw_ctx_binding_so;
2646                 binding.offset = cmd->targets[i].offset;
2647                 binding.size = cmd->targets[i].sizeInBytes;
2648                 binding.slot = i;
2649
2650                 vmw_binding_add(ctx_node->staged, &binding.bi,
2651                                 0, binding.slot);
2652         }
2653
2654         return 0;
2655 }
2656
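     /**
      * vmw_cmd_dx_so_define - Validate DX state-object define commands
      *
      * @dev_priv: Pointer to a device private struct.
      * @sw_context: The software context being used for this batch.
      * @header: Pointer to the command header in the command stream.
      */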
2657 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2658                                 struct vmw_sw_context *sw_context,
2659                                 SVGA3dCmdHeader *header)
2660 {
2661         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2662         struct vmw_resource *res;
2663         /*
2664          * This is based on the fact that all affected define commands have
2665          * the same initial command body layout.
2666          */
2667         struct {
2668                 SVGA3dCmdHeader header;
2669                 uint32 defined_id;
2670         } *cmd;
2671         enum vmw_so_type so_type;
2672         int ret;
2673
2674         if (unlikely(ctx_node == NULL)) {
2675                 DRM_ERROR("DX Context not set.\n");
2676                 return -EINVAL;
2677         }
2678
2679         so_type = vmw_so_cmd_to_type(header->id);
2680         res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2681         cmd = container_of(header, typeof(*cmd), header);
2682         ret = vmw_cotable_notify(res, cmd->defined_id);
2683
2684         return ret;
2685 }
2686
2687 /**
2688  * vmw_cmd_dx_check_subresource - Validate an
2689  * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2690  *
2691  * @dev_priv: Pointer to a device private struct.
2692  * @sw_context: The software context being used for this batch.
2693  * @header: Pointer to the command header in the command stream.
2694  */
2695 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2696                                         struct vmw_sw_context *sw_context,
2697                                         SVGA3dCmdHeader *header)
2698 {
2699         struct {
2700                 SVGA3dCmdHeader header;
2701                 union {
2702                         SVGA3dCmdDXReadbackSubResource r_body;
2703                         SVGA3dCmdDXInvalidateSubResource i_body;
2704                         SVGA3dCmdDXUpdateSubResource u_body;
2705                         SVGA3dSurfaceId sid;
2706                 };
2707         } *cmd;
2708
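             /* All three command bodies start with the surface id, which the
              * BUILD_BUG_ONs below assert, so the union can be read through
              * ->sid regardless of the actual command. */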
2709         BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2710                      offsetof(typeof(*cmd), sid));
2711         BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2712                      offsetof(typeof(*cmd), sid));
2713         BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2714                      offsetof(typeof(*cmd), sid));
2715
2716         cmd = container_of(header, typeof(*cmd), header);
2717
2718         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2719                                  user_surface_converter,
2720                                  &cmd->sid, NULL);
2721 }
2722
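     /**
      * vmw_cmd_dx_cid_check - Validate a command that only requires a bound
      * DX context
      *
      * @dev_priv: Pointer to a device private struct.
      * @sw_context: The software context being used for this batch.
      * @header: Pointer to the command header in the command stream.
      */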
2723 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2724                                 struct vmw_sw_context *sw_context,
2725                                 SVGA3dCmdHeader *header)
2726 {
2727         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2728
2729         if (unlikely(ctx_node == NULL)) {
2730                 DRM_ERROR("DX Context not set.\n");
2731                 return -EINVAL;
2732         }
2733
2734         return 0;
2735 }
2736
2737 /**
2738  * vmw_cmd_dx_view_remove - validate a view remove command and
2739  * schedule the view resource for removal.
2740  *
2741  * @dev_priv: Pointer to a device private struct.
2742  * @sw_context: The software context being used for this batch.
2743  * @header: Pointer to the command header in the command stream.
2744  *
2745  * Check that the view exists, and if it was not created using this
2746  * command batch, conditionally make this command a NOP.
2747  */
2748 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2749                                   struct vmw_sw_context *sw_context,
2750                                   SVGA3dCmdHeader *header)
2751 {
2752         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2753         struct {
2754                 SVGA3dCmdHeader header;
2755                 union vmw_view_destroy body;
2756         } *cmd = container_of(header, typeof(*cmd), header);
2757         enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2758         struct vmw_resource *view;
2759         int ret;
2760
2761         if (!ctx_node) {
2762                 DRM_ERROR("DX Context not set.\n");
2763                 return -EINVAL;
2764         }
2765
2766         ret = vmw_view_remove(sw_context->man,
2767                               cmd->body.view_id, view_type,
2768                               &sw_context->staged_cmd_res,
2769                               &view);
2770         if (ret || !view)
2771                 return ret;
2772
2773         /*
2774          * If the view wasn't created during this command batch, it might
2775          * have been removed due to a context swapout, so add a
2776          * relocation to conditionally make this command a NOP to avoid
2777          * device errors.
2778          */
2779         return vmw_resource_relocation_add(sw_context,
2780                                            view,
2781                                            vmw_ptr_diff(sw_context->buf_start,
2782                                                         &cmd->header.id),
2783                                            vmw_res_rel_cond_nop);
2784 }
2785
2786 /**
2787  * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2788  * command
2789  *
2790  * @dev_priv: Pointer to a device private struct.
2791  * @sw_context: The software context being used for this batch.
2792  * @header: Pointer to the command header in the command stream.
2793  */
2794 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2795                                     struct vmw_sw_context *sw_context,
2796                                     SVGA3dCmdHeader *header)
2797 {
2798         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2799         struct vmw_resource *res;
2800         struct {
2801                 SVGA3dCmdHeader header;
2802                 SVGA3dCmdDXDefineShader body;
2803         } *cmd = container_of(header, typeof(*cmd), header);
2804         int ret;
2805
2806         if (!ctx_node) {
2807                 DRM_ERROR("DX Context not set.\n");
2808                 return -EINVAL;
2809         }
2810
2811         res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2812         ret = vmw_cotable_notify(res, cmd->body.shaderId);
2813         if (ret)
2814                 return ret;
2815
2816         return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2817                                  cmd->body.shaderId, cmd->body.type,
2818                                  &sw_context->staged_cmd_res);
2819 }
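
/*
 * Note: this is the common pattern for DX define-style commands: first
 * notify the context's cotable so it can make room for the new id, then
 * stage the new resource on sw_context->staged_cmd_res. Staged resources
 * are committed with vmw_cmdbuf_res_commit() on successful submission
 * and reverted with vmw_cmdbuf_res_revert() on error (see
 * vmw_execbuf_process()).
 */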
2820
2821 /**
2822  * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2823  * command
2824  *
2825  * @dev_priv: Pointer to a device private struct.
2826  * @sw_context: The software context being used for this batch.
2827  * @header: Pointer to the command header in the command stream.
2828  */
2829 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2830                                      struct vmw_sw_context *sw_context,
2831                                      SVGA3dCmdHeader *header)
2832 {
2833         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2834         struct {
2835                 SVGA3dCmdHeader header;
2836                 SVGA3dCmdDXDestroyShader body;
2837         } *cmd = container_of(header, typeof(*cmd), header);
2838         int ret;
2839
2840         if (!ctx_node) {
2841                 DRM_ERROR("DX Context not set.\n");
2842                 return -EINVAL;
2843         }
2844
2845         ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2846                                 &sw_context->staged_cmd_res);
2847         if (ret)
2848                 DRM_ERROR("Could not find shader to remove.\n");
2849
2850         return ret;
2851 }
2852
2853 /**
2854  * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2855  * command
2856  *
2857  * @dev_priv: Pointer to a device private struct.
2858  * @sw_context: The software context being used for this batch.
2859  * @header: Pointer to the command header in the command stream.
2860  */
2861 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2862                                   struct vmw_sw_context *sw_context,
2863                                   SVGA3dCmdHeader *header)
2864 {
2865         struct vmw_resource *ctx;
2866         struct vmw_resource *res;
2867         struct {
2868                 SVGA3dCmdHeader header;
2869                 SVGA3dCmdDXBindShader body;
2870         } *cmd = container_of(header, typeof(*cmd), header);
2871         int ret;
2872
2873         if (cmd->body.cid != SVGA3D_INVALID_ID) {
2874                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2875                                         user_context_converter,
2876                                         &cmd->body.cid, &ctx);
2877                 if (ret)
2878                         return ret;
2879         } else {
2880                 if (!sw_context->dx_ctx_node) {
2881                         DRM_ERROR("DX Context not set.\n");
2882                         return -EINVAL;
2883                 }
2884                 ctx = sw_context->dx_ctx_node->ctx;
2885         }
2886
2887         res = vmw_shader_lookup(vmw_context_res_man(ctx),
2888                                 cmd->body.shid, 0);
2889         if (IS_ERR(res)) {
2890                 DRM_ERROR("Could not find shader to bind.\n");
2891                 return PTR_ERR(res);
2892         }
2893
2894         ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
2895         if (ret) {
2896                 DRM_ERROR("Error creating resource validation node.\n");
2897                 return ret;
2898         }
2899
2900         return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2901                                          &cmd->body.mobid,
2902                                          cmd->body.offsetInBytes);
2903 }
2904
2905 /**
2906  * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
2907  *
2908  * @dev_priv: Pointer to a device private struct.
2909  * @sw_context: The software context being used for this batch.
2910  * @header: Pointer to the command header in the command stream.
2911  */
2912 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2913                               struct vmw_sw_context *sw_context,
2914                               SVGA3dCmdHeader *header)
2915 {
2916         struct {
2917                 SVGA3dCmdHeader header;
2918                 SVGA3dCmdDXGenMips body;
2919         } *cmd = container_of(header, typeof(*cmd), header);
2920
2921         return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
2922                                            cmd->body.shaderResourceViewId));
2923 }
2924
2925 /**
2926  * vmw_cmd_dx_transfer_from_buffer -
2927  * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2928  *
2929  * @dev_priv: Pointer to a device private struct.
2930  * @sw_context: The software context being used for this batch.
2931  * @header: Pointer to the command header in the command stream.
2932  */
2933 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2934                                            struct vmw_sw_context *sw_context,
2935                                            SVGA3dCmdHeader *header)
2936 {
2937         struct {
2938                 SVGA3dCmdHeader header;
2939                 SVGA3dCmdDXTransferFromBuffer body;
2940         } *cmd = container_of(header, typeof(*cmd), header);
2941         int ret;
2942
2943         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2944                                 user_surface_converter,
2945                                 &cmd->body.srcSid, NULL);
2946         if (ret != 0)
2947                 return ret;
2948
2949         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2950                                  user_surface_converter,
2951                                  &cmd->body.destSid, NULL);
2952 }
2953
2954 /**
2955  * vmw_cmd_intra_surface_copy -
2956  * Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command
2957  *
2958  * @dev_priv: Pointer to a device private struct.
2959  * @sw_context: The software context being used for this batch.
2960  * @header: Pointer to the command header in the command stream.
2961  */
2962 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2963                                       struct vmw_sw_context *sw_context,
2964                                       SVGA3dCmdHeader *header)
2965 {
2966         struct {
2967                 SVGA3dCmdHeader header;
2968                 SVGA3dCmdIntraSurfaceCopy body;
2969         } *cmd = container_of(header, typeof(*cmd), header);
2970
2971         if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2972                 return -EINVAL;
2973
2974         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2975                                 user_surface_converter,
2976                                 &cmd->body.surface.sid, NULL);
2977 }
2978
2980 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
2981                                 struct vmw_sw_context *sw_context,
2982                                 void *buf, uint32_t *size)
2983 {
2984         uint32_t size_remaining = *size;
2985         uint32_t cmd_id;
2986
2987         cmd_id = ((uint32_t *)buf)[0];
2988         switch (cmd_id) {
2989         case SVGA_CMD_UPDATE:
2990                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
2991                 break;
2992         case SVGA_CMD_DEFINE_GMRFB:
2993                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
2994                 break;
2995         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
2996                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2997                 break;
2998         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
2999                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3000                 break;
3001         default:
3002                 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3003                 return -EINVAL;
3004         }
3005
3006         if (*size > size_remaining) {
3007                 DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
3008                           cmd_id);
3009                 return -EINVAL;
3010         }
3011
3012         if (unlikely(!sw_context->kernel)) {
3013                 DRM_ERROR("Kernel-only SVGA command: %u.\n", cmd_id);
3014                 return -EPERM;
3015         }
3016
3017         if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3018                 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3019
3020         return 0;
3021 }
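
/*
 * Illustrative sketch (not part of the driver): the non-3D FIFO commands
 * handled above consist of a 32-bit command id immediately followed by a
 * fixed-size payload struct, which is why *size is computed as
 * sizeof(uint32_t) + sizeof(payload). An SVGA_CMD_UPDATE submission could
 * be assembled like this (the helper name is hypothetical):
 */
#if 0	/* example only */
static void example_encode_update(void *buf, u32 x, u32 y, u32 w, u32 h)
{
	u32 *id = buf;
	SVGAFifoCmdUpdate *body = (SVGAFifoCmdUpdate *)(id + 1);

	*id = SVGA_CMD_UPDATE;		/* 32-bit command id */
	body->x = x;			/* fixed-size payload follows the id */
	body->y = y;
	body->width = w;
	body->height = h;
}
#endif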
3022
3023 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3024         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3025                     false, false, false),
3026         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3027                     false, false, false),
3028         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3029                     true, false, false),
3030         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3031                     true, false, false),
3032         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3033                     true, false, false),
3034         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3035                     false, false, false),
3036         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3037                     false, false, false),
3038         VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3039                     true, false, false),
3040         VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3041                     true, false, false),
3042         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3043                     true, false, false),
3044         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3045                     &vmw_cmd_set_render_target_check, true, false, false),
3046         VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3047                     true, false, false),
3048         VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3049                     true, false, false),
3050         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3051                     true, false, false),
3052         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3053                     true, false, false),
3054         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3055                     true, false, false),
3056         VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3057                     true, false, false),
3058         VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3059                     true, false, false),
3060         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3061                     false, false, false),
3062         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3063                     true, false, false),
3064         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3065                     true, false, false),
3066         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3067                     true, false, false),
3068         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3069                     true, false, false),
3070         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3071                     true, false, false),
3072         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3073                     true, false, false),
3074         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3075                     true, false, false),
3076         VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3077                     true, false, false),
3078         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3079                     true, false, false),
3080         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3081                     true, false, false),
3082         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3083                     &vmw_cmd_blt_surf_screen_check, false, false, false),
3084         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3085                     false, false, false),
3086         VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3087                     false, false, false),
3088         VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3089                     false, false, false),
3090         VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3091                     false, false, false),
3092         VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3093                     false, false, false),
3094         VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3095                     false, false, false),
3096         VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3097                     false, false, false),
3098         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3099                     false, false, false),
3100         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3101                     false, false, false),
3102         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3103                     false, false, false),
3104         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3105                     false, false, false),
3106         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3107                     false, false, false),
3108         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3109                     false, false, false),
3110         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3111                     false, false, true),
3112         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3113                     false, false, true),
3114         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3115                     false, false, true),
3116         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3117                     false, false, true),
3118         VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3119                     false, false, true),
3120         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3121                     false, false, true),
3122         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3123                     false, false, true),
3124         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3125                     false, false, true),
3126         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3127                     true, false, true),
3128         VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3129                     false, false, true),
3130         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3131                     true, false, true),
3132         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3133                     &vmw_cmd_update_gb_surface, true, false, true),
3134         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3135                     &vmw_cmd_readback_gb_image, true, false, true),
3136         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3137                     &vmw_cmd_readback_gb_surface, true, false, true),
3138         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3139                     &vmw_cmd_invalidate_gb_image, true, false, true),
3140         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3141                     &vmw_cmd_invalidate_gb_surface, true, false, true),
3142         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3143                     false, false, true),
3144         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3145                     false, false, true),
3146         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3147                     false, false, true),
3148         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3149                     false, false, true),
3150         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3151                     false, false, true),
3152         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3153                     false, false, true),
3154         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3155                     true, false, true),
3156         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3157                     false, false, true),
3158         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3159                     false, false, false),
3160         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3161                     true, false, true),
3162         VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3163                     true, false, true),
3164         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3165                     true, false, true),
3166         VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3167                     true, false, true),
3168         VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3169                     true, false, true),
3170         VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3171                     false, false, true),
3172         VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3173                     false, false, true),
3174         VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3175                     false, false, true),
3176         VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3177                     false, false, true),
3178         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3179                     false, false, true),
3180         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3181                     false, false, true),
3182         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3183                     false, false, true),
3184         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3185                     false, false, true),
3186         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3187                     false, false, true),
3188         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3189                     false, false, true),
3190         VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3191                     true, false, true),
3192         VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3193                     false, false, true),
3194         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3195                     false, false, true),
3196         VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3197                     false, false, true),
3198         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3199                     false, false, true),
3200
3201         /*
3202          * DX commands
3203          */
3204         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3205                     false, false, true),
3206         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3207                     false, false, true),
3208         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3209                     false, false, true),
3210         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3211                     false, false, true),
3212         VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3213                     false, false, true),
3214         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3215                     &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3216         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3217                     &vmw_cmd_dx_set_shader_res, true, false, true),
3218         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3219                     true, false, true),
3220         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3221                     true, false, true),
3222         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3223                     true, false, true),
3224         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3225                     true, false, true),
3226         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3227                     true, false, true),
3228         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3229                     &vmw_cmd_dx_cid_check, true, false, true),
3230         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3231                     true, false, true),
3232         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3233                     &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3234         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3235                     &vmw_cmd_dx_set_index_buffer, true, false, true),
3236         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3237                     &vmw_cmd_dx_set_rendertargets, true, false, true),
3238         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3239                     true, false, true),
3240         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3241                     &vmw_cmd_dx_cid_check, true, false, true),
3242         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3243                     &vmw_cmd_dx_cid_check, true, false, true),
3244         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3245                     true, false, true),
3246         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3247                     true, false, true),
3248         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3249                     true, false, true),
3250         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3251                     &vmw_cmd_dx_cid_check, true, false, true),
3252         VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3253                     true, false, true),
3254         VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3255                     true, false, true),
3256         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3257                     true, false, true),
3258         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3259                     true, false, true),
3260         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3261                     true, false, true),
3262         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3263                     true, false, true),
3264         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3265                     &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3266         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3267                     &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3268         VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3269                     true, false, true),
3270         VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3271                     true, false, true),
3272         VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3273                     &vmw_cmd_dx_check_subresource, true, false, true),
3274         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3275                     &vmw_cmd_dx_check_subresource, true, false, true),
3276         VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3277                     &vmw_cmd_dx_check_subresource, true, false, true),
3278         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3279                     &vmw_cmd_dx_view_define, true, false, true),
3280         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3281                     &vmw_cmd_dx_view_remove, true, false, true),
3282         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3283                     &vmw_cmd_dx_view_define, true, false, true),
3284         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3285                     &vmw_cmd_dx_view_remove, true, false, true),
3286         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3287                     &vmw_cmd_dx_view_define, true, false, true),
3288         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3289                     &vmw_cmd_dx_view_remove, true, false, true),
3290         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3291                     &vmw_cmd_dx_so_define, true, false, true),
3292         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3293                     &vmw_cmd_dx_cid_check, true, false, true),
3294         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3295                     &vmw_cmd_dx_so_define, true, false, true),
3296         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3297                     &vmw_cmd_dx_cid_check, true, false, true),
3298         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3299                     &vmw_cmd_dx_so_define, true, false, true),
3300         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3301                     &vmw_cmd_dx_cid_check, true, false, true),
3302         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3303                     &vmw_cmd_dx_so_define, true, false, true),
3304         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3305                     &vmw_cmd_dx_cid_check, true, false, true),
3306         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3307                     &vmw_cmd_dx_so_define, true, false, true),
3308         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3309                     &vmw_cmd_dx_cid_check, true, false, true),
3310         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3311                     &vmw_cmd_dx_define_shader, true, false, true),
3312         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3313                     &vmw_cmd_dx_destroy_shader, true, false, true),
3314         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3315                     &vmw_cmd_dx_bind_shader, true, false, true),
3316         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3317                     &vmw_cmd_dx_so_define, true, false, true),
3318         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3319                     &vmw_cmd_dx_cid_check, true, false, true),
3320         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3321                     true, false, true),
3322         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3323                     &vmw_cmd_dx_set_so_targets, true, false, true),
3324         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3325                     &vmw_cmd_dx_cid_check, true, false, true),
3326         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3327                     &vmw_cmd_dx_cid_check, true, false, true),
3328         VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3329                     &vmw_cmd_buffer_copy_check, true, false, true),
3330         VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3331                     &vmw_cmd_pred_copy_check, true, false, true),
3332         VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3333                     &vmw_cmd_dx_transfer_from_buffer,
3334                     true, false, true),
3335         VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3336                     true, false, true),
3337 };
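
/*
 * Note on the table above: each VMW_CMD_DEF() entry associates an SVGA3D
 * command id with its validation function and three flags, which
 * vmw_cmd_check() below interprets, in order, as user_allow (the command
 * may come from user-space), gb_disable (the command is disallowed when
 * guest-backed objects are available) and gb_enable (the command requires
 * guest-backed objects). For example,
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
 *		    true, false, true),
 *
 * allows the command from user-space, but only on guest-backed hardware.
 */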
3338
3339 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3340 {
3341         u32 cmd_id = ((u32 *) buf)[0];
3342
3343         if (cmd_id >= SVGA_CMD_MAX) {
3344                 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3345                 const struct vmw_cmd_entry *entry;
3346
3347                 *size = header->size + sizeof(SVGA3dCmdHeader);
3348                 cmd_id = header->id;
3349                 if (cmd_id < SVGA_3D_CMD_BASE || cmd_id >= SVGA_3D_CMD_MAX)
3350                         return false;
3351
3352                 cmd_id -= SVGA_3D_CMD_BASE;
3353                 entry = &vmw_cmd_entries[cmd_id];
3354                 *cmd = entry->cmd_name;
3355                 return true;
3356         }
3357
3358         switch (cmd_id) {
3359         case SVGA_CMD_UPDATE:
3360                 *cmd = "SVGA_CMD_UPDATE";
3361                 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3362                 break;
3363         case SVGA_CMD_DEFINE_GMRFB:
3364                 *cmd = "SVGA_CMD_DEFINE_GMRFB";
3365                 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3366                 break;
3367         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3368                 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3369                 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3370                 break;
3371         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3372                 *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3373                 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3374                 break;
3375         default:
3376                 *cmd = "UNKNOWN";
3377                 *size = 0;
3378                 return false;
3379         }
3380
3381         return true;
3382 }
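
/*
 * Illustrative sketch (not part of the driver): vmw_cmd_describe() can
 * be used to walk a command batch for debug output. The helper below is
 * hypothetical.
 */
#if 0	/* example only */
static void example_dump_batch(const void *buf, u32 size)
{
	while (size >= sizeof(u32)) {
		const char *name = NULL;
		u32 cmd_size;

		if (!vmw_cmd_describe(buf, &cmd_size, &name))
			break;
		DRM_INFO("cmd: %s, size: %u\n", name, cmd_size);
		if (cmd_size == 0 || cmd_size > size)
			break;
		buf += cmd_size;
		size -= cmd_size;
	}
}
#endif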
3383
3384 static int vmw_cmd_check(struct vmw_private *dev_priv,
3385                          struct vmw_sw_context *sw_context,
3386                          void *buf, uint32_t *size)
3387 {
3388         uint32_t cmd_id;
3389         uint32_t size_remaining = *size;
3390         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3391         int ret;
3392         const struct vmw_cmd_entry *entry;
3393         bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3394
3395         cmd_id = ((uint32_t *)buf)[0];
3396         /* Handle any non-3D commands */
3397         if (unlikely(cmd_id < SVGA_CMD_MAX))
3398                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3399
3401         cmd_id = header->id;
3402         *size = header->size + sizeof(SVGA3dCmdHeader);
3403
3404         cmd_id -= SVGA_3D_CMD_BASE;
3405         if (unlikely(*size > size_remaining))
3406                 goto out_invalid;
3407
3408         if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3409                 goto out_invalid;
3410
3411         entry = &vmw_cmd_entries[cmd_id];
3412         if (unlikely(!entry->func))
3413                 goto out_invalid;
3414
3415         if (unlikely(!entry->user_allow && !sw_context->kernel))
3416                 goto out_privileged;
3417
3418         if (unlikely(entry->gb_disable && gb))
3419                 goto out_old;
3420
3421         if (unlikely(entry->gb_enable && !gb))
3422                 goto out_new;
3423
3424         ret = entry->func(dev_priv, sw_context, header);
3425         if (unlikely(ret != 0))
3426                 goto out_invalid;
3427
3428         return 0;
3429 out_invalid:
3430         DRM_ERROR("Invalid SVGA3D command: %d\n",
3431                   cmd_id + SVGA_3D_CMD_BASE);
3432         return -EINVAL;
3433 out_privileged:
3434         DRM_ERROR("Privileged SVGA3D command: %d\n",
3435                   cmd_id + SVGA_3D_CMD_BASE);
3436         return -EPERM;
3437 out_old:
3438         DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3439                   cmd_id + SVGA_3D_CMD_BASE);
3440         return -EINVAL;
3441 out_new:
3442         DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3443                   cmd_id + SVGA_3D_CMD_BASE);
3444         return -EINVAL;
3445 }
3446
3447 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3448                              struct vmw_sw_context *sw_context,
3449                              void *buf,
3450                              uint32_t size)
3451 {
3452         int32_t cur_size = size;
3453         int ret;
3454
3455         sw_context->buf_start = buf;
3456
3457         while (cur_size > 0) {
3458                 size = cur_size;
3459                 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3460                 if (unlikely(ret != 0))
3461                         return ret;
3462                 buf = (void *)((unsigned long) buf + size);
3463                 cur_size -= size;
3464         }
3465
3466         if (unlikely(cur_size != 0)) {
3467                 DRM_ERROR("Command verifier out of sync.\n");
3468                 return -EINVAL;
3469         }
3470
3471         return 0;
3472 }
3473
3474 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3475 {
3476         /* Memory is validation context memory, so no need to free it */
3477
3478         INIT_LIST_HEAD(&sw_context->bo_relocations);
3479 }
3480
3481 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3482 {
3483         struct vmw_relocation *reloc;
3484         struct ttm_buffer_object *bo;
3485
3486         list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3487                 bo = &reloc->vbo->base;
3488                 switch (bo->mem.mem_type) {
3489                 case TTM_PL_VRAM:
3490                         reloc->location->offset += bo->offset;
3491                         reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3492                         break;
3493                 case VMW_PL_GMR:
3494                         reloc->location->gmrId = bo->mem.start;
3495                         break;
3496                 case VMW_PL_MOB:
3497                         *reloc->mob_loc = bo->mem.start;
3498                         break;
3499                 default:
3500                         BUG();
3501                 }
3502         }
3503         vmw_free_relocations(sw_context);
3504 }
3505
3506 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3507                                  uint32_t size)
3508 {
3509         if (likely(sw_context->cmd_bounce_size >= size))
3510                 return 0;
3511
3512         if (sw_context->cmd_bounce_size == 0)
3513                 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3514
3515         while (sw_context->cmd_bounce_size < size) {
3516                 sw_context->cmd_bounce_size =
3517                         PAGE_ALIGN(sw_context->cmd_bounce_size +
3518                                    (sw_context->cmd_bounce_size >> 1));
3519         }
3520
3521         vfree(sw_context->cmd_bounce);
3522         sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3523
3524         if (sw_context->cmd_bounce == NULL) {
3525                 DRM_ERROR("Failed to allocate command bounce buffer.\n");
3526                 sw_context->cmd_bounce_size = 0;
3527                 return -ENOMEM;
3528         }
3529
3530         return 0;
3531 }
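
/*
 * Worked example of the growth policy above, assuming 4 KiB pages: a
 * current bounce size of 0x8000 bytes grows to
 * PAGE_ALIGN(0x8000 + 0x4000) = 0xc000, then to
 * PAGE_ALIGN(0xc000 + 0x6000) = 0x12000, and so on: roughly a 1.5x
 * increase per step, rounded up to a page boundary, until the requested
 * size is covered.
 */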
3532
3533 /**
3534  * vmw_execbuf_fence_commands - create and submit a command stream fence
3535  *
3536  * Creates a fence object and submits a command stream marker.
3537  * If this fails for some reason, we sync the fifo and set *p_fence to
3538  * NULL. It is then safe to fence buffers with a NULL pointer.
3539  *
3540  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3541  * userspace handle is created for the fence.
3542  */
3544 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3545                                struct vmw_private *dev_priv,
3546                                struct vmw_fence_obj **p_fence,
3547                                uint32_t *p_handle)
3548 {
3549         uint32_t sequence;
3550         int ret;
3551         bool synced = false;
3552
3553         /* p_handle implies file_priv. */
3554         BUG_ON(p_handle != NULL && file_priv == NULL);
3555
3556         ret = vmw_fifo_send_fence(dev_priv, &sequence);
3557         if (unlikely(ret != 0)) {
3558                 DRM_ERROR("Fence submission error. Syncing.\n");
3559                 synced = true;
3560         }
3561
3562         if (p_handle != NULL)
3563                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3564                                             sequence, p_fence, p_handle);
3565         else
3566                 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3567
3568         if (unlikely(ret != 0 && !synced)) {
3569                 (void) vmw_fallback_wait(dev_priv, false, false,
3570                                          sequence, false,
3571                                          VMW_FENCE_WAIT_TIMEOUT);
3572                 *p_fence = NULL;
3573         }
3574
3575         return 0;
3576 }
3577
3578 /**
3579  * vmw_execbuf_copy_fence_user - copy fence object information to
3580  * user-space.
3581  *
3582  * @dev_priv: Pointer to a vmw_private struct.
3583  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3584  * @ret: Return value from fence object creation.
3585  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3586  * which the information should be copied.
3587  * @fence: Pointer to the fence object.
3588  * @fence_handle: User-space fence handle.
3589  * @out_fence_fd: exported file descriptor for the fence.  -1 if not used
3590  * @sync_file:  Only used to clean up in case of an error in this function.
3591  *
3592  * This function copies fence information to user-space. If copying fails,
3593  * the user-space struct drm_vmw_fence_rep::error member is likely left
3594  * untouched, and if user-space has preloaded it with -EFAULT, the failed
3595  * copy can be detected that way.
3596  * Also, if copying fails, user-space will be unable to signal the fence
3597  * object, so we wait for it immediately and then unreference the
3598  * user-space reference.
3599  */
3600 void
3601 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3602                             struct vmw_fpriv *vmw_fp,
3603                             int ret,
3604                             struct drm_vmw_fence_rep __user *user_fence_rep,
3605                             struct vmw_fence_obj *fence,
3606                             uint32_t fence_handle,
3607                             int32_t out_fence_fd,
3608                             struct sync_file *sync_file)
3609 {
3610         struct drm_vmw_fence_rep fence_rep;
3611
3612         if (user_fence_rep == NULL)
3613                 return;
3614
3615         memset(&fence_rep, 0, sizeof(fence_rep));
3616
3617         fence_rep.error = ret;
3618         fence_rep.fd = out_fence_fd;
3619         if (ret == 0) {
3620                 BUG_ON(fence == NULL);
3621
3622                 fence_rep.handle = fence_handle;
3623                 fence_rep.seqno = fence->base.seqno;
3624                 vmw_update_seqno(dev_priv, &dev_priv->fifo);
3625                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3626         }
3627
3628         /*
3629          * copy_to_user errors will be detected by user space not
3630          * seeing fence_rep::error filled in. Typically
3631          * user-space would have pre-set that member to -EFAULT.
3632          */
3633         ret = copy_to_user(user_fence_rep, &fence_rep,
3634                            sizeof(fence_rep));
3635
3636         /*
3637          * User-space lost the fence object. We need to sync
3638          * and unreference the handle.
3639          */
3640         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3641                 if (sync_file)
3642                         fput(sync_file->file);
3643
3644                 if (fence_rep.fd != -1) {
3645                         put_unused_fd(fence_rep.fd);
3646                         fence_rep.fd = -1;
3647                 }
3648
3649                 ttm_ref_object_base_unref(vmw_fp->tfile,
3650                                           fence_handle, TTM_REF_USAGE);
3651                 DRM_ERROR("Fence copy error. Syncing.\n");
3652                 (void) vmw_fence_obj_wait(fence, false, false,
3653                                           VMW_FENCE_WAIT_TIMEOUT);
3654         }
3655 }
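
/*
 * Illustrative sketch (not part of the driver): the -EFAULT preload
 * protocol described above, as seen from user-space. struct
 * drm_vmw_fence_rep and DRM_IOCTL_VMW_EXECBUF come from the vmwgfx
 * uapi; the helper names below are hypothetical.
 *
 *	struct drm_vmw_fence_rep fence_rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (unsigned long) &fence_rep;
 *	ioctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg);
 *	if (fence_rep.error == -EFAULT)
 *		handle_copy_failure();	// the kernel's copy_to_user() failed
 *	else if (fence_rep.error == 0)
 *		use_fence(fence_rep.handle, fence_rep.seqno);
 */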
3656
3657 /**
3658  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3659  * the fifo.
3660  *
3661  * @dev_priv: Pointer to a device private structure.
3662  * @kernel_commands: Pointer to the unpatched command batch.
3663  * @command_size: Size of the unpatched command batch.
3664  * @sw_context: Structure holding the relocation lists.
3665  *
3666  * Side effects: If this function returns 0, then the command batch
3667  * pointed to by @kernel_commands will have been modified.
3668  */
3669 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3670                                    void *kernel_commands,
3671                                    u32 command_size,
3672                                    struct vmw_sw_context *sw_context)
3673 {
3674         void *cmd;
3675
3676         if (sw_context->dx_ctx_node)
3677                 cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3678                                           sw_context->dx_ctx_node->ctx->id);
3679         else
3680                 cmd = vmw_fifo_reserve(dev_priv, command_size);
3681         if (!cmd) {
3682                 DRM_ERROR("Failed reserving fifo space for commands.\n");
3683                 return -ENOMEM;
3684         }
3685
3686         vmw_apply_relocations(sw_context);
3687         memcpy(cmd, kernel_commands, command_size);
3688         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3689         vmw_resource_relocations_free(&sw_context->res_relocations);
3690         vmw_fifo_commit(dev_priv, command_size);
3691
3692         return 0;
3693 }
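
/*
 * Note the asymmetry above: buffer object relocations were recorded as
 * pointers into @kernel_commands, so vmw_apply_relocations() patches the
 * batch before the memcpy, whereas resource relocations are stored as
 * byte offsets from the batch start and are therefore applied to the
 * FIFO copy afterwards.
 */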
3694
3695 /**
3696  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3697  * the command buffer manager.
3698  *
3699  * @dev_priv: Pointer to a device private structure.
3700  * @header: Opaque handle to the command buffer allocation.
3701  * @command_size: Size of the unpatched command batch.
3702  * @sw_context: Structure holding the relocation lists.
3703  *
3704  * Side effects: If this function returns 0, then the command buffer
3705  * represented by @header will have been modified.
3706  */
3707 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3708                                      struct vmw_cmdbuf_header *header,
3709                                      u32 command_size,
3710                                      struct vmw_sw_context *sw_context)
3711 {
3712         u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3713                   SVGA3D_INVALID_ID);
3714         void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3715                                        id, false, header);
3716
3717         vmw_apply_relocations(sw_context);
3718         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3719         vmw_resource_relocations_free(&sw_context->res_relocations);
3720         vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3721
3722         return 0;
3723 }
3724
3725 /**
3726  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3727  * submission using a command buffer.
3728  *
3729  * @dev_priv: Pointer to a device private structure.
3730  * @user_commands: User-space pointer to the commands to be submitted.
3731  * @command_size: Size of the unpatched command batch.
3732  * @header: Out parameter returning the opaque pointer to the command buffer.
3733  *
3734  * This function checks whether we can use the command buffer manager for
3735  * submission and if so, creates a command buffer of suitable size and
3736  * copies the user data into that buffer.
3737  *
3738  * On successful return, the function returns a pointer to the data in the
3739  * command buffer and *@header is set to non-NULL.
3740  * If command buffers could not be used, the function returns the value
3741  * @kernel_commands had when it was called. That value may be NULL. In
3742  * that case, the value of *@header will be set to NULL.
3743  * If an error is encountered, the function will return a pointer error value.
3744  * If the function is interrupted by a signal while sleeping, it will return
3745  * -ERESTARTSYS cast to a pointer error value.
3746  */
3747 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3748                                 void __user *user_commands,
3749                                 void *kernel_commands,
3750                                 u32 command_size,
3751                                 struct vmw_cmdbuf_header **header)
3752 {
3753         size_t cmdbuf_size;
3754         int ret;
3755
3756         *header = NULL;
3757         if (command_size > SVGA_CB_MAX_SIZE) {
3758                 DRM_ERROR("Command buffer is too large.\n");
3759                 return ERR_PTR(-EINVAL);
3760         }
3761
3762         if (!dev_priv->cman || kernel_commands)
3763                 return kernel_commands;
3764
3765         /* If possible, add a little space for fencing. */
3766         cmdbuf_size = command_size + 512;
3767         cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3768         kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
3769                                            true, header);
3770         if (IS_ERR(kernel_commands))
3771                 return kernel_commands;
3772
3773         ret = copy_from_user(kernel_commands, user_commands,
3774                              command_size);
3775         if (ret) {
3776                 DRM_ERROR("Failed copying commands.\n");
3777                 vmw_cmdbuf_header_free(*header);
3778                 *header = NULL;
3779                 return ERR_PTR(-EFAULT);
3780         }
3781
3782         return kernel_commands;
3783 }
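
/*
 * Illustrative sketch (not part of the driver): the three outcomes of
 * vmw_execbuf_cmdbuf() as a caller must distinguish them; compare the
 * real call site in vmw_execbuf_process() below.
 *
 *	cmds = vmw_execbuf_cmdbuf(dev_priv, user_commands, kernel_commands,
 *				  command_size, &header);
 *	if (IS_ERR(cmds))	// allocation or copy failure
 *		return PTR_ERR(cmds);
 *	if (header)		// command buffer path; cmds is a kernel copy
 *		...
 *	else			// FIFO path; cmds == kernel_commands, may be NULL
 *		...
 */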
3784
3785 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3786                                    struct vmw_sw_context *sw_context,
3787                                    uint32_t handle)
3788 {
3789         struct vmw_resource *res;
3790         int ret;
3791         unsigned int size;
3792
3793         if (handle == SVGA3D_INVALID_ID)
3794                 return 0;
3795
3796         size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
3797         ret = vmw_validation_preload_res(sw_context->ctx, size);
3798         if (ret)
3799                 return ret;
3800
3801         res = vmw_user_resource_noref_lookup_handle
3802                 (dev_priv, sw_context->fp->tfile, handle,
3803                  user_context_converter);
3804         if (unlikely(IS_ERR(res))) {
3805                 DRM_ERROR("Could not find or use DX context 0x%08x.\n",
3806                           (unsigned) handle);
3807                 return PTR_ERR(res);
3808         }
3809
3810         ret = vmw_execbuf_res_noref_val_add(sw_context, res);
3811         if (unlikely(ret != 0))
3812                 return ret;
3813
3814         sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
3815         sw_context->man = vmw_context_res_man(res);
3816
3817         return 0;
3818 }
3819
3820 int vmw_execbuf_process(struct drm_file *file_priv,
3821                         struct vmw_private *dev_priv,
3822                         void __user *user_commands,
3823                         void *kernel_commands,
3824                         uint32_t command_size,
3825                         uint64_t throttle_us,
3826                         uint32_t dx_context_handle,
3827                         struct drm_vmw_fence_rep __user *user_fence_rep,
3828                         struct vmw_fence_obj **out_fence,
3829                         uint32_t flags)
3830 {
3831         struct vmw_sw_context *sw_context = &dev_priv->ctx;
3832         struct vmw_fence_obj *fence = NULL;
3833         struct vmw_cmdbuf_header *header;
3834         uint32_t handle;
3835         int ret;
3836         int32_t out_fence_fd = -1;
3837         struct sync_file *sync_file = NULL;
3838         DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
3839
3840         if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3841                 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
3842                 if (out_fence_fd < 0) {
3843                         DRM_ERROR("Failed to get a fence file descriptor.\n");
3844                         return out_fence_fd;
3845                 }
3846         }
3847
3848         if (throttle_us) {
3849                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3850                                    throttle_us);
3851
3852                 if (ret)
3853                         goto out_free_fence_fd;
3854         }
3855
3856         kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3857                                              kernel_commands, command_size,
3858                                              &header);
3859         if (IS_ERR(kernel_commands)) {
3860                 ret = PTR_ERR(kernel_commands);
3861                 goto out_free_fence_fd;
3862         }
3863
3864         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3865         if (ret) {
3866                 ret = -ERESTARTSYS;
3867                 goto out_free_header;
3868         }
3869
3870         sw_context->kernel = false;
3871         if (kernel_commands == NULL) {
3872                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
3873                 if (unlikely(ret != 0))
3874                         goto out_unlock;
3875
3877                 ret = copy_from_user(sw_context->cmd_bounce,
3878                                      user_commands, command_size);
3879
3880                 if (unlikely(ret != 0)) {
3881                         ret = -EFAULT;
3882                         DRM_ERROR("Failed copying commands.\n");
3883                         goto out_unlock;
3884                 }
3885                 kernel_commands = sw_context->cmd_bounce;
3886         } else if (!header)
3887                 sw_context->kernel = true;
3888
3889         sw_context->fp = vmw_fpriv(file_priv);
3890         INIT_LIST_HEAD(&sw_context->ctx_list);
3891         sw_context->cur_query_bo = dev_priv->pinned_bo;
3892         sw_context->last_query_ctx = NULL;
3893         sw_context->needs_post_query_barrier = false;
3894         sw_context->dx_ctx_node = NULL;
3895         sw_context->dx_query_mob = NULL;
3896         sw_context->dx_query_ctx = NULL;
3897         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
3898         INIT_LIST_HEAD(&sw_context->res_relocations);
3899         INIT_LIST_HEAD(&sw_context->bo_relocations);
3900         if (sw_context->staged_bindings)
3901                 vmw_binding_state_reset(sw_context->staged_bindings);
3902
3903         if (!sw_context->res_ht_initialized) {
3904                 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3905                 if (unlikely(ret != 0))
3906                         goto out_unlock;
3907                 sw_context->res_ht_initialized = true;
3908         }
3909         INIT_LIST_HEAD(&sw_context->staged_cmd_res);
3910         sw_context->ctx = &val_ctx;
3911         ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3912         if (unlikely(ret != 0))
3913                 goto out_err_nores;
3914
3915         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3916                                 command_size);
3917         if (unlikely(ret != 0))
3918                 goto out_err_nores;
3919
3920         ret = vmw_resources_reserve(sw_context);
3921         if (unlikely(ret != 0))
3922                 goto out_err_nores;
3923
3924         ret = vmw_validation_bo_reserve(&val_ctx, true);
3925         if (unlikely(ret != 0))
3926                 goto out_err_nores;
3927
3928         ret = vmw_validation_bo_validate(&val_ctx, true);
3929         if (unlikely(ret != 0))
3930                 goto out_err;
3931
3932         ret = vmw_validation_res_validate(&val_ctx, true);
3933         if (unlikely(ret != 0))
3934                 goto out_err;
3935         vmw_validation_drop_ht(&val_ctx);
3936
3937         ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
3938         if (unlikely(ret != 0)) {
3939                 ret = -ERESTARTSYS;
3940                 goto out_err;
3941         }
3942
3943         if (dev_priv->has_mob) {
3944                 ret = vmw_rebind_contexts(sw_context);
3945                 if (unlikely(ret != 0))
3946                         goto out_unlock_binding;
3947         }
3948
3949         if (!header) {
3950                 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
3951                                               command_size, sw_context);
3952         } else {
3953                 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
3954                                                 sw_context);
3955                 header = NULL;
3956         }
3957         mutex_unlock(&dev_priv->binding_mutex);
3958         if (ret)
3959                 goto out_err;
3960
3961         vmw_query_bo_switch_commit(dev_priv, sw_context);
3962         ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
3963                                          &fence,
3964                                          (user_fence_rep) ? &handle : NULL);
3965         /*
3966          * This error is harmless, because if fence submission fails,
3967          * vmw_fifo_send_fence will sync. The error will be propagated to
3968          * user-space in @user_fence_rep.
3969          */
3970
3971         if (ret != 0)
3972                 DRM_ERROR("Fence submission error. Syncing.\n");
3973
3974         vmw_execbuf_bindings_commit(sw_context, false);
3975         vmw_bind_dx_query_mob(sw_context);
3976         vmw_validation_res_unreserve(&val_ctx, false);
3977
3978         vmw_validation_bo_fence(sw_context->ctx, fence);
3979
3980         if (unlikely(dev_priv->pinned_bo != NULL &&
3981                      !dev_priv->query_cid_valid))
3982                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
3983
3984         /*
3985          * If anything fails here, give up trying to export the fence
3986          * and do a sync since the user mode will not be able to sync
3987          * the fence itself.  This ensures we are still functionally
3988          * correct.
3989          */
3990         if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3992                 sync_file = sync_file_create(&fence->base);
3993                 if (!sync_file) {
3994                         DRM_ERROR("Unable to create sync file for fence\n");
3995                         put_unused_fd(out_fence_fd);
3996                         out_fence_fd = -1;
3997
3998                         (void) vmw_fence_obj_wait(fence, false, false,
3999                                                   VMW_FENCE_WAIT_TIMEOUT);
4000                 } else {
4001                         /* Link the fence with the FD created earlier */
4002                         fd_install(out_fence_fd, sync_file->file);
4003                 }
4004         }
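
	/*
	 * Note on the fd handling above: the fd number was reserved early
	 * with get_unused_fd_flags() so that failure could be detected
	 * before the batch was submitted, but fd_install() is only called
	 * once the sync_file exists, because an installed fd can no longer
	 * be revoked. On failure the reserved fd is returned with
	 * put_unused_fd().
	 */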
4005
4006         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4007                                     user_fence_rep, fence, handle,
4008                                     out_fence_fd, sync_file);
4009
4010         /* Don't unreference when handing fence out */
4011         if (unlikely(out_fence != NULL)) {
4012                 *out_fence = fence;
4013                 fence = NULL;
4014         } else if (likely(fence != NULL)) {
4015                 vmw_fence_obj_unreference(&fence);
4016         }
4017
4018         vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4019         mutex_unlock(&dev_priv->cmdbuf_mutex);
4020
4021         /*
4022          * Unreference resources outside of the cmdbuf_mutex to
4023          * avoid deadlocks in resource destruction paths.
4024          */
4025         vmw_validation_unref_lists(&val_ctx);
4026
4027         return 0;
4028
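        /*
         * Error paths; each label below unwinds the state set up before
         * the corresponding failure point.
         */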
4029 out_unlock_binding:
4030         mutex_unlock(&dev_priv->binding_mutex);
4031 out_err:
4032         vmw_validation_bo_backoff(&val_ctx);
4033 out_err_nores:
4034         vmw_execbuf_bindings_commit(sw_context, true);
4035         vmw_validation_res_unreserve(&val_ctx, true);
4036         vmw_resource_relocations_free(&sw_context->res_relocations);
4037         vmw_free_relocations(sw_context);
4038         if (unlikely(dev_priv->pinned_bo != NULL &&
4039                      !dev_priv->query_cid_valid))
4040                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4041 out_unlock:
4042         vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4043         vmw_validation_drop_ht(&val_ctx);
4044         WARN_ON(!list_empty(&sw_context->ctx_list));
4045         mutex_unlock(&dev_priv->cmdbuf_mutex);
4046
4047         /*
4048          * Unreference resources outside of the cmdbuf_mutex to
4049          * avoid deadlocks in resource destruction paths.
4050          */
4051         vmw_validation_unref_lists(&val_ctx);
4052 out_free_header:
4053         if (header)
4054                 vmw_cmdbuf_header_free(header);
4055 out_free_fence_fd:
4056         if (out_fence_fd >= 0)
4057                 put_unused_fd(out_fence_fd);
4058
4059         return ret;
4060 }
4061
4062 /**
4063  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4064  *
4065  * @dev_priv: The device private structure.
4066  *
4067  * This function is called to idle the fifo and unpin the query buffer
4068  * if the normal way to do this hits an error, which should typically be
4069  * extremely rare.
4070  */
4071 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4072 {
4073         DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
4074
4075         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4076         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4077         if (dev_priv->dummy_query_bo_pinned) {
4078                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4079                 dev_priv->dummy_query_bo_pinned = false;
4080         }
4081 }
4084 /**
4085  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4086  * query bo.
4087  *
4088  * @dev_priv: The device private structure.
4089  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
4090  * _after_ a query barrier that flushes all queries touching the current
4091  * buffer pointed to by @dev_priv->pinned_bo
4092  *
4093  * This function should be used to unpin the pinned query bo, or
4094  * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destruction, where the hardware may otherwise leak unfinished
 * queries.)
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
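 *
 * Callers that do not already hold @dev_priv->cmdbuf_mutex should use
 * the vmw_execbuf_release_pinned_bo() wrapper below, which takes the
 * mutex around this call.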
4107  */
4108 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4109                                      struct vmw_fence_obj *fence)
4110 {
4111         int ret = 0;
4112         struct vmw_fence_obj *lfence = NULL;
4113         DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4114
4115         if (dev_priv->pinned_bo == NULL)
4116                 goto out_unlock;
4117
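        /*
         * Add the pinned and dummy query buffers to a local validation
         * context and reserve them, so that they can be fenced and
         * unpinned below.
         */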
4118         ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
4119                                     false);
4120         if (ret)
4121                 goto out_no_reserve;
4122
4123         ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
4124                                     false);
4125         if (ret)
4126                 goto out_no_reserve;
4127
4128         ret = vmw_validation_bo_reserve(&val_ctx, false);
4129         if (ret)
4130                 goto out_no_reserve;
4131
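        /*
         * If a query context is still active, no query barrier has been
         * emitted yet, so a caller-supplied fence cannot be valid here;
         * emit a dummy query to act as the barrier.
         */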
4132         if (dev_priv->query_cid_valid) {
4133                 BUG_ON(fence != NULL);
4134                 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
4135                 if (ret)
4136                         goto out_no_emit;
4137                 dev_priv->query_cid_valid = false;
4138         }
4139
4140         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4141         if (dev_priv->dummy_query_bo_pinned) {
4142                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4143                 dev_priv->dummy_query_bo_pinned = false;
4144         }
4145         if (fence == NULL) {
4146                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4147                                                   NULL);
4148                 fence = lfence;
4149         }
4150         vmw_validation_bo_fence(&val_ctx, fence);
4151         if (lfence != NULL)
4152                 vmw_fence_obj_unreference(&lfence);
4153
4154         vmw_validation_unref_lists(&val_ctx);
4155         vmw_bo_unreference(&dev_priv->pinned_bo);
4156 out_unlock:
4157         return;
4158
4159 out_no_emit:
4160         vmw_validation_bo_backoff(&val_ctx);
4161 out_no_reserve:
4162         vmw_validation_unref_lists(&val_ctx);
4163         vmw_execbuf_unpin_panic(dev_priv);
4164         vmw_bo_unreference(&dev_priv->pinned_bo);
}
4167
4168 /**
4169  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4170  * query bo.
4171  *
4172  * @dev_priv: The device private structure.
4173  *
4174  * This function should be used to unpin the pinned query bo, or
4175  * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destruction, where the hardware may otherwise leak unfinished
 * queries.)
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
4182  *
4183  * The function will synchronize on the previous query barrier, and will
4184  * thus not finish until that barrier has executed.
4185  */
4186 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4187 {
4188         mutex_lock(&dev_priv->cmdbuf_mutex);
4189         if (dev_priv->query_cid_valid)
4190                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4191         mutex_unlock(&dev_priv->cmdbuf_mutex);
4192 }
4193
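/**
 * vmw_execbuf_ioctl - Entry point for the execbuf ioctl.
 *
 * @dev: The drm device.
 * @data: The user-space address of the execbuf argument.
 * @file_priv: The per file-descriptor drm file structure.
 * @size: The argument size as seen by the ioctl dispatcher; used below
 * to reject arguments smaller than the version 1 layout.
 */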
4194 int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
4195                       struct drm_file *file_priv, size_t size)
4196 {
4197         struct vmw_private *dev_priv = vmw_priv(dev);
4198         struct drm_vmw_execbuf_arg arg;
4199         int ret;
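        /*
         * End offsets of the known argument layouts: entry 0 is the size
         * of the version 1 struct (everything up to context_handle),
         * entry 1 the size of the current version 2 struct.
         */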
4200         static const size_t copy_offset[] = {
4201                 offsetof(struct drm_vmw_execbuf_arg, context_handle),
4202                 sizeof(struct drm_vmw_execbuf_arg)};
4203         struct dma_fence *in_fence = NULL;
4204
4205         if (unlikely(size < copy_offset[0])) {
4206                 DRM_ERROR("Invalid command size, ioctl %d\n",
4207                           DRM_VMW_EXECBUF);
4208                 return -EINVAL;
4209         }
4210
4211         if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4212                 return -EFAULT;
4213
        /*
         * Extend the ioctl argument while maintaining backwards
         * compatibility: we take different code paths depending on the
         * value of arg.version.
         */
4220
4221         if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4222                      arg.version == 0)) {
4223                 DRM_ERROR("Incorrect execbuf version.\n");
4224                 return -EINVAL;
4225         }
4226
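        /*
         * A version N argument ends at copy_offset[N - 1]; copy in the
         * part beyond the version 1 layout in a second step.
         */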
4227         if (arg.version > 1 &&
4228             copy_from_user(&arg.context_handle,
4229                            (void __user *) (data + copy_offset[0]),
4230                            copy_offset[arg.version - 1] -
4231                            copy_offset[0]) != 0)
4232                 return -EFAULT;
4233
4234         switch (arg.version) {
4235         case 1:
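                /* Version 1 had no context_handle; mark it invalid. */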
4236                 arg.context_handle = (uint32_t) -1;
4237                 break;
4238         case 2:
4239         default:
4240                 break;
4241         }
        /* If a fence FD was imported from elsewhere, wait on it. */
4245         if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4246                 in_fence = sync_file_get_fence(arg.imported_fence_fd);
4247
4248                 if (!in_fence) {
4249                         DRM_ERROR("Cannot get imported fence\n");
4250                         return -EINVAL;
4251                 }
4252
4253                 ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4254                 if (ret)
4255                         goto out;
4256         }
4257
        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                goto out;       /* Drop any imported fence reference. */
4261
4262         ret = vmw_execbuf_process(file_priv, dev_priv,
4263                                   (void __user *)(unsigned long)arg.commands,
4264                                   NULL, arg.command_size, arg.throttle_us,
4265                                   arg.context_handle,
4266                                   (void __user *)(unsigned long)arg.fence_rep,
4267                                   NULL,
4268                                   arg.flags);
4269         ttm_read_unlock(&dev_priv->reservation_sem);
4270         if (unlikely(ret != 0))
4271                 goto out;
4272
4273         vmw_kms_cursor_post_execbuf(dev_priv);
4274
4275 out:
4276         if (in_fence)
4277                 dma_fence_put(in_fence);
4278         return ret;
4279 }