// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

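/* Give up on evicting a resource after this many failed attempts. */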
#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
        struct vmw_buffer_object *backup = res->backup;

        dma_resv_assert_held(backup->base.base.resv);
        res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
                res->func->prio;
        list_add_tail(&res->mob_head, &backup->res_list);
        vmw_bo_prio_add(backup, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
        struct vmw_buffer_object *backup = res->backup;

        dma_resv_assert_held(backup->base.base.resv);
        if (vmw_resource_mob_attached(res)) {
                list_del_init(&res->mob_head);
                vmw_bo_prio_del(backup, res->used_prio);
        }
}

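/**
 * vmw_resource_reference - Take a reference on a resource
 *
 * @res: The resource to reference.
 *
 * Returns @res with its reference count incremented.
 */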
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

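/**
 * vmw_resource_reference_unless_doomed - Take a reference on a resource
 * unless it is already being destroyed
 *
 * @res: The resource to reference.
 *
 * Returns @res with its reference count incremented, or NULL if the
 * reference count has already dropped to zero.
 */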
struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        spin_unlock(&dev_priv->resource_lock);
}

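/**
 * vmw_resource_release - Final release of a resource
 *
 * @kref: The embedded struct kref of the resource being released.
 *
 * Called when the last reference is dropped: unbinds the resource from
 * its backup buffer if attached, destroys the hardware resource, frees
 * the resource and finally releases its id.
 */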
static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);
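
        /* Unbind from the backup buffer and drop our reference to it. */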
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, NULL);
                if (vmw_resource_mob_attached(res) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        val_buf.num_shared = 0;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                vmw_resource_mob_detach(res);
                ttm_bo_unreserve(bo);
                vmw_bo_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
                res->hw_destroy(res);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        spin_lock(&dev_priv->resource_lock);
        if (id != -1)
                idr_remove(idr, id);
        spin_unlock(&dev_priv->resource_lock);
}

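/**
 * vmw_resource_unreference - Drop a reference on a resource
 *
 * @p_res: Pointer to the resource pointer, which will be set to NULL.
 */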
void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;

        *p_res = NULL;
        kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

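        /*
         * Preload the idr outside the spinlock so that the GFP_NOWAIT
         * allocation below can be satisfied from the preload cache.
         */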
        idr_preload(GFP_KERNEL);
        spin_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        spin_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 *
 * Returns 0 on success and a negative error code on failure.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        res->used_prio = 3;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);
        kref_get(&res->kref);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks, without taking
 * a reference
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * If the handle can't be found, ERR_PTR(-ESRCH) is returned; if it is
 * associated with an incorrect resource type, ERR_PTR(-EINVAL) is
 * returned. On success the resource is returned without its reference
 * count incremented, and the caller is responsible for ending the
 * noref lookup sequence with ttm_base_object_noref_release().
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
                                      struct ttm_object_file *tfile,
                                      uint32_t handle,
                                      const struct vmw_user_resource_conv
                                      *converter)
{
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base)
                return ERR_PTR(-ESRCH);

        if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
                ttm_base_object_noref_release();
                return ERR_PTR(-EINVAL);
        }

        return converter->base_obj_to_res(base);
}

/**
 * vmw_user_lookup_handle - look up either a surface or a buffer object
 * from a TTM user-space handle
 *
 * @dev_priv:  Pointer to a device private struct
 * @tfile:     Pointer to a struct ttm_object_file identifying the caller
 * @handle:    The TTM user-space handle
 * @out_surf:  On success, set to the surface if the handle resolved to one
 * @out_buf:   On success, set to the buffer object otherwise
 *
 * The pointers pointed to by @out_surf and @out_buf must be NULL on entry.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_buffer_object **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
        return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_buffer_object *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(!backup))
                return -ENOMEM;

        ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
                          res->func->backup_placement,
                          interruptible,
                          &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto out_no_bo;

        res->backup = backup;

out_no_bo:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

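        /*
         * Bind when a buffer is supplied and either a backup mob is
         * needed but not yet attached, or no backup mob is needed at all.
         */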
        if (func->bind &&
            ((func->needs_backup && !vmw_resource_mob_attached(res) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        vmw_resource_mob_attach(res);
        }

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            bool dirty_set,
                            bool dirty,
                            bool switch_backup,
                            struct vmw_buffer_object *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
                        vmw_resource_mob_detach(res);
                        vmw_bo_unreference(&res->backup);
                }

                if (new_backup) {
                        res->backup = vmw_bo_reference(new_backup);
                        vmw_resource_mob_attach(res);
                } else {
                        res->backup = NULL;
                }
        }
        if (switch_backup)
                res->backup_offset = new_backup_offset;

        if (dirty_set)
                res->res_dirty = dirty;

        if (!res->func->may_evict || res->id == -1 || res->pin_count)
                return;

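        /* The resource is evictable again: put it back on its LRU list. */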
        spin_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
                          struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        ttm_bo_get(&res->backup->base);
        val_buf->bo = &res->backup->base;
        val_buf->num_shared = 0;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
                                     true);
        if (unlikely(ret != 0))
                goto out_no_reserve;

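        /*
         * If the resource is not currently bound to a mob, no device
         * readback into the buffer will occur, so its placement need
         * not be validated here.
         */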
        if (res->func->needs_backup && !vmw_resource_mob_attached(res))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              &ctx);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
        ttm_bo_put(val_buf->bo);
        val_buf->bo = NULL;
        if (backup_dirty)
                vmw_bo_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps should be performed while
 *                  interruptible.
 * @no_backup:      Don't allocate a backup buffer even if one is needed.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                         bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to allocate a backup buffer "
                                  "of size %lu bytes.\n",
                                  (unsigned long) res->backup_size);
                        return ret;
                }
        }

        return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
                                 struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(ticket, &val_list);
        ttm_bo_put(val_buf->bo);
        val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptibly.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
                                 struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        val_buf.num_shared = 0;
        ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                vmw_resource_mob_detach(res);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(ticket, &val_buf);

        return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptibly if possible.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (!res->func->create)
                return 0;

        val_buf.bo = NULL;
        val_buf.num_shared = 0;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                spin_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        spin_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                spin_unlock(&dev_priv->resource_lock);

                /* Trylock backup buffers with a NULL ticket. */
                ret = vmw_resource_do_evict(NULL, evict_res, intr);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                WARN_ON_ONCE(vmw_resource_mob_attached(res));
                vmw_bo_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

/**
 * vmw_resource_unbind_list - Unbind all resources attached to a mob
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
        struct vmw_resource *res, *next;
        struct ttm_validate_buffer val_buf = {
                .bo = &vbo->base,
                .num_shared = 0
        };

        dma_resv_assert_held(vbo->base.base.resv);
        list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
                if (!res->func->unbind)
                        continue;

                (void) res->func->unbind(res, res->res_dirty, &val_buf);
                res->backup_dirty = true;
                res->res_dirty = false;
                vmw_resource_mob_detach(res);
        }

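        /* Wait for the device readbacks queued by the unbinds above. */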
        (void) ttm_bo_wait(&vbo->base, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
        struct vmw_resource *dx_query_ctx;
        struct vmw_private *dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackAllQuery body;
        } *cmd;

        /* No query bound, so do nothing */
        if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
                return 0;

        dx_query_ctx = dx_query_mob->dx_query_ctx;
        dev_priv     = dx_query_ctx->dev_priv;

        cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid    = dx_query_ctx->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /* Triggers a rebind the next time affected context is bound */
        dx_query_mob->dx_query_ctx = NULL;

        return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states before a move
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct vmw_buffer_object *dx_query_mob;
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv;

        dev_priv = container_of(bdev, struct vmw_private, bdev);

        mutex_lock(&dev_priv->binding_mutex);

        dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
        if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
                mutex_unlock(&dev_priv->binding_mutex);
                return;
        }

        /* If BO is being moved from MOB to system memory */
        if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
                struct vmw_fence_obj *fence;

                (void) vmw_query_readback_all(dx_query_mob);
                mutex_unlock(&dev_priv->binding_mutex);

                /* Create a fence and attach the BO to it */
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                vmw_bo_fence_single(bo, fence);

                if (fence != NULL)
                        vmw_fence_obj_unreference(&fence);

                (void) ttm_bo_wait(bo, false, false);
        } else {
                mutex_unlock(&dev_priv->binding_mutex);
        }
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;
        struct ww_acquire_ctx ticket;

        do {
                spin_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                spin_unlock(&dev_priv->resource_lock);

                /* Lock the backup buffers, waiting if needed, with a ticket. */
                ret = vmw_resource_do_evict(&ticket, evict_res, false);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to perform waits interruptibly
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        mutex_lock(&dev_priv->cmdbuf_mutex);
        ret = vmw_resource_reserve(res, interruptible, false);
        if (ret)
                goto out_no_reserve;

        if (res->pin_count == 0) {
                struct vmw_buffer_object *vbo = NULL;

                if (res->backup) {
                        vbo = res->backup;

                        ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
                        if (!vbo->pin_count) {
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
                                         &ctx);
                                if (ret) {
                                        ttm_bo_unreserve(&vbo->base);
                                        goto out_no_validate;
                                }
                        }

                        /* Do we really need to pin the MOB as well? */
                        vmw_bo_pin_reserved(vbo, true);
                }
                ret = vmw_resource_validate(res, interruptible);
                if (vbo)
                        ttm_bo_unreserve(&vbo->base);
                if (ret)
                        goto out_no_validate;
        }
        res->pin_count++;

out_no_validate:
        vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        (void) ttm_read_lock(&dev_priv->reservation_sem, false);
        mutex_lock(&dev_priv->cmdbuf_mutex);

        ret = vmw_resource_reserve(res, false, true);
        WARN_ON(ret);

        WARN_ON(res->pin_count == 0);
        if (--res->pin_count == 0 && res->backup) {
                struct vmw_buffer_object *vbo = res->backup;

                (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
                vmw_bo_pin_reserved(vbo, false);
                ttm_bo_unreserve(&vbo->base);
        }

        vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
        return res->func->res_type;
}