/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/*
 * Unlock, in reverse reservation order, all buffers on @list that were
 * reserved before @entry. Used to unwind a partially completed
 * reservation pass before retrying or bailing out.
 */
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

/*
 * Undo a successful ttm_eu_reserve_buffers(): move each buffer to the
 * tail of the LRU list, drop its reservation lock and, if a ticket was
 * used, release the acquire context.
 */
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */
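
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * build a list of struct ttm_validate_buffer entries, reserve them
 * under a ww_acquire_ctx ticket, validate and submit work, then fence
 * and unreserve everything in one step. The my_*() helpers and fence
 * below are placeholders.
 *
 *	struct ww_acquire_ctx ticket;
 *	struct dma_fence *fence;
 *	LIST_HEAD(list);
 *	LIST_HEAD(dups);
 *	int ret;
 *
 *	my_fill_validate_list(&list);	// sets entry->bo, entry->num_shared
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, &dups);
 *	if (ret)
 *		return ret;		// -ERESTARTSYS if interrupted
 *
 *	ret = my_validate_and_submit(&list, &fence);
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *		return ret;
 *	}
 *	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
 */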

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;

			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->num_shared)
				continue;

			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);
			if (!ret)
				continue;
		}

		/*
		 * We lost out: drop every reservation taken so far, try
		 * to reserve only this buffer (sleeping on it in the
		 * -EDEADLK slow path below), then start the loop over if
		 * that succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			if (intr) {
				ret = dma_resv_lock_slow_interruptible(bo->base.resv,
								       ticket);
			} else {
				dma_resv_lock_slow(bo->base.resv, ticket);
				ret = 0;
			}
		}

		if (!ret && entry->num_shared)
			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/*
		 * Move this buffer to the front of the list so that
		 * iteration restarts right after it: the remaining
		 * buffers are (re)reserved while this one stays locked,
		 * without having to track where the loop left off.
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

/*
 * Attach @fence to every buffer on @list as its shared or exclusive
 * fence (depending on num_shared), move each buffer to the LRU tail,
 * unreserve it and, if a ticket was used, release the acquire context.
 */
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	glob = bo->bdev->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->num_shared)
			dma_resv_add_shared_fence(bo->base.resv, fence);
		else
			dma_resv_add_excl_fence(bo->base.resv, fence);
		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
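
/*
 * Filling a validation-list entry (hypothetical sketch; the fields
 * assumed here are those of struct ttm_validate_buffer as declared in
 * drm/ttm/ttm_execbuf_util.h, and my_bo is a placeholder):
 *
 *	struct ttm_validate_buffer val;
 *
 *	val.bo = my_bo;			// buffer object to reserve
 *	val.num_shared = 1;		// shared-fence slots to preallocate;
 *					// 0 requests an exclusive fence
 *	list_add(&val.head, &list);
 */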