/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__

#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>

/* max number of rings */
#define AMDGPU_MAX_RINGS                21
#define AMDGPU_MAX_GFX_RINGS            1
#define AMDGPU_MAX_COMPUTE_RINGS        8
#define AMDGPU_MAX_VCE_RINGS            3
#define AMDGPU_MAX_UVD_ENC_RINGS        2

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED    ((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM           ((void *)1ul)
#define AMDGPU_FENCE_OWNER_KFD          ((void *)2ul)

#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
#define AMDGPU_FENCE_FLAG_INT           (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)

#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)

enum amdgpu_ring_type {
        AMDGPU_RING_TYPE_GFX,
        AMDGPU_RING_TYPE_COMPUTE,
        AMDGPU_RING_TYPE_SDMA,
        AMDGPU_RING_TYPE_UVD,
        AMDGPU_RING_TYPE_VCE,
        AMDGPU_RING_TYPE_KIQ,
        AMDGPU_RING_TYPE_UVD_ENC,
        AMDGPU_RING_TYPE_VCN_DEC,
        AMDGPU_RING_TYPE_VCN_ENC,
        AMDGPU_RING_TYPE_VCN_JPEG
};

struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;

/*
 * Fences.
 */
struct amdgpu_fence_driver {
        uint64_t                        gpu_addr;
        volatile uint32_t               *cpu_addr;
        /* sync_seq is protected by ring emission lock */
        uint32_t                        sync_seq;
        atomic_t                        last_seq;
        bool                            initialized;
        struct amdgpu_irq_src           *irq_src;
        unsigned                        irq_type;
        unsigned                        num_fences_mask;
        spinlock_t                      lock;
        struct dma_fence                **fences;
};

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
                                  unsigned num_hw_submission);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
                      unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
                                      uint32_t wait_seq,
                                      signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

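/*
 * Illustrative sketch, not part of this header: a minimal fence
 * round-trip using the declarations above.  Assumes the caller has
 * already reserved ring space for the fence packet; error handling
 * is trimmed.
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &fence, AMDGPU_FENCE_FLAG_INT);
 *	if (r)
 *		return r;
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 *
 * amdgpu_fence_emit() asks the ring's emit_fence callback to write the
 * fence packet and returns a dma_fence that signals once the GPU has
 * executed it.
 */
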
/*
 * Rings.
 */

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
        enum amdgpu_ring_type   type;
        uint32_t                align_mask;
        u32                     nop;
        bool                    support_64bit_ptrs;
        unsigned                vmhub;
        unsigned                extra_dw;

        /* ring read/write ptr handling */
        u64 (*get_rptr)(struct amdgpu_ring *ring);
        u64 (*get_wptr)(struct amdgpu_ring *ring);
        void (*set_wptr)(struct amdgpu_ring *ring);
        /* validating and patching of IBs */
        int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
        int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
        /* constants to calculate how many DW are needed for an emit */
        unsigned emit_frame_size;
        unsigned emit_ib_size;
        /* command emit functions */
        void (*emit_ib)(struct amdgpu_ring *ring,
                        struct amdgpu_ib *ib,
                        unsigned vmid, bool ctx_switch);
        void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
                           uint64_t seq, unsigned flags);
        void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
        void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
                              uint64_t pd_addr);
        void (*emit_hdp_flush)(struct amdgpu_ring *ring);
        void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
                                uint32_t gds_base, uint32_t gds_size,
                                uint32_t gws_base, uint32_t gws_size,
                                uint32_t oa_base, uint32_t oa_size);
        /* testing functions */
        int (*test_ring)(struct amdgpu_ring *ring);
        int (*test_ib)(struct amdgpu_ring *ring, long timeout);
        /* insert NOP packets */
        void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
        void (*insert_start)(struct amdgpu_ring *ring);
        void (*insert_end)(struct amdgpu_ring *ring);
        /* pad the indirect buffer to the necessary number of dw */
        void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
        unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
        void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
        /* note usage for clock and power gating */
        void (*begin_use)(struct amdgpu_ring *ring);
        void (*end_use)(struct amdgpu_ring *ring);
        void (*emit_switch_buffer)(struct amdgpu_ring *ring);
        void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
        void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
        void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
        void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
                              uint32_t val, uint32_t mask);
        void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
                                        uint32_t reg0, uint32_t reg1,
                                        uint32_t ref, uint32_t mask);
        void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
        /* priority functions */
        void (*set_priority)(struct amdgpu_ring *ring,
                             enum drm_sched_priority priority);
        /* Try to soft recover the ring to make the fence signal */
        void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
};
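
/*
 * Illustrative sketch, not part of this header: an IP block exposes its
 * ring by filling a funcs table with its own callbacks.  All names and
 * values below are hypothetical placeholders, not a real IP block.
 *
 *	static const struct amdgpu_ring_funcs foo_ring_funcs = {
 *		.type = AMDGPU_RING_TYPE_GFX,
 *		.align_mask = 0xff,
 *		.support_64bit_ptrs = true,
 *		.get_rptr = foo_ring_get_rptr,
 *		.get_wptr = foo_ring_get_wptr,
 *		.set_wptr = foo_ring_set_wptr,
 *		.emit_frame_size = 20,
 *		.emit_ib_size = 4,
 *		.emit_ib = foo_ring_emit_ib,
 *		.emit_fence = foo_ring_emit_fence,
 *	};
 */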

struct amdgpu_ring {
        struct amdgpu_device            *adev;
        const struct amdgpu_ring_funcs  *funcs;
        struct amdgpu_fence_driver      fence_drv;
        struct drm_gpu_scheduler        sched;

        struct amdgpu_bo        *ring_obj;
        volatile uint32_t       *ring;          /* CPU mapping of the ring buffer */
        unsigned                rptr_offs;
        u64                     wptr;           /* write pointer, in dwords */
        u64                     wptr_old;
        unsigned                ring_size;      /* in bytes */
        unsigned                max_dw;         /* max dwords for one submission */
        int                     count_dw;       /* dwords left in the current reservation */
        uint64_t                gpu_addr;
        uint64_t                ptr_mask;       /* mask applied to wptr/rptr */
        uint32_t                buf_mask;       /* ring size in dwords, minus one */
        bool                    ready;
        u32                     idx;
        u32                     me;
        u32                     pipe;
        u32                     queue;
        struct amdgpu_bo        *mqd_obj;
        uint64_t                mqd_gpu_addr;
        void                    *mqd_ptr;
        uint64_t                eop_gpu_addr;
        u32                     doorbell_index;
        bool                    use_doorbell;
        bool                    use_pollmem;
        unsigned                wptr_offs;
        unsigned                fence_offs;
        uint64_t                current_ctx;
        char                    name[16];
        unsigned                cond_exe_offs;
        u64                     cond_exe_gpu_addr;
        volatile u32            *cond_exe_cpu_addr;
        unsigned                vm_inv_eng;
        struct dma_fence        *vmid_wait;
        bool                    has_compute_vm_bug;

        atomic_t                num_jobs[DRM_SCHED_PRIORITY_MAX];
        struct mutex            priority_mutex;
        /* protected by priority_mutex */
        int                     priority;

#if defined(CONFIG_DEBUG_FS)
        struct dentry *ent;
#endif
};

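/*
 * These wrappers dispatch through the ring's funcs table so callers
 * never reference an IP block's implementation directly, e.g.
 * amdgpu_ring_test_ring(ring) expands to ring->funcs->test_ring(ring).
 */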
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))

int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
                              enum drm_sched_priority priority);
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
                              enum drm_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned ring_size, struct amdgpu_irq_src *irq_src,
                     unsigned irq_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
                                                uint32_t reg0, uint32_t val0,
                                                uint32_t reg1, uint32_t val1);
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
                               struct dma_fence *fence);

/* fill the whole ring buffer with the ASIC specific nop packet */
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
        int i = 0;

        while (i <= ring->buf_mask)
                ring->ring[i++] = ring->funcs->nop;
}

static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
        /* only warns: count_dw is the space reserved via amdgpu_ring_alloc() */
        if (ring->count_dw <= 0)
                DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
        ring->ring[ring->wptr++ & ring->buf_mask] = v;
        ring->wptr &= ring->ptr_mask;
        ring->count_dw--;
}

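/*
 * Illustrative sketch, not part of this header: the reserve/write/commit
 * pattern that every emit path follows.  The payload dwords are
 * hypothetical; amdgpu_ring_alloc() reserves space (setting count_dw)
 * and amdgpu_ring_commit() publishes the new wptr to the hardware.
 *
 *	r = amdgpu_ring_alloc(ring, 3);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, 0x12345678);
 *	amdgpu_ring_write(ring, lower_32_bits(addr));
 *	amdgpu_ring_write(ring, upper_32_bits(addr));
 *	amdgpu_ring_commit(ring);
 */
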
static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
                                              void *src, int count_dw)
{
        unsigned occupied, chunk1, chunk2;
        void *dst;

        if (unlikely(ring->count_dw < count_dw))
                DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

        /* chunk1: dwords up to the end of the buffer, chunk2: wrapped remainder */
        occupied = ring->wptr & ring->buf_mask;
        dst = (void *)&ring->ring[occupied];
        chunk1 = ring->buf_mask + 1 - occupied;
        chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
        chunk2 = count_dw - chunk1;
        /* convert both chunks from dwords to bytes for memcpy() */
        chunk1 <<= 2;
        chunk2 <<= 2;

        if (chunk1)
                memcpy(dst, src, chunk1);

        if (chunk2) {
                src += chunk1;
                dst = (void *)ring->ring;
                memcpy(dst, src, chunk2);
        }

        ring->wptr += count_dw;
        ring->wptr &= ring->ptr_mask;
        ring->count_dw -= count_dw;
}
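
/*
 * Worked example of the wrap-around split above: with a 1024-dword ring
 * (buf_mask == 1023), wptr at 1020 and count_dw == 8, chunk1 covers the
 * 4 dwords up to the end of the buffer (16 bytes after the shift) and
 * chunk2 the remaining 4 dwords (16 bytes) copied back to the start of
 * the buffer; wptr then advances to (1020 + 8) & ptr_mask.
 */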

#endif