// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include <drm/drm_drv.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"

/*
 * Command buffer helpers:
 */

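/*
 * Emit a single 32-bit word at the current write position and advance
 * the position; user_size is maintained in bytes.
 */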
static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
        u32 *vaddr = (u32 *)buffer->vaddr;

        BUG_ON(buffer->user_size >= buffer->size);

        vaddr[buffer->user_size / 4] = data;
        buffer->user_size += 4;
}

static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
        u32 reg, u32 value)
{
        u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

        buffer->user_size = ALIGN(buffer->user_size, 8);

        /* write a register via cmd stream */
        OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
                    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
                    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
        OUT(buffer, value);
}

static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

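/*
 * Emit a WAIT command; the OR'ed literal 200 lands in the low bits of
 * the WAIT header, which presumably encode the wait delay count.
 */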
static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}

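/*
 * Emit a LINK command: the FE branches to 'address' and prefetches
 * 'prefetch' 64-bit words from there (callers pass sizes in u64 units,
 * e.g. cmdbuf->size / 8).
 */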
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
        u16 prefetch, u32 address)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
                    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
        OUT(buffer, address);
}

static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
        u32 from, u32 to)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
        OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

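/*
 * Queue a semaphore token from 'from' to 'to'; in this file it is
 * always paired with a following CMD_STALL, so the FE blocks until the
 * PE has drained.
 */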
static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
        CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
                       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
                       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}

static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buffer, u8 pipe)
{
        u32 flush = 0;

        lockdep_assert_held(&gpu->lock);

        /*
         * This assumes that if we're switching to 2D, we're switching
         * away from 3D, and vice versa.  Hence, if we're switching to
         * the 2D core, we need to flush the 3D depth and color caches,
         * otherwise we need to flush the 2D pixel engine cache.
         */
        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

        CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

        CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}

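/* Debug helper: dump 'len' 32-bit words of 'buf' from byte offset 'off'. */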
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
        u32 size = buf->size;
        u32 *ptr = buf->vaddr + off;

        dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
                        ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);

        print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                        ptr, len * 4, 0);
}

/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else.  'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
        unsigned int wl_offset, u32 cmd, u32 arg)
{
        u32 *lw = buffer->vaddr + wl_offset;

        /* argument word first, so the FE can never pair the new
         * command with a stale argument */
        lw[1] = arg;
        mb();
        lw[0] = cmd;
        mb();
}

/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
        if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
                buffer->user_size = 0;

        return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size;
}

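/*
 * Prime the ring buffer with a WAIT followed by a LINK back to that
 * WAIT, so the FE spins in place until the link is patched; the return
 * value is the prefetch size in 64-bit words for starting the FE.
 */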
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        /* initialize buffer */
        buffer->user_size = 0;

        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
                 buffer->user_size - 4);

        return buffer->user_size / 8;
}

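/*
 * Build a short one-shot buffer that programs the MMUv2 master TLB
 * address and safe address on each pipe the core exposes; the returned
 * prefetch size (in 64-bit words) lets the caller execute it once.
 */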
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        buffer->user_size = 0;

        if (gpu->identity.features & chipFeatures_PIPE_3D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                        mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        if (gpu->identity.features & chipFeatures_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                        mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        CMD_END(buffer);

        buffer->user_size = ALIGN(buffer->user_size, 8);

        return buffer->user_size / 8;
}

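/*
 * Build a one-shot buffer selecting entry 0 of the PTA (likely the
 * MMUv2 page table array) for subsequent translations.
 */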
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        buffer->user_size = 0;

        CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
                       VIVS_MMUv2_PTA_CONFIG_INDEX(0));

        CMD_END(buffer);

        buffer->user_size = ALIGN(buffer->user_size, 8);

        return buffer->user_size / 8;
}

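/*
 * Finalize the ring: the previous WAIT/LINK pair occupies the last 16
 * bytes of the stream. If the active pipe needs a cache flush, append
 * a flush+END sequence and patch the WAIT into a LINK to it; otherwise
 * patch the WAIT directly into an END.
 */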
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 link_target, flush = 0;

        lockdep_assert_held(&gpu->lock);

        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH |
                        VIVS_GL_FLUSH_CACHE_COLOR |
                        VIVS_GL_FLUSH_CACHE_TEXTURE |
                        VIVS_GL_FLUSH_CACHE_TEXTUREVS |
                        VIVS_GL_FLUSH_CACHE_SHADER_L2;

        if (flush) {
                unsigned int dwords = 7;

                link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
                if (gpu->exec_state == ETNA_PIPE_3D)
                        CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                                       VIVS_TS_FLUSH_CACHE_FLUSH);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_END(buffer);

                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_LINK_HEADER_OP_LINK |
                                            VIV_FE_LINK_HEADER_PREFETCH(dwords),
                                            link_target);
        } else {
                /* Replace the last link-wait with an "END" command */
                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_END_HEADER_OP_END, 0);
        }
}

/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 dwords, target;

        lockdep_assert_held(&gpu->lock);

        /*
         * We need at most 4 dwords in the return target:
         * 1 event + 1 end + 1 wait + 1 link.
         */
        dwords = 4;
        target = etnaviv_buffer_reserve(gpu, buffer, dwords);

        /* Signal sync point event */
        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);

        /* Stop the FE to 'pause' the GPU */
        CMD_END(buffer);

        /* Append waitlink */
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
                            buffer->user_size - 4);

        /*
         * Kick off the 'sync point' command by replacing the previous
         * WAIT with a link to the address in the ring buffer.
         */
        etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                    VIV_FE_LINK_HEADER_OP_LINK |
                                    VIV_FE_LINK_HEADER_PREFETCH(dwords),
                                    target);
}

/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
        unsigned int event, struct etnaviv_cmdbuf *cmdbuf)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 return_target, return_dwords;
        u32 link_target, link_dwords;
        bool switch_context = gpu->exec_state != exec_state;

        lockdep_assert_held(&gpu->lock);

        if (drm_debug & DRM_UT_DRIVER)
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

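        /* Link to the whole user buffer; prefetch is its size in u64s. */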
        link_target = etnaviv_cmdbuf_get_va(cmdbuf);
        link_dwords = cmdbuf->size / 8;

        /*
         * If we need maintenance prior to submitting this buffer, we will
         * need to append an MMU flush load state, followed by a new
         * link to this buffer - the number of extra dwords is worked
         * out below.
         */
        if (gpu->mmu->need_flush || switch_context) {
                u32 target, extra_dwords;

                /* link command */
                extra_dwords = 1;

                /* flush command */
                if (gpu->mmu->need_flush) {
                        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
                                extra_dwords += 1;
                        else
                                extra_dwords += 3;
                }

                /* pipe switch commands */
                if (switch_context)
                        extra_dwords += 4;

                target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);

                if (gpu->mmu->need_flush) {
                        /* Add the MMU flush */
                        if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
                                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
                                               VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
                        } else {
                                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                                        VIVS_MMUv2_CONFIGURATION_MODE_MASK |
                                        VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
                                        VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
                                CMD_SEM(buffer, SYNC_RECIPIENT_FE,
                                        SYNC_RECIPIENT_PE);
                                CMD_STALL(buffer, SYNC_RECIPIENT_FE,
                                        SYNC_RECIPIENT_PE);
                        }

                        gpu->mmu->need_flush = false;
                }

                if (switch_context) {
                        etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
                        gpu->exec_state = exec_state;
                }

                /* And the link to the submitted buffer */
                CMD_LINK(buffer, link_dwords, link_target);

                /* Update the link target to point to above instructions */
                link_target = target;
                link_dwords = extra_dwords;
        }

        /*
         * Append a LINK to the submitted command buffer to return to
         * the ring buffer.  return_target is the ring target address.
         * We need at most 7 dwords in the return target: 2 cache flush +
         * 2 semaphore stall + 1 event + 1 wait + 1 link.
         */
        return_dwords = 7;
        return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
        CMD_LINK(cmdbuf, return_dwords, return_target);

        /*
         * Append a cache flush, stall, event, wait and link pointing back to
         * the wait command to the ring buffer.
         */
        if (gpu->exec_state == ETNA_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                                       VIVS_GL_FLUSH_CACHE_PE2D);
        } else {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                                       VIVS_GL_FLUSH_CACHE_DEPTH |
                                       VIVS_GL_FLUSH_CACHE_COLOR);
                CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                                       VIVS_TS_FLUSH_CACHE_FLUSH);
        }
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
                            buffer->user_size - 4);

        if (drm_debug & DRM_UT_DRIVER)
                pr_info("stream link to 0x%08x @ 0x%08x %p\n",
                        return_target, etnaviv_cmdbuf_get_va(cmdbuf),
                        cmdbuf->vaddr);

        if (drm_debug & DRM_UT_DRIVER) {
                print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                               cmdbuf->vaddr, cmdbuf->size, 0);

                pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
                pr_info("addr: 0x%08x\n", link_target);
                pr_info("back: 0x%08x\n", return_target);
                pr_info("event: %d\n", event);
        }

        /*
         * Kick off the submitted command by replacing the previous
         * WAIT with a link to the address in the ring buffer.
         */
        etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                    VIV_FE_LINK_HEADER_OP_LINK |
                                    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
                                    link_target);

        if (drm_debug & DRM_UT_DRIVER)
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}