drm/vc4: drop use of drmP.h
drivers/gpu/drm/vc4/vc4_kms.c
/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

struct vc4_ctm_state {
        struct drm_private_state base;
        struct drm_color_ctm *ctm;
        int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
        return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_load_tracker_state {
        struct drm_private_state base;
        u64 hvs_load;
        u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
        return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
                                               struct drm_private_obj *manager)
{
        struct drm_device *dev = state->dev;
        struct vc4_dev *vc4 = dev->dev_private;
        struct drm_private_state *priv_state;
        int ret;

        ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
        if (ret)
                return ERR_PTR(ret);

        priv_state = drm_atomic_get_private_obj_state(state, manager);
        if (IS_ERR(priv_state))
                return ERR_CAST(priv_state);

        return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
        struct vc4_ctm_state *state;

        state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;

        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

        return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
                                  struct drm_private_state *state)
{
        struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

        kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
        .atomic_duplicate_state = vc4_ctm_duplicate_state,
        .atomic_destroy_state = vc4_ctm_destroy_state,
};

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
        u16 r;

        /* Sign bit. */
        r = in & BIT_ULL(63) ? BIT(9) : 0;

        if ((in & GENMASK_ULL(62, 32)) > 0) {
                /* We have zero integer bits so we can only saturate here. */
                r |= GENMASK(8, 0);
        } else {
                /* Otherwise take the 9 most important fractional bits. */
                r |= (in >> 23) & GENMASK(8, 0);
        }

        return r;
}
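
/*
 * Worked example, added for illustration (values follow from the code above,
 * not from the hardware documentation): 0.5 in S31.32 sign-magnitude is
 * 0x0000000080000000; no sign bit and no integer bits are set, so we keep the
 * top nine fractional bits: 0x80000000 >> 23 = 0x100, i.e. 256/512 = 0.5 in
 * S0.9. For -0.25 (bit 63 set, magnitude 0x40000000) the result is
 * BIT(9) | 0x80 = 0x280.
 */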

static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
        struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
        struct drm_color_ctm *ctm = ctm_state->ctm;

        if (ctm_state->fifo) {
                HVS_WRITE(SCALER_OLEDCOEF2,
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
                                        SCALER_OLEDCOEF2_R_TO_R) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
                                        SCALER_OLEDCOEF2_R_TO_G) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
                                        SCALER_OLEDCOEF2_R_TO_B));
                HVS_WRITE(SCALER_OLEDCOEF1,
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
                                        SCALER_OLEDCOEF1_G_TO_R) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
                                        SCALER_OLEDCOEF1_G_TO_G) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
                                        SCALER_OLEDCOEF1_G_TO_B));
                HVS_WRITE(SCALER_OLEDCOEF0,
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
                                        SCALER_OLEDCOEF0_B_TO_R) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
                                        SCALER_OLEDCOEF0_B_TO_G) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
                                        SCALER_OLEDCOEF0_B_TO_B));
        }

        HVS_WRITE(SCALER_OLEDOFFS,
                  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

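/*
 * Added summary, mirroring the calls below: mask underrun interrupts on the
 * channels touched by this commit, wait for fences and for the commits this
 * one depends on, then run the usual helper sequence (disable, program the
 * CTM, commit planes, enable, hw_done, flip_done, cleanup) and finally drop
 * the async_modeset semaphore taken in vc4_atomic_commit().
 */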
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_crtc *vc4_crtc;
        int i;

        for (i = 0; i < dev->mode_config.num_crtc; i++) {
                if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
                        continue;

                vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
                vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
        }

        drm_atomic_helper_wait_for_fences(dev, state, false);

        drm_atomic_helper_wait_for_dependencies(state);

        drm_atomic_helper_commit_modeset_disables(dev, state);

        vc4_ctm_commit(vc4, state);

        drm_atomic_helper_commit_planes(dev, state, 0);

        drm_atomic_helper_commit_modeset_enables(dev, state);

        drm_atomic_helper_fake_vblank(state);

        drm_atomic_helper_commit_hw_done(state);

        drm_atomic_helper_wait_for_flip_done(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);

        drm_atomic_helper_commit_cleanup_done(state);

        drm_atomic_state_put(state);

        up(&vc4->async_modeset);
}

static void commit_work(struct work_struct *work)
{
        struct drm_atomic_state *state = container_of(work,
                                                      struct drm_atomic_state,
                                                      commit_work);
        vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
 * reservation fails. Nonblocking commits are completed from a workqueue.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
                             bool nonblock)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret;

        if (state->async_update) {
                ret = down_interruptible(&vc4->async_modeset);
                if (ret)
                        return ret;

                ret = drm_atomic_helper_prepare_planes(dev, state);
                if (ret) {
                        up(&vc4->async_modeset);
                        return ret;
                }

                drm_atomic_helper_async_commit(dev, state);

                drm_atomic_helper_cleanup_planes(dev, state);

                up(&vc4->async_modeset);

                return 0;
        }

        /* We know for sure we don't want an async update here. Set
         * state->legacy_cursor_update to false to prevent
         * drm_atomic_helper_setup_commit() from auto-completing
         * commit->flip_done.
         */
        state->legacy_cursor_update = false;
        ret = drm_atomic_helper_setup_commit(state, nonblock);
        if (ret)
                return ret;

        INIT_WORK(&state->commit_work, commit_work);

        ret = down_interruptible(&vc4->async_modeset);
        if (ret)
                return ret;

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret) {
                up(&vc4->async_modeset);
                return ret;
        }

        if (!nonblock) {
                ret = drm_atomic_helper_wait_for_fences(dev, state, true);
                if (ret) {
                        drm_atomic_helper_cleanup_planes(dev, state);
                        up(&vc4->async_modeset);
                        return ret;
                }
        }

        /*
         * This is the point of no return - everything below never fails except
         * when the hw goes bonghits. Which means we can commit the new state on
         * the software side now.
         */

        BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

        /*
         * Everything below can be run asynchronously without the need to grab
         * any modeset locks at all under one condition: It must be guaranteed
         * that the asynchronous work has either been cancelled (if the driver
         * supports it, which at least requires that the framebuffers get
         * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
         * before the new state gets committed on the software side with
         * drm_atomic_helper_swap_state().
         *
         * This scheme allows new atomic state updates to be prepared and
         * checked in parallel to the asynchronous completion of the previous
         * update. Which is important since compositors need to figure out the
         * composition of the next frame right after having submitted the
         * current layout.
         */

        drm_atomic_state_get(state);
        if (nonblock)
                queue_work(system_unbound_wq, &state->commit_work);
        else
                vc4_atomic_complete_commit(state);

        return 0;
}
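
/*
 * Added note on the commit flow above: the nonblocking path only queues
 * commit_work on system_unbound_wq before returning, while the blocking path
 * calls vc4_atomic_complete_commit() directly. In both cases the
 * async_modeset semaphore taken above is held until
 * vc4_atomic_complete_commit() releases it, which serializes commits.
 */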

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
                                             struct drm_file *file_priv,
                                             const struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_mode_fb_cmd2 mode_cmd_local;

        /* If the user didn't specify a modifier, use the
         * vc4_set_tiling_ioctl() state for the BO.
         */
        if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
                struct drm_gem_object *gem_obj;
                struct vc4_bo *bo;

                gem_obj = drm_gem_object_lookup(file_priv,
                                                mode_cmd->handles[0]);
                if (!gem_obj) {
                        DRM_DEBUG("Failed to look up GEM BO %d\n",
                                  mode_cmd->handles[0]);
                        return ERR_PTR(-ENOENT);
                }
                bo = to_vc4_bo(gem_obj);

                mode_cmd_local = *mode_cmd;

                if (bo->t_format) {
                        mode_cmd_local.modifier[0] =
                                DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
                } else {
                        mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
                }

                drm_gem_object_put_unlocked(gem_obj);

                mode_cmd = &mode_cmd_local;
        }

        return drm_gem_fb_create(dev, file_priv, mode_cmd);
}


/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
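/*
 * For illustration, added alongside the check below: a scalar of 1.0 is
 * 0x100000000 (BIT_ULL(32)) in S31.32 magnitude and is the largest value we
 * accept; 1.5 (0x180000000) exceeds it and fails the check with -EINVAL.
 */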
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_ctm_state *ctm_state = NULL;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_color_ctm *ctm;
        int i;

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                /* CTM is being disabled. */
                if (!new_crtc_state->ctm && old_crtc_state->ctm) {
                        ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
                        if (IS_ERR(ctm_state))
                                return PTR_ERR(ctm_state);
                        ctm_state->fifo = 0;
                }
        }

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                if (new_crtc_state->ctm == old_crtc_state->ctm)
                        continue;

                if (!ctm_state) {
                        ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
                        if (IS_ERR(ctm_state))
                                return PTR_ERR(ctm_state);
                }

                /* CTM is being enabled or the matrix changed. */
                if (new_crtc_state->ctm) {
                        /* fifo is 1-based since 0 disables CTM. */
                        int fifo = to_vc4_crtc(crtc)->channel + 1;

                        /* Check userland isn't trying to turn on CTM for more
                         * than one CRTC at a time.
                         */
                        if (ctm_state->fifo && ctm_state->fifo != fifo) {
                                DRM_DEBUG_DRIVER("Too many CTM configured\n");
                                return -EINVAL;
                        }

                        /* Check we can approximate the specified CTM.
                         * We disallow scalars |c| > 1.0 since the HW has
                         * no integer bits.
                         */
                        ctm = new_crtc_state->ctm->data;
                        for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
                                u64 val = ctm->matrix[i];

                                val &= ~BIT_ULL(63);
                                if (val > BIT_ULL(32))
                                        return -EINVAL;
                        }

                        ctm_state->fifo = fifo;
                        ctm_state->ctm = ctm;
                }
        }

        return 0;
}

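/*
 * Added summary of the logic below: the load tracker keeps a running total
 * of HVS and memory-bus load in a global private object. For every plane in
 * the commit, the contribution of the old plane state is subtracted and the
 * contribution of the new one is added, so the totals always reflect the
 * full set of enabled planes, not just the ones touched by this commit.
 */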
static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct vc4_dev *vc4 = to_vc4_dev(state->dev);
        struct vc4_load_tracker_state *load_state;
        struct drm_private_state *priv_state;
        struct drm_plane *plane;
        int i;

        priv_state = drm_atomic_get_private_obj_state(state,
                                                      &vc4->load_tracker);
        if (IS_ERR(priv_state))
                return PTR_ERR(priv_state);

        load_state = to_vc4_load_tracker_state(priv_state);
        for_each_oldnew_plane_in_state(state, plane, old_plane_state,
                                       new_plane_state, i) {
                struct vc4_plane_state *vc4_plane_state;

                if (old_plane_state->fb && old_plane_state->crtc) {
                        vc4_plane_state = to_vc4_plane_state(old_plane_state);
                        load_state->membus_load -= vc4_plane_state->membus_load;
                        load_state->hvs_load -= vc4_plane_state->hvs_load;
                }

                if (new_plane_state->fb && new_plane_state->crtc) {
                        vc4_plane_state = to_vc4_plane_state(new_plane_state);
                        load_state->membus_load += vc4_plane_state->membus_load;
                        load_state->hvs_load += vc4_plane_state->hvs_load;
                }
        }

        /* Don't check the load when the tracker is disabled. */
        if (!vc4->load_tracker_enabled)
                return 0;

        /* The absolute limit is 2Gbyte/sec, but let's take a margin to let
         * the system work when other blocks are accessing the memory.
         */
        if (load_state->membus_load > SZ_1G + SZ_512M)
                return -ENOSPC;

        /* HVS clock is supposed to run at 250 MHz, let's take a margin and
         * consider the maximum number of cycles is 240M.
         */
        if (load_state->hvs_load > 240000000ULL)
                return -ENOSPC;

        return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
        struct vc4_load_tracker_state *state;

        state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;

        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

        return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
                                           struct drm_private_state *state)
{
        struct vc4_load_tracker_state *load_state;

        load_state = to_vc4_load_tracker_state(state);
        kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
        .atomic_duplicate_state = vc4_load_tracker_duplicate_state,
        .atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
        int ret;

        ret = vc4_ctm_atomic_check(dev, state);
        if (ret < 0)
                return ret;

        ret = drm_atomic_helper_check(dev, state);
        if (ret)
                return ret;

        return vc4_load_tracker_atomic_check(state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
        .atomic_check = vc4_atomic_check,
        .atomic_commit = vc4_atomic_commit,
        .fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_ctm_state *ctm_state;
        struct vc4_load_tracker_state *load_state;
        int ret;

        /* Start with the load tracker enabled. Can be disabled through the
         * debugfs load_tracker file.
         */
        vc4->load_tracker_enabled = true;

        sema_init(&vc4->async_modeset, 1);

        /* Set support for vblank irq fast disable, before drm_vblank_init() */
        dev->vblank_disable_immediate = true;

        dev->irq_enabled = true;
        ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
        if (ret < 0) {
                dev_err(dev->dev, "failed to initialize vblank\n");
                return ret;
        }

        dev->mode_config.max_width = 2048;
        dev->mode_config.max_height = 2048;
        dev->mode_config.funcs = &vc4_mode_funcs;
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.async_page_flip = true;
        dev->mode_config.allow_fb_modifiers = true;

        drm_modeset_lock_init(&vc4->ctm_state_lock);

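        /*
         * Added note: the CTM manager and the load tracker are global
         * drm_private_obj instances. The initial states allocated here are
         * duplicated on each commit by the _duplicate_state() hooks above.
         */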
        ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
        if (!ctm_state)
                return -ENOMEM;

        drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
                                    &vc4_ctm_state_funcs);

        load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
        if (!load_state) {
                drm_atomic_private_obj_fini(&vc4->ctm_manager);
                return -ENOMEM;
        }

        drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
                                    &vc4_load_tracker_state_funcs);

        drm_mode_config_reset(dev);

        drm_kms_helper_poll_init(dev);

        return 0;
}