/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 HVS module.
 *
 * The Hardware Video Scaler (HVS) is the piece of hardware that does
 * translation, scaling, colorspace conversion, and compositing of
 * pixels stored in framebuffers into a FIFO of pixels going out to
 * the Pixel Valve (CRTC).  It operates at the system clock rate (the
 * system audio clock gate, specifically), which is much higher than
 * the pixel clock rate.
 *
 * There is a single global HVS, with multiple output FIFOs that can
 * be consumed by the PVs.  This file just manages the resources for
 * the HVS, while the vc4_crtc.c code actually drives HVS setup for
 * each CRTC.
 */

#include <linux/component.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic_helper.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

static const struct debugfs_reg32 hvs_regs[] = {
        VC4_REG32(SCALER_DISPCTRL),
        VC4_REG32(SCALER_DISPSTAT),
        VC4_REG32(SCALER_DISPID),
        VC4_REG32(SCALER_DISPECTRL),
        VC4_REG32(SCALER_DISPPROF),
        VC4_REG32(SCALER_DISPDITHER),
        VC4_REG32(SCALER_DISPEOLN),
        VC4_REG32(SCALER_DISPLIST0),
        VC4_REG32(SCALER_DISPLIST1),
        VC4_REG32(SCALER_DISPLIST2),
        VC4_REG32(SCALER_DISPLSTAT),
        VC4_REG32(SCALER_DISPLACT0),
        VC4_REG32(SCALER_DISPLACT1),
        VC4_REG32(SCALER_DISPLACT2),
        VC4_REG32(SCALER_DISPCTRL0),
        VC4_REG32(SCALER_DISPBKGND0),
        VC4_REG32(SCALER_DISPSTAT0),
        VC4_REG32(SCALER_DISPBASE0),
        VC4_REG32(SCALER_DISPCTRL1),
        VC4_REG32(SCALER_DISPBKGND1),
        VC4_REG32(SCALER_DISPSTAT1),
        VC4_REG32(SCALER_DISPBASE1),
        VC4_REG32(SCALER_DISPCTRL2),
        VC4_REG32(SCALER_DISPBKGND2),
        VC4_REG32(SCALER_DISPSTAT2),
        VC4_REG32(SCALER_DISPBASE2),
        VC4_REG32(SCALER_DISPALPHA2),
        VC4_REG32(SCALER_OLEDOFFS),
        VC4_REG32(SCALER_OLEDCOEF0),
        VC4_REG32(SCALER_OLEDCOEF1),
        VC4_REG32(SCALER_OLEDCOEF2),
};

void vc4_hvs_dump_state(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_printer p = drm_info_printer(&vc4->hvs->pdev->dev);
        int i;

        drm_print_regset32(&p, &vc4->hvs->regset);

        DRM_INFO("HVS ctx:\n");
        for (i = 0; i < 64; i += 4) {
                DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
                         i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
                         readl((u32 __iomem *)vc4->hvs->dlist + i + 0),
                         readl((u32 __iomem *)vc4->hvs->dlist + i + 1),
                         readl((u32 __iomem *)vc4->hvs->dlist + i + 2),
                         readl((u32 __iomem *)vc4->hvs->dlist + i + 3));
        }
}

static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
{
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_printer p = drm_seq_file_printer(m);

        drm_printf(&p, "%d\n", atomic_read(&vc4->underrun));

        return 0;
}

/* The filter kernel is composed of dwords each containing 3 9-bit
 * signed integers packed next to each other.
 */
#define VC4_INT_TO_COEFF(coeff) ((coeff) & 0x1ff)
#define VC4_PPF_FILTER_WORD(c0, c1, c2)                         \
        ((((c0) & 0x1ff) << 0) |                                \
         (((c1) & 0x1ff) << 9) |                                \
         (((c2) & 0x1ff) << 18))
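
/* For example, the first dword of the Mitchell-Netravali kernel below,
 * VC4_PPF_FILTER_WORD(0, -2, -6), keeps the low 9 bits of each
 * two's-complement coefficient (-2 -> 0x1fe, -6 -> 0x1fa) and packs them
 * as (0x1fa << 18) | (0x1fe << 9) | 0x000 = 0x07ebfc00.
 */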

/* The whole filter kernel is arranged as the coefficients 0-16 going
 * up, then a pad, then 17-31 going down and reversed within the
 * dwords.  This means that a linear phase kernel (where it's
 * symmetrical at the boundary between 15 and 16) has the last 5
 * dwords matching the first 5, but reversed.
 */
#define VC4_LINEAR_PHASE_KERNEL(c0, c1, c2, c3, c4, c5, c6, c7, c8,     \
                                c9, c10, c11, c12, c13, c14, c15)       \
        {VC4_PPF_FILTER_WORD(c0, c1, c2),                               \
         VC4_PPF_FILTER_WORD(c3, c4, c5),                               \
         VC4_PPF_FILTER_WORD(c6, c7, c8),                               \
         VC4_PPF_FILTER_WORD(c9, c10, c11),                             \
         VC4_PPF_FILTER_WORD(c12, c13, c14),                            \
         VC4_PPF_FILTER_WORD(c15, c15, 0)}

#define VC4_LINEAR_PHASE_KERNEL_DWORDS 6
#define VC4_KERNEL_DWORDS (VC4_LINEAR_PHASE_KERNEL_DWORDS * 2 - 1)
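
/* With VC4_LINEAR_PHASE_KERNEL_DWORDS = 6, VC4_KERNEL_DWORDS works out
 * to 11: vc4_hvs_upload_linear_kernel() below writes the six dwords as
 * given and then mirrors dwords 4 down to 0 into slots 6-10, producing
 * the reversed second half described above.
 */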

/* Recommended B=1/3, C=1/3 filter choice from Mitchell/Netravali.
 * http://www.cs.utexas.edu/~fussell/courses/cs384g/lectures/mitchell/Mitchell.pdf
 */
static const u32 mitchell_netravali_1_3_1_3_kernel[] =
        VC4_LINEAR_PHASE_KERNEL(0, -2, -6, -8, -10, -8, -3, 2, 18,
                                50, 82, 119, 155, 187, 213, 227);

static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
                                        struct drm_mm_node *space,
                                        const u32 *kernel)
{
        int ret, i;
        u32 __iomem *dst_kernel;

        ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
        if (ret) {
                DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
                          ret);
                return ret;
        }

        dst_kernel = hvs->dlist + space->start;

        for (i = 0; i < VC4_KERNEL_DWORDS; i++) {
                if (i < VC4_LINEAR_PHASE_KERNEL_DWORDS) {
                        writel(kernel[i], &dst_kernel[i]);
                } else {
                        writel(kernel[VC4_KERNEL_DWORDS - i - 1],
                               &dst_kernel[i]);
                }
        }

        return 0;
}

void vc4_hvs_mask_underrun(struct drm_device *dev, int channel)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        u32 dispctrl = HVS_READ(SCALER_DISPCTRL);

        dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);

        HVS_WRITE(SCALER_DISPCTRL, dispctrl);
}

void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        u32 dispctrl = HVS_READ(SCALER_DISPCTRL);

        dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);

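        /* Clear any latched underrun status before re-enabling the
         * interrupt; the DISPSTAT bits appear to be write-one-to-clear,
         * as in vc4_hvs_irq_handler() below.
         */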
        HVS_WRITE(SCALER_DISPSTAT,
                  SCALER_DISPSTAT_EUFLOW(channel));
        HVS_WRITE(SCALER_DISPCTRL, dispctrl);
}

static void vc4_hvs_report_underrun(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        atomic_inc(&vc4->underrun);
        DRM_DEV_ERROR(dev->dev, "HVS underrun\n");
}

static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
{
        struct drm_device *dev = data;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        irqreturn_t irqret = IRQ_NONE;
        int channel;
        u32 control;
        u32 status;

        status = HVS_READ(SCALER_DISPSTAT);
        control = HVS_READ(SCALER_DISPCTRL);

        for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
                /* Interrupt masking is not always honored, so check it here. */
                if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
                    control & SCALER_DISPCTRL_DSPEISLUR(channel)) {
                        vc4_hvs_mask_underrun(dev, channel);
                        vc4_hvs_report_underrun(dev);

                        irqret = IRQ_HANDLED;
                }
        }

        /* Clear every per-channel interrupt flag. */
        HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) |
                                   SCALER_DISPSTAT_IRQMASK(1) |
                                   SCALER_DISPSTAT_IRQMASK(2));

        return irqret;
}

static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct drm_device *drm = dev_get_drvdata(master);
        struct vc4_dev *vc4 = drm->dev_private;
        struct vc4_hvs *hvs = NULL;
        int ret;
        u32 dispctrl;

        hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
        if (!hvs)
                return -ENOMEM;

        hvs->pdev = pdev;

        hvs->regs = vc4_ioremap_regs(pdev, 0);
        if (IS_ERR(hvs->regs))
                return PTR_ERR(hvs->regs);

        hvs->regset.base = hvs->regs;
        hvs->regset.regs = hvs_regs;
        hvs->regset.nregs = ARRAY_SIZE(hvs_regs);

        hvs->dlist = hvs->regs + SCALER_DLIST_START;

        spin_lock_init(&hvs->mm_lock);

        /* Set up the HVS display list memory manager.  We never
         * overwrite the setup from the bootloader (just 128b out of
         * our 16K), since we don't want to scramble the screen when
         * transitioning from the firmware's boot setup to runtime.
         */
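        /* Note: SCALER_DLIST_SIZE counts bytes, so the >> 2 below converts
         * it to the 32-bit-dword units the dlist allocator hands out
         * (HVS_BOOTLOADER_DLIST_END is already in dwords, i.e. the 128
         * bytes mentioned above).
         */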
        drm_mm_init(&hvs->dlist_mm,
                    HVS_BOOTLOADER_DLIST_END,
                    (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);

        /* Set up the HVS LBM memory manager.  We could have some more
         * complicated data structure that allowed reuse of LBM areas
         * between planes when they don't overlap on the screen, but
         * for now we just allocate globally.
         */
        drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);

        /* Upload filter kernels.  We only have the one for now, so we
         * keep it around for the lifetime of the driver.
         */
        ret = vc4_hvs_upload_linear_kernel(hvs,
                                           &hvs->mitchell_netravali_filter,
                                           mitchell_netravali_1_3_1_3_kernel);
        if (ret)
                return ret;

        vc4->hvs = hvs;

        dispctrl = HVS_READ(SCALER_DISPCTRL);

        dispctrl |= SCALER_DISPCTRL_ENABLE;
        dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) |
                    SCALER_DISPCTRL_DISPEIRQ(1) |
                    SCALER_DISPCTRL_DISPEIRQ(2);

        /* Set DSP3 (PV1) to use HVS channel 2, which would otherwise
         * be unused.
         */
        dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
        dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
                      SCALER_DISPCTRL_SLVWREIRQ |
                      SCALER_DISPCTRL_SLVRDEIRQ |
                      SCALER_DISPCTRL_DSPEIEOF(0) |
                      SCALER_DISPCTRL_DSPEIEOF(1) |
                      SCALER_DISPCTRL_DSPEIEOF(2) |
                      SCALER_DISPCTRL_DSPEIEOLN(0) |
                      SCALER_DISPCTRL_DSPEIEOLN(1) |
                      SCALER_DISPCTRL_DSPEIEOLN(2) |
                      SCALER_DISPCTRL_DSPEISLUR(0) |
                      SCALER_DISPCTRL_DSPEISLUR(1) |
                      SCALER_DISPCTRL_DSPEISLUR(2) |
                      SCALER_DISPCTRL_SCLEIRQ);
        dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

        HVS_WRITE(SCALER_DISPCTRL, dispctrl);

        ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
                               vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
        if (ret)
                return ret;

        vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);
        vc4_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun,
                             NULL);

        return 0;
}

static void vc4_hvs_unbind(struct device *dev, struct device *master,
                           void *data)
{
        struct drm_device *drm = dev_get_drvdata(master);
        struct vc4_dev *vc4 = drm->dev_private;

        if (vc4->hvs->mitchell_netravali_filter.allocated)
                drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);

        drm_mm_takedown(&vc4->hvs->dlist_mm);
        drm_mm_takedown(&vc4->hvs->lbm_mm);

        vc4->hvs = NULL;
}

static const struct component_ops vc4_hvs_ops = {
        .bind   = vc4_hvs_bind,
        .unbind = vc4_hvs_unbind,
};

static int vc4_hvs_dev_probe(struct platform_device *pdev)
{
        return component_add(&pdev->dev, &vc4_hvs_ops);
}

static int vc4_hvs_dev_remove(struct platform_device *pdev)
{
        component_del(&pdev->dev, &vc4_hvs_ops);
        return 0;
}

static const struct of_device_id vc4_hvs_dt_match[] = {
        { .compatible = "brcm,bcm2835-hvs" },
        {}
};

struct platform_driver vc4_hvs_driver = {
        .probe = vc4_hvs_dev_probe,
        .remove = vc4_hvs_dev_remove,
        .driver = {
                .name = "vc4_hvs",
                .of_match_table = vc4_hvs_dt_match,
        },
};