/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <drm/drm_util.h>

#include "mdp5_kms.h"
#include "mdp5_smp.h"
26 struct drm_device *dev;
28 uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */
36 u32 pipe_reqprio_fifo_wm0[SSPP_MAX];
37 u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
38 u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
42 struct mdp5_kms *get_kms(struct mdp5_smp *smp)
44 struct msm_drm_private *priv = smp->dev->dev_private;
46 return to_mdp5_kms(to_mdp_kms(priv->kms));
49 static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
53 if (WARN_ON(plane >= pipe2nclients(pipe)))
57 * Note on SMP clients:
58 * For ViG pipes, fetch Y/Cr/Cb-components clients are always
59 * consecutive, and in that order.
62 * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
63 * Y plane's client ID is N
64 * Cr plane's client ID is N + 1
65 * Cb plane's client ID is N + 2
68 return mdp5_cfg->smp.clients[pipe] + plane;
71 /* allocate blocks for the specified request: */
72 static int smp_request_block(struct mdp5_smp *smp,
73 struct mdp5_smp_state *state,
76 void *cs = state->client_state[cid];
77 int i, avail, cnt = smp->blk_cnt;
80 /* we shouldn't be requesting blocks for an in-use client: */
81 WARN_ON(bitmap_weight(cs, cnt) > 0);
83 reserved = smp->reserved[cid];
86 nblks = max(0, nblks - reserved);
87 DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
90 avail = cnt - bitmap_weight(state->state, cnt);
92 DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
97 for (i = 0; i < nblks; i++) {
98 int blk = find_first_zero_bit(state->state, cnt);
100 set_bit(blk, state->state);
106 static void set_fifo_thresholds(struct mdp5_smp *smp,
107 enum mdp5_pipe pipe, int nblks)
109 u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
112 /* 1/4 of SMP pool that is being fetched */
113 val = (nblks * smp_entries_per_blk) / 4;
115 smp->pipe_reqprio_fifo_wm0[pipe] = val * 1;
116 smp->pipe_reqprio_fifo_wm1[pipe] = val * 2;
117 smp->pipe_reqprio_fifo_wm2[pipe] = val * 3;
121 * NOTE: looks like if horizontal decimation is used (if we supported that)
122 * then the width used to calculate SMP block requirements is the post-
123 * decimated width. Ie. SMP buffering sits downstream of decimation (which
124 * presumably happens during the dma from scanout buffer).
126 uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
127 const struct mdp_format *format,
128 u32 width, bool hdecim)
130 struct mdp5_kms *mdp5_kms = get_kms(smp);
131 int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
132 int i, hsub, nplanes, nlines;
133 u32 fmt = format->base.pixel_format;
136 nplanes = drm_format_num_planes(fmt);
137 hsub = drm_format_horz_chroma_subsampling(fmt);
139 /* different if BWC (compressed framebuffer?) enabled: */
142 /* Newer MDPs have split/packing logic, which fetches sub-sampled
143 * U and V components (splits them from Y if necessary) and packs
144 * them together, writes to SMP using a single client.
146 if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
147 fmt = DRM_FORMAT_NV24;
150 /* if decimation is enabled, HW decimates less on the
151 * sub sampled chroma components
153 if (hdecim && (hsub > 1))
157 for (i = 0; i < nplanes; i++) {
158 int n, fetch_stride, cpp;
160 cpp = drm_format_plane_cpp(fmt, i);
161 fetch_stride = width * cpp / (i ? hsub : 1);
163 n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
165 /* for hw rev v1.00 */
167 n = roundup_pow_of_two(n);
169 blkcfg |= (n << (8 * i));
175 int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
176 enum mdp5_pipe pipe, uint32_t blkcfg)
178 struct mdp5_kms *mdp5_kms = get_kms(smp);
179 struct drm_device *dev = mdp5_kms->dev;
182 for (i = 0; i < pipe2nclients(pipe); i++) {
183 u32 cid = pipe2client(pipe, i);
184 int n = blkcfg & 0xff;
189 DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
190 ret = smp_request_block(smp, state, cid, n);
192 DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
200 state->assigned |= (1 << pipe);
205 /* Release SMP blocks for all clients of the pipe */
206 void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
210 int cnt = smp->blk_cnt;
212 for (i = 0; i < pipe2nclients(pipe); i++) {
213 u32 cid = pipe2client(pipe, i);
214 void *cs = state->client_state[cid];
216 /* update global state: */
217 bitmap_andnot(state->state, state->state, cs, cnt);
219 /* clear client's state */
220 bitmap_zero(cs, cnt);
223 state->released |= (1 << pipe);
226 /* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to
227 * happen after scanout completes.
229 static unsigned update_smp_state(struct mdp5_smp *smp,
230 u32 cid, mdp5_smp_state_t *assigned)
232 int cnt = smp->blk_cnt;
236 for_each_set_bit(blk, *assigned, cnt) {
240 val = smp->alloc_w[idx];
244 val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
245 val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
248 val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
249 val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
252 val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
253 val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
257 smp->alloc_w[idx] = val;
258 smp->alloc_r[idx] = val;
266 static void write_smp_alloc_regs(struct mdp5_smp *smp)
268 struct mdp5_kms *mdp5_kms = get_kms(smp);
271 num_regs = smp->blk_cnt / 3 + 1;
273 for (i = 0; i < num_regs; i++) {
274 mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i),
276 mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i),
281 static void write_smp_fifo_regs(struct mdp5_smp *smp)
283 struct mdp5_kms *mdp5_kms = get_kms(smp);
286 for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
287 struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
288 enum mdp5_pipe pipe = hwpipe->pipe;
290 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe),
291 smp->pipe_reqprio_fifo_wm0[pipe]);
292 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe),
293 smp->pipe_reqprio_fifo_wm1[pipe]);
294 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe),
295 smp->pipe_reqprio_fifo_wm2[pipe]);
299 void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
303 for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) {
304 unsigned i, nblks = 0;
306 for (i = 0; i < pipe2nclients(pipe); i++) {
307 u32 cid = pipe2client(pipe, i);
308 void *cs = state->client_state[cid];
310 nblks += update_smp_state(smp, cid, cs);
312 DBG("assign %s:%u, %u blks",
313 pipe2name(pipe), i, nblks);
316 set_fifo_thresholds(smp, pipe, nblks);
319 write_smp_alloc_regs(smp);
320 write_smp_fifo_regs(smp);
325 void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
329 for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) {
330 DBG("release %s", pipe2name(pipe));
331 set_fifo_thresholds(smp, pipe, 0);
334 write_smp_fifo_regs(smp);
339 void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
341 struct mdp5_kms *mdp5_kms = get_kms(smp);
342 struct mdp5_hw_pipe_state *hwpstate;
343 struct mdp5_smp_state *state;
344 struct mdp5_global_state *global_state;
347 drm_printf(p, "name\tinuse\tplane\n");
348 drm_printf(p, "----\t-----\t-----\n");
351 drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL);
353 global_state = mdp5_get_existing_global_state(mdp5_kms);
355 /* grab these *after* we hold the state_lock */
356 hwpstate = &global_state->hwpipe;
357 state = &global_state->smp;
359 for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
360 struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
361 struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx];
362 enum mdp5_pipe pipe = hwpipe->pipe;
363 for (j = 0; j < pipe2nclients(pipe); j++) {
364 u32 cid = pipe2client(pipe, j);
365 void *cs = state->client_state[cid];
366 int inuse = bitmap_weight(cs, smp->blk_cnt);
368 drm_printf(p, "%s:%d\t%d\t%s\n",
369 pipe2name(pipe), j, inuse,
370 plane ? plane->name : NULL);
376 drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
377 drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
378 bitmap_weight(state->state, smp->blk_cnt));
381 drm_modeset_unlock(&mdp5_kms->glob_state_lock);
/* Free the SMP bookkeeping struct; safe to call with NULL (kfree no-op). */
void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}
389 struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
391 struct mdp5_smp_state *state;
392 struct mdp5_global_state *global_state;
393 struct mdp5_smp *smp = NULL;
396 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
397 if (unlikely(!smp)) {
402 smp->dev = mdp5_kms->dev;
403 smp->blk_cnt = cfg->mmb_count;
404 smp->blk_size = cfg->mmb_size;
406 global_state = mdp5_get_existing_global_state(mdp5_kms);
407 state = &global_state->smp;
409 /* statically tied MMBs cannot be re-allocated: */
410 bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
411 memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
416 mdp5_smp_destroy(smp);