1 // SPDX-License-Identifier: LGPL-2.1
/*
 * A V4L2 frontend for the FWHT codec
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */
8 #include <linux/errno.h>
9 #include <linux/string.h>
10 #include <linux/videodev2.h>
11 #include "codec-v4l2-fwht.h"
13 static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = {
14 { V4L2_PIX_FMT_YUV420, 1, 3, 2, 1, 1, 2, 2, 3, 3, FWHT_FL_PIXENC_YUV},
15 { V4L2_PIX_FMT_YVU420, 1, 3, 2, 1, 1, 2, 2, 3, 3, FWHT_FL_PIXENC_YUV},
16 { V4L2_PIX_FMT_YUV422P, 1, 2, 1, 1, 1, 2, 1, 3, 3, FWHT_FL_PIXENC_YUV},
17 { V4L2_PIX_FMT_NV12, 1, 3, 2, 1, 2, 2, 2, 3, 2, FWHT_FL_PIXENC_YUV},
18 { V4L2_PIX_FMT_NV21, 1, 3, 2, 1, 2, 2, 2, 3, 2, FWHT_FL_PIXENC_YUV},
19 { V4L2_PIX_FMT_NV16, 1, 2, 1, 1, 2, 2, 1, 3, 2, FWHT_FL_PIXENC_YUV},
20 { V4L2_PIX_FMT_NV61, 1, 2, 1, 1, 2, 2, 1, 3, 2, FWHT_FL_PIXENC_YUV},
21 { V4L2_PIX_FMT_NV24, 1, 3, 1, 1, 2, 1, 1, 3, 2, FWHT_FL_PIXENC_YUV},
22 { V4L2_PIX_FMT_NV42, 1, 3, 1, 1, 2, 1, 1, 3, 2, FWHT_FL_PIXENC_YUV},
23 { V4L2_PIX_FMT_YUYV, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
24 { V4L2_PIX_FMT_YVYU, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
25 { V4L2_PIX_FMT_UYVY, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
26 { V4L2_PIX_FMT_VYUY, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
27 { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
28 { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
29 { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
30 { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
31 { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
32 { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
33 { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
34 { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
35 { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
36 { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
37 { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB},
40 const struct v4l2_fwht_pixfmt_info *v4l2_fwht_default_fmt(u32 width_div,
44 unsigned int start_idx)
48 for (i = 0; i < ARRAY_SIZE(v4l2_fwht_pixfmts); i++) {
49 if (v4l2_fwht_pixfmts[i].width_div == width_div &&
50 v4l2_fwht_pixfmts[i].height_div == height_div &&
51 (!pixenc || v4l2_fwht_pixfmts[i].pixenc == pixenc) &&
52 v4l2_fwht_pixfmts[i].components_num == components_num) {
54 return v4l2_fwht_pixfmts + i;
61 const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_pixfmt(u32 pixelformat)
65 for (i = 0; i < ARRAY_SIZE(v4l2_fwht_pixfmts); i++)
66 if (v4l2_fwht_pixfmts[i].id == pixelformat)
67 return v4l2_fwht_pixfmts + i;
71 const struct v4l2_fwht_pixfmt_info *v4l2_fwht_get_pixfmt(u32 idx)
73 if (idx >= ARRAY_SIZE(v4l2_fwht_pixfmts))
75 return v4l2_fwht_pixfmts + idx;
/*
 * v4l2_fwht_encode - compress one raw frame into an FWHT compressed frame
 * @state: codec state (format info, strides, GOP bookkeeping, QPs)
 * @p_in: raw input frame
 * @p_out: destination buffer; receives a struct fwht_cframe_hdr followed
 *         by the compressed data
 *
 * Returns the total number of bytes written (header + compressed payload).
 *
 * NOTE(review): this copy of the function is truncated. Missing from view:
 * the opening brace, the declarations of 'encoding' and 'flags', the
 * rf.luma/rf.cb/rf.cr/rf.alpha defaults, the switch statement wrapping the
 * case labels below, most case bodies for the packed YUV/RGB formats, the
 * 'break' statements, and the bodies of the two 'if's after
 * fwht_encode_frame(). Restore from the upstream source before building.
 */
int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
	/* bytes in one luma plane at coded resolution */
	unsigned int size = state->stride * state->coded_height;
	unsigned int chroma_stride = state->stride;
	const struct v4l2_fwht_pixfmt_info *info = state->info;
	struct fwht_cframe_hdr *p_hdr;
	struct fwht_cframe cf;
	struct fwht_raw_frame rf;
	/* describe the raw input layout to the FWHT core */
	rf.width_div = info->width_div;
	rf.height_div = info->height_div;
	rf.luma_alpha_step = info->luma_alpha_step;
	rf.chroma_step = info->chroma_step;
	rf.components_num = info->components_num;
	/* per-format plane pointers inside the single input buffer */
	case V4L2_PIX_FMT_GREY:
	case V4L2_PIX_FMT_YUV420:
		/* planar 4:2:0: Cb then Cr follow the luma plane */
		rf.cb = rf.luma + size;
		rf.cr = rf.cb + size / 4;
	case V4L2_PIX_FMT_YVU420:
		/* same as YUV420 but with Cr before Cb */
		rf.cr = rf.luma + size;
		rf.cb = rf.cr + size / 4;
	case V4L2_PIX_FMT_YUV422P:
		/* planar 4:2:2: half-width, full-height chroma planes */
		rf.cb = rf.luma + size;
		rf.cr = rf.cb + size / 2;
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV24:
		/* semi-planar, Cb first in the interleaved chroma plane */
		rf.cb = rf.luma + size;
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV61:
	case V4L2_PIX_FMT_NV42:
		/* semi-planar, Cr first in the interleaved chroma plane */
		rf.cr = rf.luma + size;
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
	case V4L2_PIX_FMT_RGB24:
	case V4L2_PIX_FMT_HSV24:
	case V4L2_PIX_FMT_BGR24:
	case V4L2_PIX_FMT_RGB32:
	case V4L2_PIX_FMT_XRGB32:
	case V4L2_PIX_FMT_HSV32:
	case V4L2_PIX_FMT_BGR32:
	case V4L2_PIX_FMT_XBGR32:
	case V4L2_PIX_FMT_ARGB32:
	case V4L2_PIX_FMT_ABGR32:
		/* alpha byte follows the Cr byte within each pixel */
		rf.alpha = rf.cr + 1;
	cf.i_frame_qp = state->i_frame_qp;
	cf.p_frame_qp = state->p_frame_qp;
	/* compressed payload starts right after the header */
	cf.rlc_data = (__be16 *)(p_out + sizeof(*p_hdr));
	/* force an I-frame on the last frame of each GOP */
	encoding = fwht_encode_frame(&rf, &state->ref_frame, &cf,
				     state->gop_cnt == state->gop_size - 1,
				     state->visible_width,
				     state->visible_height,
				     state->stride, chroma_stride);
	/* bodies of these two 'if's are missing from this copy —
	 * presumably GOP counter reset logic; restore from upstream */
	if (!(encoding & FWHT_FRAME_PCODED))
	if (++state->gop_cnt >= state->gop_size)
	/* fill in the big-endian compressed frame header */
	p_hdr = (struct fwht_cframe_hdr *)p_out;
	p_hdr->magic1 = FWHT_MAGIC1;
	p_hdr->magic2 = FWHT_MAGIC2;
	p_hdr->version = htonl(FWHT_VERSION);
	p_hdr->width = htonl(state->visible_width);
	p_hdr->height = htonl(state->visible_height);
	/* component count is stored biased by -1, plus the pixel encoding */
	flags |= (info->components_num - 1) << FWHT_FL_COMPONENTS_NUM_OFFSET;
	flags |= info->pixenc;
	if (encoding & FWHT_LUMA_UNENCODED)
		flags |= FWHT_FL_LUMA_IS_UNCOMPRESSED;
	if (encoding & FWHT_CB_UNENCODED)
		flags |= FWHT_FL_CB_IS_UNCOMPRESSED;
	if (encoding & FWHT_CR_UNENCODED)
		flags |= FWHT_FL_CR_IS_UNCOMPRESSED;
	if (encoding & FWHT_ALPHA_UNENCODED)
		flags |= FWHT_FL_ALPHA_IS_UNCOMPRESSED;
	if (!(encoding & FWHT_FRAME_PCODED))
		flags |= FWHT_FL_I_FRAME;
	/* advertise the chroma subsampling of the source format */
	if (rf.height_div == 1)
		flags |= FWHT_FL_CHROMA_FULL_HEIGHT;
	if (rf.width_div == 1)
		flags |= FWHT_FL_CHROMA_FULL_WIDTH;
	p_hdr->flags = htonl(flags);
	p_hdr->colorspace = htonl(state->colorspace);
	p_hdr->xfer_func = htonl(state->xfer_func);
	p_hdr->ycbcr_enc = htonl(state->ycbcr_enc);
	p_hdr->quantization = htonl(state->quantization);
	p_hdr->size = htonl(cf.size);
	return cf.size + sizeof(*p_hdr);
/*
 * v4l2_fwht_decode - decompress one FWHT frame into a raw output frame
 * @state: codec state; the compressed frame header must already be parsed
 *         into state->header, and state->info selects the output format
 * @p_in: compressed payload (RLC data, header excluded)
 * @p_out: destination raw frame buffer
 *
 * Validates the header, decodes into state->ref_frame, then converts the
 * planar reference frame into the requested output pixel format.
 *
 * NOTE(review): this copy of the function is truncated — the opening
 * brace, the declarations of 'flags', 'p' and 'ref_p', the error-return
 * statements after each validation check, the info lookup, the 'k'
 * initializations/increments in the conversion loops, the 'break'
 * statements, closing braces and the function epilogue are all missing.
 * Restore from the upstream source before building.
 */
int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
	unsigned int i, j, k;
	struct fwht_cframe cf;
	unsigned int components_num = 3;
	unsigned int version;
	const struct v4l2_fwht_pixfmt_info *info;
	unsigned int hdr_width_div, hdr_height_div;
	/* validate the compressed frame header before trusting any field */
	version = ntohl(state->header.version);
	if (!version || version > FWHT_VERSION) {
		pr_err("version %d is not supported, current version is %d\n",
		       version, FWHT_VERSION);
	if (state->header.magic1 != FWHT_MAGIC1 ||
	    state->header.magic2 != FWHT_MAGIC2)
	/* TODO: support resolution changes */
	if (ntohl(state->header.width) != state->visible_width ||
	    ntohl(state->header.height) != state->visible_height)
	flags = ntohl(state->header.flags);
	/* stream pixel encoding must match the negotiated output format */
	if ((flags & FWHT_FL_PIXENC_MSK) != info->pixenc)
	/* component count is stored biased by -1 in the flags field */
	components_num = 1 + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >>
			FWHT_FL_COMPONENTS_NUM_OFFSET);
	if (components_num != info->components_num)
	/* propagate colorimetry from the stream to the decoded format */
	state->colorspace = ntohl(state->header.colorspace);
	state->xfer_func = ntohl(state->header.xfer_func);
	state->ycbcr_enc = ntohl(state->header.ycbcr_enc);
	state->quantization = ntohl(state->header.quantization);
	cf.rlc_data = (__be16 *)p_in;
	cf.size = ntohl(state->header.size);
	/* chroma subsampling advertised by the stream must match the format */
	hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2;
	hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2;
	if (hdr_width_div != info->width_div ||
	    hdr_height_div != info->height_div)
	/* decode into the planar reference frame, then convert below */
	if (!fwht_decode_frame(&cf, &state->ref_frame, flags, components_num,
			       state->visible_width, state->visible_height,
	/*
	 * TODO - handle the case where the compressed stream encodes a
	 * different format than the requested decoded format.
	 */
	switch (state->info->id) {
	case V4L2_PIX_FMT_GREY:
		/* single plane: copy luma line by line, honoring the stride */
		ref_p = state->ref_frame.luma;
		for (i = 0; i < state->coded_height; i++) {
			memcpy(p_out, ref_p, state->visible_width);
			p_out += state->stride;
			ref_p += state->coded_width;
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YUV422P:
		/* planar: luma plane, then Cb, then Cr.
		 * NOTE(review): per the pixfmt table, YUV422P chroma planes
		 * are full height (height_div == 1), yet only
		 * coded_height / 2 chroma lines are copied here — this looks
		 * wrong for 4:2:2; verify against upstream fixes. */
		ref_p = state->ref_frame.luma;
		for (i = 0; i < state->coded_height; i++) {
			memcpy(p_out, ref_p, state->visible_width);
			p_out += state->stride;
			ref_p += state->coded_width;
		ref_p = state->ref_frame.cb;
		for (i = 0; i < state->coded_height / 2; i++) {
			memcpy(p_out, ref_p, state->visible_width / 2);
			p_out += state->stride / 2;
			ref_p += state->coded_width / 2;
		ref_p = state->ref_frame.cr;
		for (i = 0; i < state->coded_height / 2; i++) {
			memcpy(p_out, ref_p, state->visible_width / 2);
			p_out += state->stride / 2;
			ref_p += state->coded_width / 2;
	case V4L2_PIX_FMT_YVU420:
		/* same as YUV420 but with the Cr plane before Cb */
		ref_p = state->ref_frame.luma;
		for (i = 0; i < state->coded_height; i++) {
			memcpy(p_out, ref_p, state->visible_width);
			p_out += state->stride;
			ref_p += state->coded_width;
		ref_p = state->ref_frame.cr;
		for (i = 0; i < state->coded_height / 2; i++) {
			memcpy(p_out, ref_p, state->visible_width / 2);
			p_out += state->stride / 2;
			ref_p += state->coded_width / 2;
		ref_p = state->ref_frame.cb;
		for (i = 0; i < state->coded_height / 2; i++) {
			memcpy(p_out, ref_p, state->visible_width / 2);
			p_out += state->stride / 2;
			ref_p += state->coded_width / 2;
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV24:
		/* semi-planar: luma plane, then interleaved Cb/Cr pairs */
		ref_p = state->ref_frame.luma;
		for (i = 0; i < state->coded_height; i++) {
			memcpy(p_out, ref_p, state->visible_width);
			p_out += state->stride;
			ref_p += state->coded_width;
		for (i = 0; i < state->coded_height / 2; i++) {
			for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
				*p++ = state->ref_frame.cb[k];
				*p++ = state->ref_frame.cr[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV61:
	case V4L2_PIX_FMT_NV42:
		/* semi-planar with Cr/Cb order swapped */
		ref_p = state->ref_frame.luma;
		for (i = 0; i < state->coded_height; i++) {
			memcpy(p_out, ref_p, state->visible_width);
			p_out += state->stride;
			ref_p += state->coded_width;
		for (i = 0; i < state->coded_height / 2; i++) {
			for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
				*p++ = state->ref_frame.cr[k];
				*p++ = state->ref_frame.cb[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_YUYV:
		/* packed 4:2:2: Y0 Cb Y1 Cr per macropixel */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cb[k / 2];
				*p++ = state->ref_frame.luma[k + 1];
				*p++ = state->ref_frame.cr[k / 2];
			p_out += state->stride;
	case V4L2_PIX_FMT_YVYU:
		/* packed 4:2:2: Y0 Cr Y1 Cb */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cr[k / 2];
				*p++ = state->ref_frame.luma[k + 1];
				*p++ = state->ref_frame.cb[k / 2];
			p_out += state->stride;
	case V4L2_PIX_FMT_UYVY:
		/* packed 4:2:2: Cb Y0 Cr Y1 */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
				*p++ = state->ref_frame.cb[k / 2];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cr[k / 2];
				*p++ = state->ref_frame.luma[k + 1];
			p_out += state->stride;
	case V4L2_PIX_FMT_VYUY:
		/* packed 4:2:2: Cr Y0 Cb Y1 */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
				*p++ = state->ref_frame.cr[k / 2];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cb[k / 2];
				*p++ = state->ref_frame.luma[k + 1];
			p_out += state->stride;
	case V4L2_PIX_FMT_RGB24:
	case V4L2_PIX_FMT_HSV24:
		/* 3 bytes/pixel; planes map cr/luma/cb -> R/G/B (or H/S/V) */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width; j++) {
				*p++ = state->ref_frame.cr[k];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cb[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_BGR24:
		/* 3 bytes/pixel, reversed component order */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width; j++) {
				*p++ = state->ref_frame.cb[k];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cr[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_RGB32:
	case V4L2_PIX_FMT_XRGB32:
	case V4L2_PIX_FMT_HSV32:
		/* 4 bytes/pixel; the padding-byte skip is among the lines
		 * missing from this copy */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width; j++) {
				*p++ = state->ref_frame.cr[k];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cb[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_BGR32:
	case V4L2_PIX_FMT_XBGR32:
		/* 4 bytes/pixel, reversed order, trailing padding byte */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width; j++) {
				*p++ = state->ref_frame.cb[k];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cr[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_ARGB32:
		/* 4 bytes/pixel with a real alpha component first */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width; j++) {
				*p++ = state->ref_frame.alpha[k];
				*p++ = state->ref_frame.cr[k];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cb[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_ABGR32:
		/* 4 bytes/pixel, reversed order, alpha last */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width; j++) {
				*p++ = state->ref_frame.cb[k];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cr[k];
				*p++ = state->ref_frame.alpha[k];
			p_out += state->stride;