1 // SPDX-License-Identifier: LGPL-2.1
/*
 * A V4L2 frontend for the FWHT codec
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */
8 #include <linux/errno.h>
9 #include <linux/string.h>
10 #include <linux/videodev2.h>
11 #include "codec-v4l2-fwht.h"
13 static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = {
14 { V4L2_PIX_FMT_YUV420, 1, 3, 2, 1, 1, 2, 2, 3, 3, FWHT_FL_PIXENC_YUV},
15 { V4L2_PIX_FMT_YVU420, 1, 3, 2, 1, 1, 2, 2, 3, 3, FWHT_FL_PIXENC_YUV},
16 { V4L2_PIX_FMT_YUV422P, 1, 2, 1, 1, 1, 2, 1, 3, 3, FWHT_FL_PIXENC_YUV},
17 { V4L2_PIX_FMT_NV12, 1, 3, 2, 1, 2, 2, 2, 3, 2, FWHT_FL_PIXENC_YUV},
18 { V4L2_PIX_FMT_NV21, 1, 3, 2, 1, 2, 2, 2, 3, 2, FWHT_FL_PIXENC_YUV},
19 { V4L2_PIX_FMT_NV16, 1, 2, 1, 1, 2, 2, 1, 3, 2, FWHT_FL_PIXENC_YUV},
20 { V4L2_PIX_FMT_NV61, 1, 2, 1, 1, 2, 2, 1, 3, 2, FWHT_FL_PIXENC_YUV},
21 { V4L2_PIX_FMT_NV24, 1, 3, 1, 1, 2, 1, 1, 3, 2, FWHT_FL_PIXENC_YUV},
22 { V4L2_PIX_FMT_NV42, 1, 3, 1, 1, 2, 1, 1, 3, 2, FWHT_FL_PIXENC_YUV},
23 { V4L2_PIX_FMT_YUYV, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
24 { V4L2_PIX_FMT_YVYU, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
25 { V4L2_PIX_FMT_UYVY, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
26 { V4L2_PIX_FMT_VYUY, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
27 { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
28 { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
29 { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
30 { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
31 { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
32 { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
33 { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
34 { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
35 { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
36 { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
37 { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB},
40 const struct v4l2_fwht_pixfmt_info *v4l2_fwht_default_fmt(u32 width_div,
44 unsigned int start_idx)
48 for (i = 0; i < ARRAY_SIZE(v4l2_fwht_pixfmts); i++) {
49 if (v4l2_fwht_pixfmts[i].width_div == width_div &&
50 v4l2_fwht_pixfmts[i].height_div == height_div &&
51 (!pixenc || v4l2_fwht_pixfmts[i].pixenc == pixenc) &&
52 v4l2_fwht_pixfmts[i].components_num == components_num) {
54 return v4l2_fwht_pixfmts + i;
61 const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_pixfmt(u32 pixelformat)
65 for (i = 0; i < ARRAY_SIZE(v4l2_fwht_pixfmts); i++)
66 if (v4l2_fwht_pixfmts[i].id == pixelformat)
67 return v4l2_fwht_pixfmts + i;
71 const struct v4l2_fwht_pixfmt_info *v4l2_fwht_get_pixfmt(u32 idx)
73 if (idx >= ARRAY_SIZE(v4l2_fwht_pixfmts))
75 return v4l2_fwht_pixfmts + idx;
/*
 * v4l2_fwht_encode() - compress one raw frame from @p_in into an FWHT
 * compressed frame (header followed by RLC data) at @p_out.
 *
 * Returns the number of bytes written: cf.size plus the header size.
 *
 * NOTE(review): this chunk is a mutilated extraction.  Missing from view:
 * the 'switch (info->id) {' opener and its closing brace, the per-case
 * 'break;' statements, the luma/plane setup before the switch (upstream
 * has 'rf.luma = p_in;' etc.), the packed-format case bodies, and local
 * declarations such as 'u32 encoding;' and 'u32 flags = 0;'.  The visible
 * code is kept byte-identical; compare against the upstream file before
 * building.
 */
int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
	/* Byte size of one full-resolution (luma) plane. */
	unsigned int size = state->stride * state->coded_height;
	unsigned int chroma_stride = state->stride;
	const struct v4l2_fwht_pixfmt_info *info = state->info;
	struct fwht_cframe_hdr *p_hdr;
	struct fwht_cframe cf;
	struct fwht_raw_frame rf;

	/* Describe the raw input layout from the pixel format info. */
	rf.width_div = info->width_div;
	rf.height_div = info->height_div;
	rf.luma_alpha_step = info->luma_alpha_step;
	rf.chroma_step = info->chroma_step;
	rf.components_num = info->components_num;

	/*
	 * Per-format plane pointer setup inside @p_in (the 'switch' opener
	 * itself is missing from this extraction).
	 */
	case V4L2_PIX_FMT_GREY:
	case V4L2_PIX_FMT_YUV420:
		/* 4:2:0 planar: Cb plane after luma, Cr after Cb (1/4 size). */
		rf.cb = rf.luma + size;
		rf.cr = rf.cb + size / 4;
	case V4L2_PIX_FMT_YVU420:
		/* As YUV420, but with the Cr and Cb planes swapped. */
		rf.cr = rf.luma + size;
		rf.cb = rf.cr + size / 4;
	case V4L2_PIX_FMT_YUV422P:
		/* 4:2:2 planar: each chroma plane is half the luma size. */
		rf.cb = rf.luma + size;
		rf.cr = rf.cb + size / 2;
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV24:
		/* Semi-planar CbCr interleaved: Cb leads the chroma plane. */
		rf.cb = rf.luma + size;
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV61:
	case V4L2_PIX_FMT_NV42:
		/* Semi-planar CrCb interleaved: Cr leads the chroma plane. */
		rf.cr = rf.luma + size;
	/*
	 * Packed YUV and RGB/HSV variants: the per-case pointer setup lines
	 * are missing from this extraction.
	 */
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
	case V4L2_PIX_FMT_RGB24:
	case V4L2_PIX_FMT_HSV24:
	case V4L2_PIX_FMT_BGR24:
	case V4L2_PIX_FMT_RGB32:
	case V4L2_PIX_FMT_XRGB32:
	case V4L2_PIX_FMT_HSV32:
	case V4L2_PIX_FMT_BGR32:
	case V4L2_PIX_FMT_XBGR32:
	case V4L2_PIX_FMT_ARGB32:
	case V4L2_PIX_FMT_ABGR32:
		/* ABGR32: the alpha byte follows Cr in each 4-byte pixel. */
		rf.alpha = rf.cr + 1;

	/* QP for intra and predicted frames comes from the codec state. */
	cf.i_frame_qp = state->i_frame_qp;
	cf.p_frame_qp = state->p_frame_qp;
	/* The RLC payload is written right after the compressed header. */
	cf.rlc_data = (__be16 *)(p_out + sizeof(*p_hdr));

	encoding = fwht_encode_frame(&rf, &state->ref_frame, &cf,
				     state->gop_cnt == state->gop_size - 1,
				     state->visible_width,
				     state->visible_height,
				     state->stride, chroma_stride);
	/*
	 * GOP accounting: restart when no P-frame could be produced or the
	 * GOP length is reached (the bodies of both ifs are missing here).
	 */
	if (!(encoding & FWHT_FRAME_PCODED))
	if (++state->gop_cnt >= state->gop_size)

	/* Fill the header; multi-byte fields are stored big endian. */
	p_hdr = (struct fwht_cframe_hdr *)p_out;
	p_hdr->magic1 = FWHT_MAGIC1;
	p_hdr->magic2 = FWHT_MAGIC2;
	p_hdr->version = htonl(FWHT_VERSION);
	p_hdr->width = htonl(state->visible_width);
	p_hdr->height = htonl(state->visible_height);
	/* Encode component count (biased by 1) and pixel encoding in flags. */
	flags |= (info->components_num - 1) << FWHT_FL_COMPONENTS_NUM_OFFSET;
	flags |= info->pixenc;
	/* Mark any component the encoder chose to store uncompressed. */
	if (encoding & FWHT_LUMA_UNENCODED)
		flags |= FWHT_FL_LUMA_IS_UNCOMPRESSED;
	if (encoding & FWHT_CB_UNENCODED)
		flags |= FWHT_FL_CB_IS_UNCOMPRESSED;
	if (encoding & FWHT_CR_UNENCODED)
		flags |= FWHT_FL_CR_IS_UNCOMPRESSED;
	if (encoding & FWHT_ALPHA_UNENCODED)
		flags |= FWHT_FL_ALPHA_IS_UNCOMPRESSED;
	/* Record the chroma subsampling of the source format. */
	if (rf.height_div == 1)
		flags |= FWHT_FL_CHROMA_FULL_HEIGHT;
	if (rf.width_div == 1)
		flags |= FWHT_FL_CHROMA_FULL_WIDTH;
	p_hdr->flags = htonl(flags);
	p_hdr->colorspace = htonl(state->colorspace);
	p_hdr->xfer_func = htonl(state->xfer_func);
	p_hdr->ycbcr_enc = htonl(state->ycbcr_enc);
	p_hdr->quantization = htonl(state->quantization);
	p_hdr->size = htonl(cf.size);
	return cf.size + sizeof(*p_hdr);
/*
 * v4l2_fwht_decode() - decompress one FWHT compressed frame from @p_in
 * into the raw output buffer @p_out, decoding via state->ref_frame.
 *
 * NOTE(review): this chunk is a mutilated extraction and also runs past
 * the visible end of the file.  Missing from view: local declarations
 * ('u32 flags;', 'u8 *p, *ref_p;' — both are used below), the error
 * 'return's after each validation check, 'k = 0;' initialisation and
 * 'k++;' bookkeeping in the packed-format loops, per-case 'break;'s, the
 * tail of the fwht_decode_frame() argument list, and the final
 * 'return 0;' with closing braces.  The visible code is kept
 * byte-identical; compare against the upstream file before building.
 */
int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
	unsigned int i, j, k;
	struct fwht_cframe_hdr *p_hdr;
	struct fwht_cframe cf;
	/* Default for pre-FWHT_VERSION headers that carry no component count. */
	unsigned int components_num = 3;
	unsigned int version;
	const struct v4l2_fwht_pixfmt_info *info;
	unsigned int hdr_width_div, hdr_height_div;

	p_hdr = (struct fwht_cframe_hdr *)p_in;
	version = ntohl(p_hdr->version);
	/* Reject unknown or zero stream versions. */
	if (!version || version > FWHT_VERSION) {
		pr_err("version %d is not supported, current version is %d\n",
		       version, FWHT_VERSION);

	/* Both magic markers must match before trusting the header. */
	if (p_hdr->magic1 != FWHT_MAGIC1 ||
	    p_hdr->magic2 != FWHT_MAGIC2)

	/* TODO: support resolution changes */
	if (ntohl(p_hdr->width) != state->visible_width ||
	    ntohl(p_hdr->height) != state->visible_height)

	flags = ntohl(p_hdr->flags);

	/* Current-version streams encode pixenc and component count in flags. */
	if (version == FWHT_VERSION) {
		if ((flags & FWHT_FL_PIXENC_MSK) != info->pixenc)
		components_num = 1 + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >>
				      FWHT_FL_COMPONENTS_NUM_OFFSET);

	/* The stream's component count must match the requested format. */
	if (components_num != info->components_num)

	/* Propagate colorimetry from the compressed header into the state. */
	state->colorspace = ntohl(p_hdr->colorspace);
	state->xfer_func = ntohl(p_hdr->xfer_func);
	state->ycbcr_enc = ntohl(p_hdr->ycbcr_enc);
	state->quantization = ntohl(p_hdr->quantization);
	/* RLC payload starts right after the header. */
	cf.rlc_data = (__be16 *)(p_in + sizeof(*p_hdr));

	/* The header's chroma subsampling must match the output format. */
	hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2;
	hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2;
	if (hdr_width_div != info->width_div ||
	    hdr_height_div != info->height_div)

	/* Decode into the reference frame planes (call truncated here). */
	fwht_decode_frame(&cf, &state->ref_frame, flags, components_num,
			  state->visible_width, state->visible_height,

	/*
	 * TODO - handle the case where the compressed stream encodes a
	 * different format than the requested decoded format.
	 */
	switch (state->info->id) {
	case V4L2_PIX_FMT_GREY:
		/* Copy the luma plane row by row, honouring the stride. */
		ref_p = state->ref_frame.luma;
		for (i = 0; i < state->coded_height; i++) {
			memcpy(p_out, ref_p, state->visible_width);
			p_out += state->stride;
			ref_p += state->coded_width;
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YUV422P:
		/* Luma plane first, then Cb, then Cr. */
		ref_p = state->ref_frame.luma;
		for (i = 0; i < state->coded_height; i++) {
			memcpy(p_out, ref_p, state->visible_width);
			p_out += state->stride;
			ref_p += state->coded_width;
		/*
		 * NOTE(review): the /2 height loops below are correct for
		 * 4:2:0, but for YUV422P the chroma height equals the luma
		 * height — looks like an upstream bug, confirm before reuse.
		 */
		ref_p = state->ref_frame.cb;
		for (i = 0; i < state->coded_height / 2; i++) {
			memcpy(p_out, ref_p, state->visible_width / 2);
			p_out += state->stride / 2;
			ref_p += state->coded_width / 2;
		ref_p = state->ref_frame.cr;
		for (i = 0; i < state->coded_height / 2; i++) {
			memcpy(p_out, ref_p, state->visible_width / 2);
			p_out += state->stride / 2;
			ref_p += state->coded_width / 2;
	case V4L2_PIX_FMT_YVU420:
		/* As YUV420, but the Cr plane is written before Cb. */
		ref_p = state->ref_frame.luma;
		for (i = 0; i < state->coded_height; i++) {
			memcpy(p_out, ref_p, state->visible_width);
			p_out += state->stride;
			ref_p += state->coded_width;
		ref_p = state->ref_frame.cr;
		for (i = 0; i < state->coded_height / 2; i++) {
			memcpy(p_out, ref_p, state->visible_width / 2);
			p_out += state->stride / 2;
			ref_p += state->coded_width / 2;
		ref_p = state->ref_frame.cb;
		for (i = 0; i < state->coded_height / 2; i++) {
			memcpy(p_out, ref_p, state->visible_width / 2);
			p_out += state->stride / 2;
			ref_p += state->coded_width / 2;
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV24:
		/* Luma plane, then interleaved Cb/Cr pairs. */
		ref_p = state->ref_frame.luma;
		for (i = 0; i < state->coded_height; i++) {
			memcpy(p_out, ref_p, state->visible_width);
			p_out += state->stride;
			ref_p += state->coded_width;
		/*
		 * NOTE(review): these /2 loop bounds match NV12 only; NV16
		 * (full-height) and NV24 (full-size) chroma look wrong —
		 * confirm against upstream fixes.  'k = 0;' and 'k++;' are
		 * missing from this extraction.
		 */
		for (i = 0; i < state->coded_height / 2; i++) {
			for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
				*p++ = state->ref_frame.cb[k];
				*p++ = state->ref_frame.cr[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV61:
	case V4L2_PIX_FMT_NV42:
		/* As above with Cr before Cb in each interleaved pair. */
		ref_p = state->ref_frame.luma;
		for (i = 0; i < state->coded_height; i++) {
			memcpy(p_out, ref_p, state->visible_width);
			p_out += state->stride;
			ref_p += state->coded_width;
		for (i = 0; i < state->coded_height / 2; i++) {
			for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
				*p++ = state->ref_frame.cr[k];
				*p++ = state->ref_frame.cb[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_YUYV:
		/* Packed 4:2:2 — Y0 Cb Y1 Cr; chroma shared by pixel pairs. */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cb[k / 2];
				*p++ = state->ref_frame.luma[k + 1];
				*p++ = state->ref_frame.cr[k / 2];
			p_out += state->stride;
	case V4L2_PIX_FMT_YVYU:
		/* Packed 4:2:2 — Y0 Cr Y1 Cb. */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cr[k / 2];
				*p++ = state->ref_frame.luma[k + 1];
				*p++ = state->ref_frame.cb[k / 2];
			p_out += state->stride;
	case V4L2_PIX_FMT_UYVY:
		/* Packed 4:2:2 — Cb Y0 Cr Y1. */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
				*p++ = state->ref_frame.cb[k / 2];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cr[k / 2];
				*p++ = state->ref_frame.luma[k + 1];
			p_out += state->stride;
	case V4L2_PIX_FMT_VYUY:
		/* Packed 4:2:2 — Cr Y0 Cb Y1. */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
				*p++ = state->ref_frame.cr[k / 2];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cb[k / 2];
				*p++ = state->ref_frame.luma[k + 1];
			p_out += state->stride;
	case V4L2_PIX_FMT_RGB24:
	case V4L2_PIX_FMT_HSV24:
		/*
		 * For RGB/HSV encodings the "luma" plane holds G/S, "cb"
		 * holds B/V and "cr" holds R/H; emit R G B per pixel.
		 */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width; j++) {
				*p++ = state->ref_frame.cr[k];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cb[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_BGR24:
		/* Same planes, emitted in B G R order. */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width; j++) {
				*p++ = state->ref_frame.cb[k];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cr[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_RGB32:
	case V4L2_PIX_FMT_XRGB32:
	case V4L2_PIX_FMT_HSV32:
		/*
		 * 4-byte X R G B; the leading '*p++ = 0;' padding-byte store
		 * is missing from this extraction.
		 */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width; j++) {
				*p++ = state->ref_frame.cr[k];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cb[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_BGR32:
	case V4L2_PIX_FMT_XBGR32:
		/*
		 * 4-byte B G R X; the trailing padding-byte store is missing
		 * from this extraction.
		 */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width; j++) {
				*p++ = state->ref_frame.cb[k];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cr[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_ARGB32:
		/* 4-byte A R G B, alpha taken from the decoded alpha plane. */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width; j++) {
				*p++ = state->ref_frame.alpha[k];
				*p++ = state->ref_frame.cr[k];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cb[k];
			p_out += state->stride;
	case V4L2_PIX_FMT_ABGR32:
		/* 4-byte B G R A. */
		for (i = 0; i < state->coded_height; i++) {
			for (j = 0, p = p_out; j < state->coded_width; j++) {
				*p++ = state->ref_frame.cb[k];
				*p++ = state->ref_frame.luma[k];
				*p++ = state->ref_frame.cr[k];
				*p++ = state->ref_frame.alpha[k];
			p_out += state->stride;