utvideodec.c
1 /*
2  * Ut Video decoder
3  * Copyright (c) 2011 Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video decoder
25  */
26 
27 #include <inttypes.h>
28 #include <stdlib.h>
29 
30 #include "libavutil/intreadwrite.h"
31 #include "avcodec.h"
32 #include "bswapdsp.h"
33 #include "bytestream.h"
34 #include "get_bits.h"
35 #include "internal.h"
36 #include "thread.h"
37 #include "utvideo.h"
38 
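/*
 * The two helpers below rebuild a canonical Huffman code from the table of
 * per-symbol code lengths that prefixes each plane (1024 entries for the
 * 10-bit variant, 256 for the 8-bit one). Symbols are sorted by length,
 * codes are assigned starting from the longest codes, and entries with
 * length 255 mark symbols that do not occur. A zero length on the shortest
 * entry means the plane consists of a single symbol, which is reported
 * through *fsym instead of building a VLC table.
 */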
39 static int build_huff10(const uint8_t *src, VLC *vlc, int *fsym)
40 {
41  int i;
42  HuffEntry he[1024];
43  int last;
44  uint32_t codes[1024];
45  uint8_t bits[1024];
46  uint16_t syms[1024];
47  uint32_t code;
48 
49  *fsym = -1;
50  for (i = 0; i < 1024; i++) {
51  he[i].sym = i;
52  he[i].len = *src++;
53  }
54  qsort(he, 1024, sizeof(*he), ff_ut10_huff_cmp_len);
55 
56  if (!he[0].len) {
57  *fsym = he[0].sym;
58  return 0;
59  }
60 
61  last = 1023;
62  while (he[last].len == 255 && last)
63  last--;
64 
65  if (he[last].len > 32) {
66  return -1;
67  }
68 
69  code = 1;
70  for (i = last; i >= 0; i--) {
71  codes[i] = code >> (32 - he[i].len);
72  bits[i] = he[i].len;
73  syms[i] = he[i].sym;
74  code += 0x80000000u >> (he[i].len - 1);
75  }
76 
77  return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 11), last + 1,
78  bits, sizeof(*bits), sizeof(*bits),
79  codes, sizeof(*codes), sizeof(*codes),
80  syms, sizeof(*syms), sizeof(*syms), 0);
81 }
82 
83 static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
84 {
85  int i;
86  HuffEntry he[256];
87  int last;
88  uint32_t codes[256];
89  uint8_t bits[256];
90  uint8_t syms[256];
91  uint32_t code;
92 
93  *fsym = -1;
94  for (i = 0; i < 256; i++) {
95  he[i].sym = i;
96  he[i].len = *src++;
97  }
98  qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
99 
100  if (!he[0].len) {
101  *fsym = he[0].sym;
102  return 0;
103  }
104 
105  last = 255;
106  while (he[last].len == 255 && last)
107  last--;
108 
109  if (he[last].len > 32)
110  return -1;
111 
112  code = 1;
113  for (i = last; i >= 0; i--) {
114  codes[i] = code >> (32 - he[i].len);
115  bits[i] = he[i].len;
116  syms[i] = he[i].sym;
117  code += 0x80000000u >> (he[i].len - 1);
118  }
119 
120  return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 11), last + 1,
121  bits, sizeof(*bits), sizeof(*bits),
122  codes, sizeof(*codes), sizeof(*codes),
123  syms, sizeof(*syms), sizeof(*syms), 0);
124 }
125 
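/*
 * decode_plane10() and decode_plane() each read one plane. The payload at
 * src consists of c->slices little-endian 32-bit slice end offsets followed
 * by the concatenated slice bitstreams (decode_plane() first skips the
 * 256-byte length table it also uses to build its VLC). Every slice is
 * copied into c->slice_bits, zero-padded, byte-swapped within 32-bit words
 * by bdsp.bswap_buf and decoded with get_vlc2(). With use_pred set, the
 * decoded values are deltas accumulated into prev (bias 0x200 for 10-bit
 * data, 0x80 for 8-bit data).
 */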
126 static int decode_plane10(UtvideoContext *c, int plane_no,
127  uint16_t *dst, int step, ptrdiff_t stride,
128  int width, int height,
129  const uint8_t *src, const uint8_t *huff,
130  int use_pred)
131 {
132  int i, j, slice, pix, ret;
133  int sstart, send;
134  VLC vlc;
135  GetBitContext gb;
136  int prev, fsym;
137 
138  if ((ret = build_huff10(huff, &vlc, &fsym)) < 0) {
139  av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
140  return ret;
141  }
142  if (fsym >= 0) { // build_huff10 reported a symbol to fill slices with
143  send = 0;
144  for (slice = 0; slice < c->slices; slice++) {
145  uint16_t *dest;
146 
147  sstart = send;
148  send = (height * (slice + 1) / c->slices);
149  dest = dst + sstart * stride;
150 
151  prev = 0x200;
152  for (j = sstart; j < send; j++) {
153  for (i = 0; i < width * step; i += step) {
154  pix = fsym;
155  if (use_pred) {
156  prev += pix;
157  prev &= 0x3FF;
158  pix = prev;
159  }
160  dest[i] = pix;
161  }
162  dest += stride;
163  }
164  }
165  return 0;
166  }
167 
168  send = 0;
169  for (slice = 0; slice < c->slices; slice++) {
170  uint16_t *dest;
171  int slice_data_start, slice_data_end, slice_size;
172 
173  sstart = send;
174  send = (height * (slice + 1) / c->slices);
175  dest = dst + sstart * stride;
176 
177  // slice offset and size validation was done earlier
178  slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
179  slice_data_end = AV_RL32(src + slice * 4);
180  slice_size = slice_data_end - slice_data_start;
181 
182  if (!slice_size) {
183  av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
184  "yet a slice has a length of zero.\n");
185  goto fail;
186  }
187 
188  memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
189  slice_size);
190  memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
191  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
192  (uint32_t *) c->slice_bits,
193  (slice_data_end - slice_data_start + 3) >> 2);
194  init_get_bits(&gb, c->slice_bits, slice_size * 8);
195 
196  prev = 0x200;
197  for (j = sstart; j < send; j++) {
198  for (i = 0; i < width * step; i += step) {
199  if (get_bits_left(&gb) <= 0) {
200  av_log(c->avctx, AV_LOG_ERROR,
201  "Slice decoding ran out of bits\n");
202  goto fail;
203  }
204  pix = get_vlc2(&gb, vlc.table, vlc.bits, 3);
205  if (pix < 0) {
206  av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
207  goto fail;
208  }
209  if (use_pred) {
210  prev += pix;
211  prev &= 0x3FF;
212  pix = prev;
213  }
214  dest[i] = pix;
215  }
216  dest += stride;
217  }
218  if (get_bits_left(&gb) > 32)
219  av_log(c->avctx, AV_LOG_WARNING,
220  "%d bits left after decoding slice\n", get_bits_left(&gb));
221  }
222 
223  ff_free_vlc(&vlc);
224 
225  return 0;
226 fail:
227  ff_free_vlc(&vlc);
228  return AVERROR_INVALIDDATA;
229 }
230 
231 static int decode_plane(UtvideoContext *c, int plane_no,
232  uint8_t *dst, int step, ptrdiff_t stride,
233  int width, int height,
234  const uint8_t *src, int use_pred)
235 {
236  int i, j, slice, pix;
237  int sstart, send;
238  VLC vlc;
239  GetBitContext gb;
240  int prev, fsym;
241  const int cmask = ~(!plane_no && c->avctx->pix_fmt == AV_PIX_FMT_YUV420P);
242 
243  if (build_huff(src, &vlc, &fsym)) {
244  av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
245  return AVERROR_INVALIDDATA;
246  }
247  if (fsym >= 0) { // build_huff reported a symbol to fill slices with
248  send = 0;
249  for (slice = 0; slice < c->slices; slice++) {
250  uint8_t *dest;
251 
252  sstart = send;
253  send = (height * (slice + 1) / c->slices) & cmask;
254  dest = dst + sstart * stride;
255 
256  prev = 0x80;
257  for (j = sstart; j < send; j++) {
258  for (i = 0; i < width * step; i += step) {
259  pix = fsym;
260  if (use_pred) {
261  prev += pix;
262  pix = prev;
263  }
264  dest[i] = pix;
265  }
266  dest += stride;
267  }
268  }
269  return 0;
270  }
271 
272  src += 256;
273 
274  send = 0;
275  for (slice = 0; slice < c->slices; slice++) {
276  uint8_t *dest;
277  int slice_data_start, slice_data_end, slice_size;
278 
279  sstart = send;
280  send = (height * (slice + 1) / c->slices) & cmask;
281  dest = dst + sstart * stride;
282 
283  // slice offset and size validation was done earlier
284  slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
285  slice_data_end = AV_RL32(src + slice * 4);
286  slice_size = slice_data_end - slice_data_start;
287 
288  if (!slice_size) {
289  av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
290  "yet a slice has a length of zero.\n");
291  goto fail;
292  }
293 
294  memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
295  slice_size);
296  memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
297  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
298  (uint32_t *) c->slice_bits,
299  (slice_data_end - slice_data_start + 3) >> 2);
300  init_get_bits(&gb, c->slice_bits, slice_size * 8);
301 
302  prev = 0x80;
303  for (j = sstart; j < send; j++) {
304  for (i = 0; i < width * step; i += step) {
305  if (get_bits_left(&gb) <= 0) {
306  av_log(c->avctx, AV_LOG_ERROR,
307  "Slice decoding ran out of bits\n");
308  goto fail;
309  }
310  pix = get_vlc2(&gb, vlc.table, vlc.bits, 3);
311  if (pix < 0) {
312  av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
313  goto fail;
314  }
315  if (use_pred) {
316  prev += pix;
317  pix = prev;
318  }
319  dest[i] = pix;
320  }
321  dest += stride;
322  }
323  if (get_bits_left(&gb) > 32)
324  av_log(c->avctx, AV_LOG_WARNING,
325  "%d bits left after decoding slice\n", get_bits_left(&gb));
326  }
327 
328  ff_free_vlc(&vlc);
329 
330  return 0;
331 fail:
332  ff_free_vlc(&vlc);
333  return AVERROR_INVALIDDATA;
334 }
335 
336 static void restore_rgb_planes(uint8_t *src, int step, ptrdiff_t stride,
337  int width, int height)
338 {
339  int i, j;
340  uint8_t r, g, b;
341 
342  for (j = 0; j < height; j++) {
343  for (i = 0; i < width * step; i += step) {
344  r = src[i];
345  g = src[i + 1];
346  b = src[i + 2];
347  src[i] = r + g - 0x80;
348  src[i + 2] = b + g - 0x80;
349  }
350  src += stride;
351  }
352 }
353 
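/*
 * In the RGB formats, the R and B planes are stored as differences from G
 * with a bias (0x80 in the 8-bit path above, 0x200 in the 10-bit path
 * below); adding G back restores the original samples.
 */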
354 static void restore_rgb_planes10(AVFrame *frame, int width, int height)
355 {
356  uint16_t *src_r = (uint16_t *)frame->data[2];
357  uint16_t *src_g = (uint16_t *)frame->data[0];
358  uint16_t *src_b = (uint16_t *)frame->data[1];
359  int r, g, b;
360  int i, j;
361 
362  for (j = 0; j < height; j++) {
363  for (i = 0; i < width; i++) {
364  r = src_r[i];
365  g = src_g[i];
366  b = src_b[i];
367  src_r[i] = (r + g - 0x200) & 0x3FF;
368  src_b[i] = (b + g - 0x200) & 0x3FF;
369  }
370  src_r += frame->linesize[2] / 2;
371  src_g += frame->linesize[0] / 2;
372  src_b += frame->linesize[1] / 2;
373  }
374 }
375 
376 #undef A
377 #undef B
378 #undef C
379 
380 static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
381  int width, int height, int slices, int rmode)
382 {
383  int i, j, slice;
384  int A, B, C;
385  uint8_t *bsrc;
386  int slice_start, slice_height;
387  const int cmask = ~rmode;
388 
389  for (slice = 0; slice < slices; slice++) {
390  slice_start = ((slice * height) / slices) & cmask;
391  slice_height = ((((slice + 1) * height) / slices) & cmask) -
392  slice_start;
393 
394  if (!slice_height)
395  continue;
396  bsrc = src + slice_start * stride;
397 
398  // first line - left neighbour prediction
399  bsrc[0] += 0x80;
400  c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
401  bsrc += stride;
402  if (slice_height <= 1)
403  continue;
404  // second line - first element has top prediction, the rest uses median
405  C = bsrc[-stride];
406  bsrc[0] += C;
407  A = bsrc[0];
408  for (i = 1; i < width; i++) {
409  B = bsrc[i - stride];
410  bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
411  C = B;
412  A = bsrc[i];
413  }
414  bsrc += stride;
415  // the rest of lines use continuous median prediction
416  for (j = 2; j < slice_height; j++) {
417  c->llviddsp.add_median_pred(bsrc, bsrc - stride,
418  bsrc, width, &A, &B);
419  bsrc += stride;
420  }
421  }
422 }
423 
424 /* UtVideo interlaced mode treats every two lines as a single one,
425  * so restoring function should take care of possible padding between
426  * two parts of the same "line".
427  */
428 static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
429  int width, int height, int slices, int rmode)
430 {
431  int i, j, slice;
432  int A, B, C;
433  uint8_t *bsrc;
434  int slice_start, slice_height;
435  const int cmask = ~(rmode ? 3 : 1);
436  const ptrdiff_t stride2 = stride << 1;
437 
438  for (slice = 0; slice < slices; slice++) {
439  slice_start = ((slice * height) / slices) & cmask;
440  slice_height = ((((slice + 1) * height) / slices) & cmask) -
441  slice_start;
442  slice_height >>= 1;
443  if (!slice_height)
444  continue;
445 
446  bsrc = src + slice_start * stride;
447 
448  // first line - left neighbour prediction
449  bsrc[0] += 0x80;
450  A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
451  c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
452  bsrc += stride2;
453  if (slice_height <= 1)
454  continue;
455  // second line - first element has top prediction, the rest uses median
456  C = bsrc[-stride2];
457  bsrc[0] += C;
458  A = bsrc[0];
459  for (i = 1; i < width; i++) {
460  B = bsrc[i - stride2];
461  bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
462  C = B;
463  A = bsrc[i];
464  }
465  c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
466  bsrc + stride, width, &A, &B);
467  bsrc += stride2;
468  // the rest of lines use continuous median prediction
469  for (j = 2; j < slice_height; j++) {
470  c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
471  bsrc, width, &A, &B);
472  c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
473  bsrc + stride, width, &A, &B);
474  bsrc += stride2;
475  }
476  }
477 }
478 
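/*
 * The _packed variants below perform the same median restore as the planar
 * ones, but walk interleaved RGB(A) data: consecutive samples of one
 * component are `step` bytes apart, so they cannot use the llviddsp helpers
 * and carry out the left/median prediction manually.
 */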
479 static void restore_median_packed(uint8_t *src, int step, ptrdiff_t stride,
480  int width, int height, int slices, int rmode)
481 {
482  int i, j, slice;
483  int A, B, C;
484  uint8_t *bsrc;
485  int slice_start, slice_height;
486  const int cmask = ~rmode;
487 
488  for (slice = 0; slice < slices; slice++) {
489  slice_start = ((slice * height) / slices) & cmask;
490  slice_height = ((((slice + 1) * height) / slices) & cmask) -
491  slice_start;
492 
493  if (!slice_height)
494  continue;
495  bsrc = src + slice_start * stride;
496 
497  // first line - left neighbour prediction
498  bsrc[0] += 0x80;
499  A = bsrc[0];
500  for (i = step; i < width * step; i += step) {
501  bsrc[i] += A;
502  A = bsrc[i];
503  }
504  bsrc += stride;
505  if (slice_height <= 1)
506  continue;
507  // second line - first element has top prediction, the rest uses median
508  C = bsrc[-stride];
509  bsrc[0] += C;
510  A = bsrc[0];
511  for (i = step; i < width * step; i += step) {
512  B = bsrc[i - stride];
513  bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
514  C = B;
515  A = bsrc[i];
516  }
517  bsrc += stride;
518  // the rest of lines use continuous median prediction
519  for (j = 2; j < slice_height; j++) {
520  for (i = 0; i < width * step; i += step) {
521  B = bsrc[i - stride];
522  bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
523  C = B;
524  A = bsrc[i];
525  }
526  bsrc += stride;
527  }
528  }
529 }
530 
531 /* UtVideo interlaced mode treats every two lines as a single one,
532  * so restoring function should take care of possible padding between
533  * two parts of the same "line".
534  */
535 static void restore_median_packed_il(uint8_t *src, int step, ptrdiff_t stride,
536  int width, int height, int slices, int rmode)
537 {
538  int i, j, slice;
539  int A, B, C;
540  uint8_t *bsrc;
541  int slice_start, slice_height;
542  const int cmask = ~(rmode ? 3 : 1);
543  const ptrdiff_t stride2 = stride << 1;
544 
545  for (slice = 0; slice < slices; slice++) {
546  slice_start = ((slice * height) / slices) & cmask;
547  slice_height = ((((slice + 1) * height) / slices) & cmask) -
548  slice_start;
549  slice_height >>= 1;
550  if (!slice_height)
551  continue;
552 
553  bsrc = src + slice_start * stride;
554 
555  // first line - left neighbour prediction
556  bsrc[0] += 0x80;
557  A = bsrc[0];
558  for (i = step; i < width * step; i += step) {
559  bsrc[i] += A;
560  A = bsrc[i];
561  }
562  for (i = 0; i < width * step; i += step) {
563  bsrc[stride + i] += A;
564  A = bsrc[stride + i];
565  }
566  bsrc += stride2;
567  if (slice_height <= 1)
568  continue;
569  // second line - first element has top prediction, the rest uses median
570  C = bsrc[-stride2];
571  bsrc[0] += C;
572  A = bsrc[0];
573  for (i = step; i < width * step; i += step) {
574  B = bsrc[i - stride2];
575  bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
576  C = B;
577  A = bsrc[i];
578  }
579  for (i = 0; i < width * step; i += step) {
580  B = bsrc[i - stride];
581  bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C));
582  C = B;
583  A = bsrc[stride + i];
584  }
585  bsrc += stride2;
586  // the rest of lines use continuous median prediction
587  for (j = 2; j < slice_height; j++) {
588  for (i = 0; i < width * step; i += step) {
589  B = bsrc[i - stride2];
590  bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
591  C = B;
592  A = bsrc[i];
593  }
594  for (i = 0; i < width * step; i += step) {
595  B = bsrc[i - stride];
596  bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C));
597  C = B;
598  A = bsrc[i + stride];
599  }
600  bsrc += stride2;
601  }
602  }
603 }
604 
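/*
 * decode_frame() parses the per-frame header, validates the slice offset
 * table of every plane, then decodes each plane and applies the restore
 * pass matching the prediction mode. In frame_info, bits 16-23 hold the
 * slice count minus one and bits 8-9 the prediction mode (PRED_LEFT,
 * PRED_GRADIENT or PRED_MEDIAN, as checked below).
 */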
605 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
606  AVPacket *avpkt)
607 {
608  const uint8_t *buf = avpkt->data;
609  int buf_size = avpkt->size;
610  UtvideoContext *c = avctx->priv_data;
611  int i, j;
612  const uint8_t *plane_start[5];
613  int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
614  int ret;
615  GetByteContext gb;
616  ThreadFrame frame = { .f = data };
617 
618  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
619  return ret;
620 
621  /* parse plane structure to get frame flags and validate slice offsets */
622  bytestream2_init(&gb, buf, buf_size);
623  if (c->pro) {
624  if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
625  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
626  return AVERROR_INVALIDDATA;
627  }
628  c->frame_info = bytestream2_get_le32u(&gb);
629  c->slices = ((c->frame_info >> 16) & 0xff) + 1;
630  for (i = 0; i < c->planes; i++) {
631  plane_start[i] = gb.buffer;
632  if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
633  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
634  return AVERROR_INVALIDDATA;
635  }
636  slice_start = 0;
637  slice_end = 0;
638  for (j = 0; j < c->slices; j++) {
639  slice_end = bytestream2_get_le32u(&gb);
640  if (slice_end < 0 || slice_end < slice_start ||
641  bytestream2_get_bytes_left(&gb) < slice_end) {
642  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
643  return AVERROR_INVALIDDATA;
644  }
645  slice_size = slice_end - slice_start;
646  slice_start = slice_end;
647  max_slice_size = FFMAX(max_slice_size, slice_size);
648  }
649  plane_size = slice_end;
650  bytestream2_skipu(&gb, plane_size);
651  bytestream2_skipu(&gb, 1024);
652  }
653  plane_start[c->planes] = gb.buffer;
654  } else {
655  for (i = 0; i < c->planes; i++) {
656  plane_start[i] = gb.buffer;
657  if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
658  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
659  return AVERROR_INVALIDDATA;
660  }
661  bytestream2_skipu(&gb, 256);
662  slice_start = 0;
663  slice_end = 0;
664  for (j = 0; j < c->slices; j++) {
665  slice_end = bytestream2_get_le32u(&gb);
666  if (slice_end < 0 || slice_end < slice_start ||
667  bytestream2_get_bytes_left(&gb) < slice_end) {
668  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
669  return AVERROR_INVALIDDATA;
670  }
671  slice_size = slice_end - slice_start;
672  slice_start = slice_end;
673  max_slice_size = FFMAX(max_slice_size, slice_size);
674  }
675  plane_size = slice_end;
676  bytestream2_skipu(&gb, plane_size);
677  }
678  plane_start[c->planes] = gb.buffer;
679  if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
680  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
681  return AVERROR_INVALIDDATA;
682  }
683  c->frame_info = bytestream2_get_le32u(&gb);
684  }
685  av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
686  c->frame_info);
687 
688  c->frame_pred = (c->frame_info >> 8) & 3;
689 
690  if (c->frame_pred == PRED_GRADIENT) {
691  avpriv_request_sample(avctx, "Frame with gradient prediction");
692  return AVERROR_PATCHWELCOME;
693  }
694 
695  av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
696  max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
697 
698  if (!c->slice_bits) {
699  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
700  return AVERROR(ENOMEM);
701  }
702 
703  switch (c->avctx->pix_fmt) {
704  case AV_PIX_FMT_RGB24:
705  case AV_PIX_FMT_RGBA:
706  for (i = 0; i < c->planes; i++) {
707  ret = decode_plane(c, i, frame.f->data[0] + ff_ut_rgb_order[i],
708  c->planes, frame.f->linesize[0], avctx->width,
709  avctx->height, plane_start[i],
710  c->frame_pred == PRED_LEFT);
711  if (ret)
712  return ret;
713  if (c->frame_pred == PRED_MEDIAN) {
714  if (!c->interlaced) {
715  restore_median_packed(frame.f->data[0] + ff_ut_rgb_order[i],
716  c->planes, frame.f->linesize[0], avctx->width,
717  avctx->height, c->slices, 0);
718  } else {
719  restore_median_packed_il(frame.f->data[0] + ff_ut_rgb_order[i],
720  c->planes, frame.f->linesize[0],
721  avctx->width, avctx->height, c->slices,
722  0);
723  }
724  }
725  }
726  restore_rgb_planes(frame.f->data[0], c->planes, frame.f->linesize[0],
727  avctx->width, avctx->height);
728  break;
729  case AV_PIX_FMT_GBRAP10:
730  case AV_PIX_FMT_GBRP10:
731  for (i = 0; i < c->planes; i++) {
732  ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1,
733  frame.f->linesize[i] / 2, avctx->width,
734  avctx->height, plane_start[i],
735  plane_start[i + 1] - 1024,
736  c->frame_pred == PRED_LEFT);
737  if (ret)
738  return ret;
739  }
740  restore_rgb_planes10(frame.f, avctx->width, avctx->height);
741  break;
742  case AV_PIX_FMT_YUV420P:
743  for (i = 0; i < 3; i++) {
744  ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
745  avctx->width >> !!i, avctx->height >> !!i,
746  plane_start[i], c->frame_pred == PRED_LEFT);
747  if (ret)
748  return ret;
749  if (c->frame_pred == PRED_MEDIAN) {
750  if (!c->interlaced) {
751  restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
752  avctx->width >> !!i, avctx->height >> !!i,
753  c->slices, !i);
754  } else {
755  restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
756  avctx->width >> !!i,
757  avctx->height >> !!i,
758  c->slices, !i);
759  }
760  }
761  }
762  break;
763  case AV_PIX_FMT_YUV422P:
764  for (i = 0; i < 3; i++) {
765  ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
766  avctx->width >> !!i, avctx->height,
767  plane_start[i], c->frame_pred == PRED_LEFT);
768  if (ret)
769  return ret;
770  if (c->frame_pred == PRED_MEDIAN) {
771  if (!c->interlaced) {
772  restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
773  avctx->width >> !!i, avctx->height,
774  c->slices, 0);
775  } else {
776  restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
777  avctx->width >> !!i, avctx->height,
778  c->slices, 0);
779  }
780  }
781  }
782  break;
783  case AV_PIX_FMT_YUV444P:
784  for (i = 0; i < 3; i++) {
785  ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
786  avctx->width, avctx->height,
787  plane_start[i], c->frame_pred == PRED_LEFT);
788  if (ret)
789  return ret;
790  if (c->frame_pred == PRED_MEDIAN) {
791  if (!c->interlaced) {
792  restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
793  avctx->width, avctx->height,
794  c->slices, 0);
795  } else {
796  restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
797  avctx->width, avctx->height,
798  c->slices, 0);
799  }
800  }
801  }
802  break;
803  case AV_PIX_FMT_YUV422P10:
804  for (i = 0; i < 3; i++) {
805  ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1, frame.f->linesize[i] / 2,
806  avctx->width >> !!i, avctx->height,
807  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
808  if (ret)
809  return ret;
810  }
811  break;
812  }
813 
814  frame.f->key_frame = 1;
815  frame.f->pict_type = AV_PICTURE_TYPE_I;
816  frame.f->interlaced_frame = !!c->interlaced;
817 
818  *got_frame = 1;
819 
820  /* always report that the buffer was completely consumed */
821  return buf_size;
822 }
823 
824 static av_cold int decode_init(AVCodecContext *avctx)
825 {
826  UtvideoContext * const c = avctx->priv_data;
827 
828  c->avctx = avctx;
829 
830  ff_bswapdsp_init(&c->bdsp);
831  ff_llviddsp_init(&c->llviddsp);
832 
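/*
 * Extradata layout, as parsed below: bytes 0-3 encoder version, 4-7 the
 * original format tag, 8-11 frame_info_size, 12-15 the flags word (bit 0
 * compression, bit 11 interlacing, top 8 bits slice count minus one).
 * An 8-byte extradata without the flags word marks the Pro variant.
 */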
833  if (avctx->extradata_size >= 16) {
834  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
835  avctx->extradata[3], avctx->extradata[2],
836  avctx->extradata[1], avctx->extradata[0]);
837  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
838  AV_RB32(avctx->extradata + 4));
839  c->frame_info_size = AV_RL32(avctx->extradata + 8);
840  c->flags = AV_RL32(avctx->extradata + 12);
841 
842  if (c->frame_info_size != 4)
843  avpriv_request_sample(avctx, "Frame info not 4 bytes");
844  av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
845  c->slices = (c->flags >> 24) + 1;
846  c->compression = c->flags & 1;
847  c->interlaced = c->flags & 0x800;
848  } else if (avctx->extradata_size == 8) {
849  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
850  avctx->extradata[3], avctx->extradata[2],
851  avctx->extradata[1], avctx->extradata[0]);
852  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
853  AV_RB32(avctx->extradata + 4));
854  c->interlaced = 0;
855  c->pro = 1;
856  c->frame_info_size = 4;
857  } else {
858  av_log(avctx, AV_LOG_ERROR,
859  "Insufficient extradata size %d, should be at least 16\n",
860  avctx->extradata_size);
861  return AVERROR_INVALIDDATA;
862  }
863 
864  c->slice_bits_size = 0;
865 
866  switch (avctx->codec_tag) {
867  case MKTAG('U', 'L', 'R', 'G'):
868  c->planes = 3;
869  avctx->pix_fmt = AV_PIX_FMT_RGB24;
870  break;
871  case MKTAG('U', 'L', 'R', 'A'):
872  c->planes = 4;
873  avctx->pix_fmt = AV_PIX_FMT_RGBA;
874  break;
875  case MKTAG('U', 'L', 'Y', '0'):
876  c->planes = 3;
877  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
878  avctx->colorspace = AVCOL_SPC_BT470BG;
879  break;
880  case MKTAG('U', 'L', 'Y', '2'):
881  c->planes = 3;
882  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
883  avctx->colorspace = AVCOL_SPC_BT470BG;
884  break;
885  case MKTAG('U', 'L', 'Y', '4'):
886  c->planes = 3;
887  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
888  avctx->colorspace = AVCOL_SPC_BT470BG;
889  break;
890  case MKTAG('U', 'Q', 'Y', '2'):
891  c->planes = 3;
892  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
893  break;
894  case MKTAG('U', 'Q', 'R', 'G'):
895  c->planes = 3;
896  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
897  break;
898  case MKTAG('U', 'Q', 'R', 'A'):
899  c->planes = 4;
900  avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
901  break;
902  case MKTAG('U', 'L', 'H', '0'):
903  c->planes = 3;
904  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
905  avctx->colorspace = AVCOL_SPC_BT709;
906  break;
907  case MKTAG('U', 'L', 'H', '2'):
908  c->planes = 3;
909  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
910  avctx->colorspace = AVCOL_SPC_BT709;
911  break;
912  case MKTAG('U', 'L', 'H', '4'):
913  c->planes = 3;
914  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
915  avctx->colorspace = AVCOL_SPC_BT709;
916  break;
917  default:
918  av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
919  avctx->codec_tag);
920  return AVERROR_INVALIDDATA;
921  }
922 
923  return 0;
924 }
925 
926 static av_cold int decode_end(AVCodecContext *avctx)
927 {
928  UtvideoContext * const c = avctx->priv_data;
929 
930  av_freep(&c->slice_bits);
931 
932  return 0;
933 }
934 
935 AVCodec ff_utvideo_decoder = {
936  .name = "utvideo",
937  .long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
938  .type = AVMEDIA_TYPE_VIDEO,
939  .id = AV_CODEC_ID_UTVIDEO,
940  .priv_data_size = sizeof(UtvideoContext),
941  .init = decode_init,
942  .close = decode_end,
943  .decode = decode_frame,
944  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
945  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
946 };