FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
utvideodec.c
Go to the documentation of this file.
1 /*
2  * Ut Video decoder
3  * Copyright (c) 2011 Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video decoder
25  */
26 
27 #include <inttypes.h>
28 #include <stdlib.h>
29 
30 #include "libavutil/intreadwrite.h"
31 #include "avcodec.h"
32 #include "bswapdsp.h"
33 #include "bytestream.h"
34 #include "get_bits.h"
35 #include "internal.h"
36 #include "thread.h"
37 #include "utvideo.h"
38 
/**
 * Build a canonical Huffman decoding table for a 10-bit plane.
 *
 * @param src  1024 bytes, one code length per symbol (255 = symbol unused)
 * @param vlc  output VLC table (only initialized when 0 is returned and
 *             *fsym stays -1; the caller frees it with ff_free_vlc())
 * @param fsym set to the single symbol when the plane uses only one symbol
 *             (shortest code length is 0), otherwise left at -1
 * @return 0 on success, negative value on invalid code lengths
 */
static int build_huff10(const uint8_t *src, VLC *vlc, int *fsym)
{
    int i;
    HuffEntry he[1024];
    int last;
    uint32_t codes[1024];
    uint8_t bits[1024];
    uint16_t syms[1024];
    uint32_t code;

    *fsym = -1;
    for (i = 0; i < 1024; i++) {
        he[i].sym = i;
        he[i].len = *src++;
    }
    /* sort by code length so canonical codes can be assigned in order */
    qsort(he, 1024, sizeof(*he), ff_ut10_huff_cmp_len);

    /* a zero-length shortest code means the whole plane is one symbol */
    if (!he[0].len) {
        *fsym = he[0].sym;
        return 0;
    }

    /* drop trailing unused symbols (length 255) */
    last = 1023;
    while (he[last].len == 255 && last)
        last--;

    if (he[last].len > 32) {
        return -1;
    }

    /* assign canonical codes from the longest code upwards */
    code = 1;
    for (i = last; i >= 0; i--) {
        codes[i] = code >> (32 - he[i].len);
        bits[i] = he[i].len;
        syms[i] = he[i].sym;
        code += 0x80000000u >> (he[i].len - 1);
    }

    return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 11), last + 1,
                              bits, sizeof(*bits), sizeof(*bits),
                              codes, sizeof(*codes), sizeof(*codes),
                              syms, sizeof(*syms), sizeof(*syms), 0);
}
82 
83 static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
84 {
85  int i;
86  HuffEntry he[256];
87  int last;
88  uint32_t codes[256];
89  uint8_t bits[256];
90  uint8_t syms[256];
91  uint32_t code;
92 
93  *fsym = -1;
94  for (i = 0; i < 256; i++) {
95  he[i].sym = i;
96  he[i].len = *src++;
97  }
98  qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
99 
100  if (!he[0].len) {
101  *fsym = he[0].sym;
102  return 0;
103  }
104 
105  last = 255;
106  while (he[last].len == 255 && last)
107  last--;
108 
109  if (he[last].len > 32)
110  return -1;
111 
112  code = 1;
113  for (i = last; i >= 0; i--) {
114  codes[i] = code >> (32 - he[i].len);
115  bits[i] = he[i].len;
116  syms[i] = he[i].sym;
117  code += 0x80000000u >> (he[i].len - 1);
118  }
119 
120  return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 11), last + 1,
121  bits, sizeof(*bits), sizeof(*bits),
122  codes, sizeof(*codes), sizeof(*codes),
123  syms, sizeof(*syms), sizeof(*syms), 0);
124 }
125 
126 static int decode_plane10(UtvideoContext *c, int plane_no,
127  uint16_t *dst, int step, ptrdiff_t stride,
128  int width, int height,
129  const uint8_t *src, const uint8_t *huff,
130  int use_pred)
131 {
132  int i, j, slice, pix, ret;
133  int sstart, send;
134  VLC vlc;
135  GetBitContext gb;
136  int prev, fsym;
137 
138  if ((ret = build_huff10(huff, &vlc, &fsym)) < 0) {
139  av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
140  return ret;
141  }
142  if (fsym >= 0) { // build_huff reported a symbol to fill slices with
143  send = 0;
144  for (slice = 0; slice < c->slices; slice++) {
145  uint16_t *dest;
146 
147  sstart = send;
148  send = (height * (slice + 1) / c->slices);
149  dest = dst + sstart * stride;
150 
151  prev = 0x200;
152  for (j = sstart; j < send; j++) {
153  for (i = 0; i < width * step; i += step) {
154  pix = fsym;
155  if (use_pred) {
156  prev += pix;
157  prev &= 0x3FF;
158  pix = prev;
159  }
160  dest[i] = pix;
161  }
162  dest += stride;
163  }
164  }
165  return 0;
166  }
167 
168  send = 0;
169  for (slice = 0; slice < c->slices; slice++) {
170  uint16_t *dest;
171  int slice_data_start, slice_data_end, slice_size;
172 
173  sstart = send;
174  send = (height * (slice + 1) / c->slices);
175  dest = dst + sstart * stride;
176 
177  // slice offset and size validation was done earlier
178  slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
179  slice_data_end = AV_RL32(src + slice * 4);
180  slice_size = slice_data_end - slice_data_start;
181 
182  if (!slice_size) {
183  av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
184  "yet a slice has a length of zero.\n");
185  goto fail;
186  }
187 
188  memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
189  slice_size);
190  memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
191  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
192  (uint32_t *) c->slice_bits,
193  (slice_data_end - slice_data_start + 3) >> 2);
194  init_get_bits(&gb, c->slice_bits, slice_size * 8);
195 
196  prev = 0x200;
197  for (j = sstart; j < send; j++) {
198  for (i = 0; i < width * step; i += step) {
199  if (get_bits_left(&gb) <= 0) {
201  "Slice decoding ran out of bits\n");
202  goto fail;
203  }
204  pix = get_vlc2(&gb, vlc.table, vlc.bits, 3);
205  if (pix < 0) {
206  av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
207  goto fail;
208  }
209  if (use_pred) {
210  prev += pix;
211  prev &= 0x3FF;
212  pix = prev;
213  }
214  dest[i] = pix;
215  }
216  dest += stride;
217  }
218  if (get_bits_left(&gb) > 32)
220  "%d bits left after decoding slice\n", get_bits_left(&gb));
221  }
222 
223  ff_free_vlc(&vlc);
224 
225  return 0;
226 fail:
227  ff_free_vlc(&vlc);
228  return AVERROR_INVALIDDATA;
229 }
230 
231 static int decode_plane(UtvideoContext *c, int plane_no,
232  uint8_t *dst, int step, ptrdiff_t stride,
233  int width, int height,
234  const uint8_t *src, int use_pred)
235 {
236  int i, j, slice, pix;
237  int sstart, send;
238  VLC vlc;
239  GetBitContext gb;
240  int prev, fsym;
241  const int cmask = c->interlaced ? ~(1 + 2 * (!plane_no && c->avctx->pix_fmt == AV_PIX_FMT_YUV420P)) : ~(!plane_no && c->avctx->pix_fmt == AV_PIX_FMT_YUV420P);
242 
243  if (build_huff(src, &vlc, &fsym)) {
244  av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
245  return AVERROR_INVALIDDATA;
246  }
247  if (fsym >= 0) { // build_huff reported a symbol to fill slices with
248  send = 0;
249  for (slice = 0; slice < c->slices; slice++) {
250  uint8_t *dest;
251 
252  sstart = send;
253  send = (height * (slice + 1) / c->slices) & cmask;
254  dest = dst + sstart * stride;
255 
256  prev = 0x80;
257  for (j = sstart; j < send; j++) {
258  for (i = 0; i < width * step; i += step) {
259  pix = fsym;
260  if (use_pred) {
261  prev += pix;
262  pix = prev;
263  }
264  dest[i] = pix;
265  }
266  dest += stride;
267  }
268  }
269  return 0;
270  }
271 
272  src += 256;
273 
274  send = 0;
275  for (slice = 0; slice < c->slices; slice++) {
276  uint8_t *dest;
277  int slice_data_start, slice_data_end, slice_size;
278 
279  sstart = send;
280  send = (height * (slice + 1) / c->slices) & cmask;
281  dest = dst + sstart * stride;
282 
283  // slice offset and size validation was done earlier
284  slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
285  slice_data_end = AV_RL32(src + slice * 4);
286  slice_size = slice_data_end - slice_data_start;
287 
288  if (!slice_size) {
289  av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
290  "yet a slice has a length of zero.\n");
291  goto fail;
292  }
293 
294  memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
295  slice_size);
296  memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
297  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
298  (uint32_t *) c->slice_bits,
299  (slice_data_end - slice_data_start + 3) >> 2);
300  init_get_bits(&gb, c->slice_bits, slice_size * 8);
301 
302  prev = 0x80;
303  for (j = sstart; j < send; j++) {
304  for (i = 0; i < width * step; i += step) {
305  if (get_bits_left(&gb) <= 0) {
307  "Slice decoding ran out of bits\n");
308  goto fail;
309  }
310  pix = get_vlc2(&gb, vlc.table, vlc.bits, 3);
311  if (pix < 0) {
312  av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
313  goto fail;
314  }
315  if (use_pred) {
316  prev += pix;
317  pix = prev;
318  }
319  dest[i] = pix;
320  }
321  dest += stride;
322  }
323  if (get_bits_left(&gb) > 32)
325  "%d bits left after decoding slice\n", get_bits_left(&gb));
326  }
327 
328  ff_free_vlc(&vlc);
329 
330  return 0;
331 fail:
332  ff_free_vlc(&vlc);
333  return AVERROR_INVALIDDATA;
334 }
335 
/**
 * Undo the G-based residual transform on packed 8-bit RGB(A) data in place:
 * R and B are stored as offsets from G (biased by 0x80).
 *
 * @param src    packed pixel data, component order R,G,B within each pixel
 * @param step   bytes per pixel (3 for RGB24, 4 for RGBA)
 * @param stride bytes per row
 */
static void restore_rgb_planes(uint8_t *src, int step, ptrdiff_t stride,
                               int width, int height)
{
    int row, col;

    for (row = 0; row < height; row++) {
        for (col = 0; col < width * step; col += step) {
            const uint8_t g = src[col + 1];
            /* green is untouched; red/blue recover their true values */
            src[col]     = src[col]     + g - 0x80;
            src[col + 2] = src[col + 2] + g - 0x80;
        }
        src += stride;
    }
}
353 
355 {
356  uint16_t *src_r = (uint16_t *)frame->data[2];
357  uint16_t *src_g = (uint16_t *)frame->data[0];
358  uint16_t *src_b = (uint16_t *)frame->data[1];
359  int r, g, b;
360  int i, j;
361 
362  for (j = 0; j < height; j++) {
363  for (i = 0; i < width; i++) {
364  r = src_r[i];
365  g = src_g[i];
366  b = src_b[i];
367  src_r[i] = (r + g - 0x200) & 0x3FF;
368  src_b[i] = (b + g - 0x200) & 0x3FF;
369  }
370  src_r += frame->linesize[2] / 2;
371  src_g += frame->linesize[0] / 2;
372  src_b += frame->linesize[1] / 2;
373  }
374 }
375 
376 #undef A
377 #undef B
378 #undef C
379 
381  int width, int height, int slices, int rmode)
382 {
383  int i, j, slice;
384  int A, B, C;
385  uint8_t *bsrc;
386  int slice_start, slice_height;
387  const int cmask = ~rmode;
388 
389  for (slice = 0; slice < slices; slice++) {
390  slice_start = ((slice * height) / slices) & cmask;
391  slice_height = ((((slice + 1) * height) / slices) & cmask) -
392  slice_start;
393 
394  if (!slice_height)
395  continue;
396  bsrc = src + slice_start * stride;
397 
398  // first line - left neighbour prediction
399  bsrc[0] += 0x80;
400  c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
401  bsrc += stride;
402  if (slice_height <= 1)
403  continue;
404  // second line - first element has top prediction, the rest uses median
405  C = bsrc[-stride];
406  bsrc[0] += C;
407  A = bsrc[0];
408  for (i = 1; i < width; i++) {
409  B = bsrc[i - stride];
410  bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
411  C = B;
412  A = bsrc[i];
413  }
414  bsrc += stride;
415  // the rest of lines use continuous median prediction
416  for (j = 2; j < slice_height; j++) {
417  c->llviddsp.add_median_pred(bsrc, bsrc - stride,
418  bsrc, width, &A, &B);
419  bsrc += stride;
420  }
421  }
422 }
423 
424 /* UtVideo interlaced mode treats every two lines as a single one,
425  * so restoring function should take care of possible padding between
426  * two parts of the same "line".
427  */
429  int width, int height, int slices, int rmode)
430 {
431  int i, j, slice;
432  int A, B, C;
433  uint8_t *bsrc;
434  int slice_start, slice_height;
435  const int cmask = ~(rmode ? 3 : 1);
436  const ptrdiff_t stride2 = stride << 1;
437 
438  for (slice = 0; slice < slices; slice++) {
439  slice_start = ((slice * height) / slices) & cmask;
440  slice_height = ((((slice + 1) * height) / slices) & cmask) -
441  slice_start;
442  slice_height >>= 1;
443  if (!slice_height)
444  continue;
445 
446  bsrc = src + slice_start * stride;
447 
448  // first line - left neighbour prediction
449  bsrc[0] += 0x80;
450  A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
451  c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
452  bsrc += stride2;
453  if (slice_height <= 1)
454  continue;
455  // second line - first element has top prediction, the rest uses median
456  C = bsrc[-stride2];
457  bsrc[0] += C;
458  A = bsrc[0];
459  for (i = 1; i < width; i++) {
460  B = bsrc[i - stride2];
461  bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
462  C = B;
463  A = bsrc[i];
464  }
465  c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
466  bsrc + stride, width, &A, &B);
467  bsrc += stride2;
468  // the rest of lines use continuous median prediction
469  for (j = 2; j < slice_height; j++) {
470  c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
471  bsrc, width, &A, &B);
472  c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
473  bsrc + stride, width, &A, &B);
474  bsrc += stride2;
475  }
476  }
477 }
478 
/**
 * Undo median prediction on one component of a packed (interleaved) plane.
 * Per slice: row 0 uses left prediction, row 1 seeds the median predictor,
 * later rows continue it.
 *
 * @param src    points at this component's first byte within the packed data
 * @param step   bytes between horizontally adjacent samples of the component
 * @param stride bytes per row
 * @param rmode  slice-boundary alignment mask source (rows & ~rmode)
 */
static void restore_median_packed(uint8_t *src, int step, ptrdiff_t stride,
                                  int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~rmode;

    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;

        if (!slice_height)
            continue;
        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = bsrc[0];
        for (i = step; i < width * step; i += step) {
            bsrc[i] += A;
            A = bsrc[i];
        }
        bsrc += stride;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C = bsrc[-stride];
        bsrc[0] += C;
        A = bsrc[0];
        for (i = step; i < width * step; i += step) {
            B = bsrc[i - stride];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[i];
        }
        bsrc += stride;
        // the rest of lines use continuous median prediction;
        // A and C deliberately carry over from the end of the previous row
        // (the encoder treats the slice as one continuous stream)
        for (j = 2; j < slice_height; j++) {
            for (i = 0; i < width * step; i += step) {
                B = bsrc[i - stride];
                bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
                C = B;
                A = bsrc[i];
            }
            bsrc += stride;
        }
    }
}
530 
531 /* UtVideo interlaced mode treats every two lines as a single one,
532  * so restoring function should take care of possible padding between
533  * two parts of the same "line".
534  */
/**
 * Undo median prediction on one component of a packed plane, interlaced
 * layout: every two field rows form one logical "line", so the predictor
 * runs across the row pair and vertical neighbours are stride2 away.
 *
 * @param src    points at this component's first byte within the packed data
 * @param step   bytes between horizontally adjacent samples of the component
 * @param rmode  nonzero aligns slice boundaries to 4 rows instead of 2
 */
static void restore_median_packed_il(uint8_t *src, int step, ptrdiff_t stride,
                                     int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;

    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1; // process row pairs
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = bsrc[0];
        for (i = step; i < width * step; i += step) {
            bsrc[i] += A;
            A = bsrc[i];
        }
        // second half of the pair continues left prediction from the first
        for (i = 0; i < width * step; i += step) {
            bsrc[stride + i] += A;
            A = bsrc[stride + i];
        }
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C = bsrc[-stride2];
        bsrc[0] += C;
        A = bsrc[0];
        for (i = step; i < width * step; i += step) {
            B = bsrc[i - stride2];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[i];
        }
        // second half of the pair: top neighbour is only stride away
        for (i = 0; i < width * step; i += step) {
            B = bsrc[i - stride];
            bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[stride + i];
        }
        bsrc += stride2;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            for (i = 0; i < width * step; i += step) {
                B = bsrc[i - stride2];
                bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
                C = B;
                A = bsrc[i];
            }
            for (i = 0; i < width * step; i += step) {
                B = bsrc[i - stride];
                bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C));
                C = B;
                A = bsrc[i + stride];
            }
            bsrc += stride2;
        }
    }
}
604 
606  int width, int height, int slices, int rmode)
607 {
608  int i, j, slice;
609  int A, B, C;
610  uint8_t *bsrc;
611  int slice_start, slice_height;
612  const int cmask = ~rmode;
613 
614  for (slice = 0; slice < slices; slice++) {
615  slice_start = ((slice * height) / slices) & cmask;
616  slice_height = ((((slice + 1) * height) / slices) & cmask) -
617  slice_start;
618 
619  if (!slice_height)
620  continue;
621  bsrc = src + slice_start * stride;
622 
623  // first line - left neighbour prediction
624  bsrc[0] += 0x80;
625  c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
626  bsrc += stride;
627  if (slice_height <= 1)
628  continue;
629  for (j = 1; j < slice_height; j++) {
630  // second line - first element has top prediction, the rest uses gradient
631  bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
632  for (i = 1; i < width; i++) {
633  A = bsrc[i - stride];
634  B = bsrc[i - (stride + 1)];
635  C = bsrc[i - 1];
636  bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
637  }
638  bsrc += stride;
639  }
640  }
641 }
642 
644  int width, int height, int slices, int rmode)
645 {
646  int i, j, slice;
647  int A, B, C;
648  uint8_t *bsrc;
649  int slice_start, slice_height;
650  const int cmask = ~(rmode ? 3 : 1);
651  const ptrdiff_t stride2 = stride << 1;
652 
653  for (slice = 0; slice < slices; slice++) {
654  slice_start = ((slice * height) / slices) & cmask;
655  slice_height = ((((slice + 1) * height) / slices) & cmask) -
656  slice_start;
657  slice_height >>= 1;
658  if (!slice_height)
659  continue;
660 
661  bsrc = src + slice_start * stride;
662 
663  // first line - left neighbour prediction
664  bsrc[0] += 0x80;
665  A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
666  c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
667  bsrc += stride2;
668  if (slice_height <= 1)
669  continue;
670  for (j = 1; j < slice_height; j++) {
671  // second line - first element has top prediction, the rest uses gradient
672  bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
673  for (i = 1; i < width; i++) {
674  A = bsrc[i - stride2];
675  B = bsrc[i - (stride2 + 1)];
676  C = bsrc[i - 1];
677  bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
678  }
679  A = bsrc[-stride];
680  B = bsrc[-(1 + stride + stride - width)];
681  C = bsrc[width - 1];
682  bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
683  for (i = 1; i < width; i++) {
684  A = bsrc[i - stride];
685  B = bsrc[i - (1 + stride)];
686  C = bsrc[i - 1 + stride];
687  bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
688  }
689  bsrc += stride2;
690  }
691  }
692 }
693 
/**
 * Undo gradient (left + top - topleft) prediction on one component of a
 * packed plane, progressive layout. Row 0 of each slice uses plain left
 * prediction seeded with 0x80.
 *
 * @param src    points at this component's first byte within the packed data
 * @param step   bytes between horizontally adjacent samples of the component
 * @param stride bytes per row
 * @param rmode  slice-boundary alignment mask source (rows & ~rmode)
 */
static void restore_gradient_packed(uint8_t *src, int step, ptrdiff_t stride,
                                    int width, int height, int slices, int rmode)
{
    const int cmask   = ~rmode;
    const int row_len = width * step;
    int sl, x, y;

    for (sl = 0; sl < slices; sl++) {
        const int top  = ((sl * height) / slices) & cmask;
        const int rows = ((((sl + 1) * height) / slices) & cmask) - top;
        uint8_t *line;

        if (!rows)
            continue;
        line = src + top * stride;

        /* first row: left prediction, bias 0x80 on the first sample */
        line[0] += 0x80;
        for (x = step; x < row_len; x += step)
            line[x] += line[x - step];
        line += stride;
        if (rows <= 1)
            continue;

        /* remaining rows: gradient prediction against the row above */
        for (y = 1; y < rows; y++) {
            line[0] += line[-stride];
            for (x = step; x < row_len; x += step) {
                const int top_px  = line[x - stride];
                const int tl_px   = line[x - (stride + step)];
                const int left_px = line[x - step];
                line[x] = (top_px - tl_px + left_px + line[x]) & 0xFF;
            }
            line += stride;
        }
    }
}
736 
/**
 * Undo gradient prediction on one component of a packed plane, interlaced
 * layout: every two field rows form one logical line, so vertical
 * neighbours are stride2 away and the second row of a pair takes its left
 * context from the end of the first row.
 *
 * @param src    points at this component's first byte within the packed data
 * @param step   bytes between horizontally adjacent samples of the component
 * @param rmode  nonzero aligns slice boundaries to 4 rows instead of 2
 */
static void restore_gradient_packed_il(uint8_t *src, int step, ptrdiff_t stride,
                                       int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;

    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1; // process row pairs
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = bsrc[0];
        for (i = step; i < width * step; i += step) {
            bsrc[i] += A;
            A = bsrc[i];
        }
        // second half of the pair continues left prediction from the first
        for (i = 0; i < width * step; i += step) {
            bsrc[stride + i] += A;
            A = bsrc[stride + i];
        }
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest uses gradient
            C = bsrc[-stride2];
            bsrc[0] += C;
            for (i = step; i < width * step; i += step) {
                A = bsrc[i - stride2];          // top (previous row pair)
                B = bsrc[i - (stride2 + step)]; // top-left
                C = bsrc[i - step];             // left
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            }
            // second row of the pair: the row above is only stride away and
            // the left/top-left context wraps from the end of the first row
            A = bsrc[-stride];
            B = bsrc[-(step + stride + stride - width * step)];
            C = bsrc[width * step - step];
            bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
            for (i = step; i < width * step; i += step) {
                A = bsrc[i - stride];
                B = bsrc[i - (step + stride)];
                C = bsrc[i - step + stride];
                bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
            }
            bsrc += stride2;
        }
    }
}
795 
796 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
797  AVPacket *avpkt)
798 {
799  const uint8_t *buf = avpkt->data;
800  int buf_size = avpkt->size;
801  UtvideoContext *c = avctx->priv_data;
802  int i, j;
803  const uint8_t *plane_start[5];
804  int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
805  int ret;
806  GetByteContext gb;
807  ThreadFrame frame = { .f = data };
808 
809  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
810  return ret;
811 
812  /* parse plane structure to get frame flags and validate slice offsets */
813  bytestream2_init(&gb, buf, buf_size);
814  if (c->pro) {
816  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
817  return AVERROR_INVALIDDATA;
818  }
819  c->frame_info = bytestream2_get_le32u(&gb);
820  c->slices = ((c->frame_info >> 16) & 0xff) + 1;
821  for (i = 0; i < c->planes; i++) {
822  plane_start[i] = gb.buffer;
823  if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
824  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
825  return AVERROR_INVALIDDATA;
826  }
827  slice_start = 0;
828  slice_end = 0;
829  for (j = 0; j < c->slices; j++) {
830  slice_end = bytestream2_get_le32u(&gb);
831  if (slice_end < 0 || slice_end < slice_start ||
832  bytestream2_get_bytes_left(&gb) < slice_end) {
833  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
834  return AVERROR_INVALIDDATA;
835  }
836  slice_size = slice_end - slice_start;
837  slice_start = slice_end;
838  max_slice_size = FFMAX(max_slice_size, slice_size);
839  }
840  plane_size = slice_end;
841  bytestream2_skipu(&gb, plane_size);
842  bytestream2_skipu(&gb, 1024);
843  }
844  plane_start[c->planes] = gb.buffer;
845  } else {
846  for (i = 0; i < c->planes; i++) {
847  plane_start[i] = gb.buffer;
848  if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
849  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
850  return AVERROR_INVALIDDATA;
851  }
852  bytestream2_skipu(&gb, 256);
853  slice_start = 0;
854  slice_end = 0;
855  for (j = 0; j < c->slices; j++) {
856  slice_end = bytestream2_get_le32u(&gb);
857  if (slice_end < 0 || slice_end < slice_start ||
858  bytestream2_get_bytes_left(&gb) < slice_end) {
859  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
860  return AVERROR_INVALIDDATA;
861  }
862  slice_size = slice_end - slice_start;
863  slice_start = slice_end;
864  max_slice_size = FFMAX(max_slice_size, slice_size);
865  }
866  plane_size = slice_end;
867  bytestream2_skipu(&gb, plane_size);
868  }
869  plane_start[c->planes] = gb.buffer;
871  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
872  return AVERROR_INVALIDDATA;
873  }
874  c->frame_info = bytestream2_get_le32u(&gb);
875  }
876  av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
877  c->frame_info);
878 
879  c->frame_pred = (c->frame_info >> 8) & 3;
880 
882  max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
883 
884  if (!c->slice_bits) {
885  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
886  return AVERROR(ENOMEM);
887  }
888 
889  switch (c->avctx->pix_fmt) {
890  case AV_PIX_FMT_RGB24:
891  case AV_PIX_FMT_RGBA:
892  for (i = 0; i < c->planes; i++) {
893  ret = decode_plane(c, i, frame.f->data[0] + ff_ut_rgb_order[i],
894  c->planes, frame.f->linesize[0], avctx->width,
895  avctx->height, plane_start[i],
896  c->frame_pred == PRED_LEFT);
897  if (ret)
898  return ret;
899  if (c->frame_pred == PRED_MEDIAN) {
900  if (!c->interlaced) {
902  c->planes, frame.f->linesize[0], avctx->width,
903  avctx->height, c->slices, 0);
904  } else {
906  c->planes, frame.f->linesize[0],
907  avctx->width, avctx->height, c->slices,
908  0);
909  }
910  } else if (c->frame_pred == PRED_GRADIENT) {
911  if (!c->interlaced) {
913  c->planes, frame.f->linesize[0], avctx->width,
914  avctx->height, c->slices, 0);
915  } else {
917  c->planes, frame.f->linesize[0],
918  avctx->width, avctx->height, c->slices,
919  0);
920  }
921  }
922  }
923  restore_rgb_planes(frame.f->data[0], c->planes, frame.f->linesize[0],
924  avctx->width, avctx->height);
925  break;
926  case AV_PIX_FMT_GBRAP10:
927  case AV_PIX_FMT_GBRP10:
928  for (i = 0; i < c->planes; i++) {
929  ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1,
930  frame.f->linesize[i] / 2, avctx->width,
931  avctx->height, plane_start[i],
932  plane_start[i + 1] - 1024,
933  c->frame_pred == PRED_LEFT);
934  if (ret)
935  return ret;
936  }
937  restore_rgb_planes10(frame.f, avctx->width, avctx->height);
938  break;
939  case AV_PIX_FMT_YUV420P:
940  for (i = 0; i < 3; i++) {
941  ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
942  avctx->width >> !!i, avctx->height >> !!i,
943  plane_start[i], c->frame_pred == PRED_LEFT);
944  if (ret)
945  return ret;
946  if (c->frame_pred == PRED_MEDIAN) {
947  if (!c->interlaced) {
948  restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
949  avctx->width >> !!i, avctx->height >> !!i,
950  c->slices, !i);
951  } else {
952  restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
953  avctx->width >> !!i,
954  avctx->height >> !!i,
955  c->slices, !i);
956  }
957  } else if (c->frame_pred == PRED_GRADIENT) {
958  if (!c->interlaced) {
959  restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
960  avctx->width >> !!i, avctx->height >> !!i,
961  c->slices, !i);
962  } else {
963  restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
964  avctx->width >> !!i,
965  avctx->height >> !!i,
966  c->slices, !i);
967  }
968  }
969  }
970  break;
971  case AV_PIX_FMT_YUV422P:
972  for (i = 0; i < 3; i++) {
973  ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
974  avctx->width >> !!i, avctx->height,
975  plane_start[i], c->frame_pred == PRED_LEFT);
976  if (ret)
977  return ret;
978  if (c->frame_pred == PRED_MEDIAN) {
979  if (!c->interlaced) {
980  restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
981  avctx->width >> !!i, avctx->height,
982  c->slices, 0);
983  } else {
984  restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
985  avctx->width >> !!i, avctx->height,
986  c->slices, 0);
987  }
988  } else if (c->frame_pred == PRED_GRADIENT) {
989  if (!c->interlaced) {
990  restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
991  avctx->width >> !!i, avctx->height,
992  c->slices, 0);
993  } else {
994  restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
995  avctx->width >> !!i, avctx->height,
996  c->slices, 0);
997  }
998  }
999  }
1000  break;
1001  case AV_PIX_FMT_YUV444P:
1002  for (i = 0; i < 3; i++) {
1003  ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
1004  avctx->width, avctx->height,
1005  plane_start[i], c->frame_pred == PRED_LEFT);
1006  if (ret)
1007  return ret;
1008  if (c->frame_pred == PRED_MEDIAN) {
1009  if (!c->interlaced) {
1010  restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
1011  avctx->width, avctx->height,
1012  c->slices, 0);
1013  } else {
1014  restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
1015  avctx->width, avctx->height,
1016  c->slices, 0);
1017  }
1018  } else if (c->frame_pred == PRED_GRADIENT) {
1019  if (!c->interlaced) {
1020  restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
1021  avctx->width, avctx->height,
1022  c->slices, 0);
1023  } else {
1024  restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
1025  avctx->width, avctx->height,
1026  c->slices, 0);
1027  }
1028  }
1029  }
1030  break;
1031  case AV_PIX_FMT_YUV422P10:
1032  for (i = 0; i < 3; i++) {
1033  ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1, frame.f->linesize[i] / 2,
1034  avctx->width >> !!i, avctx->height,
1035  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
1036  if (ret)
1037  return ret;
1038  }
1039  break;
1040  }
1041 
1042  frame.f->key_frame = 1;
1043  frame.f->pict_type = AV_PICTURE_TYPE_I;
1044  frame.f->interlaced_frame = !!c->interlaced;
1045 
1046  *got_frame = 1;
1047 
1048  /* always report that the buffer was completely consumed */
1049  return buf_size;
1050 }
1051 
1053 {
1054  UtvideoContext * const c = avctx->priv_data;
1055 
1056  c->avctx = avctx;
1057 
1058  ff_bswapdsp_init(&c->bdsp);
1060 
1061  if (avctx->extradata_size >= 16) {
1062  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1063  avctx->extradata[3], avctx->extradata[2],
1064  avctx->extradata[1], avctx->extradata[0]);
1065  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1066  AV_RB32(avctx->extradata + 4));
1067  c->frame_info_size = AV_RL32(avctx->extradata + 8);
1068  c->flags = AV_RL32(avctx->extradata + 12);
1069 
1070  if (c->frame_info_size != 4)
1071  avpriv_request_sample(avctx, "Frame info not 4 bytes");
1072  av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
1073  c->slices = (c->flags >> 24) + 1;
1074  c->compression = c->flags & 1;
1075  c->interlaced = c->flags & 0x800;
1076  } else if (avctx->extradata_size == 8) {
1077  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1078  avctx->extradata[3], avctx->extradata[2],
1079  avctx->extradata[1], avctx->extradata[0]);
1080  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1081  AV_RB32(avctx->extradata + 4));
1082  c->interlaced = 0;
1083  c->pro = 1;
1084  c->frame_info_size = 4;
1085  } else {
1086  av_log(avctx, AV_LOG_ERROR,
1087  "Insufficient extradata size %d, should be at least 16\n",
1088  avctx->extradata_size);
1089  return AVERROR_INVALIDDATA;
1090  }
1091 
1092  c->slice_bits_size = 0;
1093 
1094  switch (avctx->codec_tag) {
1095  case MKTAG('U', 'L', 'R', 'G'):
1096  c->planes = 3;
1097  avctx->pix_fmt = AV_PIX_FMT_RGB24;
1098  break;
1099  case MKTAG('U', 'L', 'R', 'A'):
1100  c->planes = 4;
1101  avctx->pix_fmt = AV_PIX_FMT_RGBA;
1102  break;
1103  case MKTAG('U', 'L', 'Y', '0'):
1104  c->planes = 3;
1105  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1106  avctx->colorspace = AVCOL_SPC_BT470BG;
1107  break;
1108  case MKTAG('U', 'L', 'Y', '2'):
1109  c->planes = 3;
1110  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1111  avctx->colorspace = AVCOL_SPC_BT470BG;
1112  break;
1113  case MKTAG('U', 'L', 'Y', '4'):
1114  c->planes = 3;
1115  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1116  avctx->colorspace = AVCOL_SPC_BT470BG;
1117  break;
1118  case MKTAG('U', 'Q', 'Y', '2'):
1119  c->planes = 3;
1120  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
1121  break;
1122  case MKTAG('U', 'Q', 'R', 'G'):
1123  c->planes = 3;
1124  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
1125  break;
1126  case MKTAG('U', 'Q', 'R', 'A'):
1127  c->planes = 4;
1128  avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
1129  break;
1130  case MKTAG('U', 'L', 'H', '0'):
1131  c->planes = 3;
1132  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1133  avctx->colorspace = AVCOL_SPC_BT709;
1134  break;
1135  case MKTAG('U', 'L', 'H', '2'):
1136  c->planes = 3;
1137  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1138  avctx->colorspace = AVCOL_SPC_BT709;
1139  break;
1140  case MKTAG('U', 'L', 'H', '4'):
1141  c->planes = 3;
1142  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1143  avctx->colorspace = AVCOL_SPC_BT709;
1144  break;
1145  default:
1146  av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
1147  avctx->codec_tag);
1148  return AVERROR_INVALIDDATA;
1149  }
1150 
1151  return 0;
1152 }
1153 
1155 {
1156  UtvideoContext * const c = avctx->priv_data;
1157 
1158  av_freep(&c->slice_bits);
1159 
1160  return 0;
1161 }
1162 
1164  .name = "utvideo",
1165  .long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
1166  .type = AVMEDIA_TYPE_VIDEO,
1167  .id = AV_CODEC_ID_UTVIDEO,
1168  .priv_data_size = sizeof(UtvideoContext),
1169  .init = decode_init,
1170  .close = decode_end,
1171  .decode = decode_frame,
1172  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1173  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
1174 };
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
Definition: bswapdsp.h:25
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:456
static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:428
static void restore_gradient_packed_il(uint8_t *src, int step, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:737
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
This structure describes decoded (raw) audio or video data.
Definition: frame.h:194
int ff_ut10_huff_cmp_len(const void *a, const void *b)
Definition: utvideo.c:43
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
#define C
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:371
uint32_t flags
Definition: utvideo.h:76
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:67
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AVFrame * f
Definition: thread.h:36
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:64
const char * g
Definition: vf_curves.c:112
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:460
int slice_bits_size
Definition: utvideo.h:86
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:268
int size
Definition: avcodec.h:1669
const char * b
Definition: vf_curves.c:113
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:367
static av_cold int decode_end(AVCodecContext *avctx)
Definition: utvideodec.c:1154
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1971
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
#define src
Definition: vp8dsp.c:254
AVCodec.
Definition: avcodec.h:3719
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
static int decode_plane(UtvideoContext *c, int plane_no, uint8_t *dst, int step, ptrdiff_t stride, int width, int height, const uint8_t *src, int use_pred)
Definition: utvideodec.c:231
int interlaced
Definition: utvideo.h:80
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
uint8_t bits
Definition: crc.c:296
uint8_t
#define av_cold
Definition: attributes.h:82
static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:605
static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:643
Multithreading support functions.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1858
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:87
uint32_t frame_info
Definition: utvideo.h:76
static AVFrame * frame
#define height
uint8_t * data
Definition: avcodec.h:1668
const uint8_t * buffer
Definition: bytestream.h:34
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:170
bitstream reader API header.
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:341
#define A(x)
Definition: vp56_arith.h:28
const int ff_ut_rgb_order[4]
Definition: utvideo.c:35
#define av_log(a,...)
static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
Definition: utvideodec.c:83
BswapDSPContext bdsp
Definition: utvideo.h:72
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:587
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static av_cold int decode_init(AVCodecContext *avctx)
Definition: utvideodec.c:1052
#define AVERROR(e)
Definition: error.h:43
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:179
AVCodecContext * avctx
Definition: utvideo.h:71
const char * r
Definition: vf_curves.c:111
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
const char * name
Name of the codec implementation.
Definition: avcodec.h:3726
uint32_t frame_info_size
Definition: utvideo.h:76
#define FFMAX(a, b)
Definition: common.h:94
#define fail()
Definition: checkasm.h:90
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1061
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:94
Definition: vlc.h:26
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:66
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:469
static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:380
int compression
Definition: utvideo.h:79
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:277
#define FFMIN(a, b)
Definition: common.h:96
#define width
int width
picture width / height.
Definition: avcodec.h:1930
Definition: vf_geq.c:46
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:554
static void restore_rgb_planes10(AVFrame *frame, int width, int height)
Definition: utvideodec.c:354
int bits
Definition: vlc.h:27
static void restore_gradient_packed(uint8_t *src, int step, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:694
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: utvideodec.c:796
Common Ut Video header.
int frame_pred
Definition: utvideo.h:81
uint8_t len
Definition: magicyuv.c:49
Libavcodec external API header.
static void restore_rgb_planes(uint8_t *src, int step, ptrdiff_t stride, int width, int height)
Definition: utvideodec.c:336
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:225
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
main external API structure.
Definition: avcodec.h:1743
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1775
void * buf
Definition: avisynth_c.h:690
int extradata_size
Definition: avcodec.h:1859
void ff_llviddsp_init(LLVidDSPContext *c)
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2473
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:425
#define mid_pred
Definition: mathops.h:97
static int build_huff10(const uint8_t *src, VLC *vlc, int *fsym)
Definition: utvideodec.c:39
#define u(width,...)
uint8_t * slice_bits
Definition: utvideo.h:85
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:352
int ff_ut_huff_cmp_len(const void *a, const void *b)
Definition: utvideo.c:37
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:208
LLVidDSPContext llviddsp
Definition: utvideo.h:73
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:105
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
common internal api header.
static double c[64]
uint16_t sym
Definition: magicyuv.c:48
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:773
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2051
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
void * priv_data
Definition: avcodec.h:1785
int(* add_left_pred)(uint8_t *dst, const uint8_t *src, ptrdiff_t w, int left)
int len
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:272
static void restore_median_packed_il(uint8_t *src, int step, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:535
#define av_freep(p)
static void restore_median_packed(uint8_t *src, int step, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:479
#define stride
#define MKTAG(a, b, c, d)
Definition: common.h:342
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
AVCodec ff_utvideo_decoder
Definition: utvideodec.c:1163
This structure stores compressed data.
Definition: avcodec.h:1645
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:354
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:998
for(j=16;j >0;--j)
void(* add_median_pred)(uint8_t *dst, const uint8_t *top, const uint8_t *diff, ptrdiff_t w, int *left, int *left_top)
static int decode_plane10(UtvideoContext *c, int plane_no, uint16_t *dst, int step, ptrdiff_t stride, int width, int height, const uint8_t *src, const uint8_t *huff, int use_pred)
Definition: utvideodec.c:126