FFmpeg
utvideodec.c
Go to the documentation of this file.
1 /*
2  * Ut Video decoder
3  * Copyright (c) 2011 Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video decoder
25  */
26 
27 #include <inttypes.h>
28 #include <stdlib.h>
29 
30 #define CACHED_BITSTREAM_READER !ARCH_X86_32
31 #define UNCHECKED_BITSTREAM_READER 1
32 
33 #include "libavutil/intreadwrite.h"
34 #include "libavutil/pixdesc.h"
35 #include "avcodec.h"
36 #include "bswapdsp.h"
37 #include "bytestream.h"
38 #include "codec_internal.h"
39 #include "get_bits.h"
40 #include "thread.h"
41 #include "utvideo.h"
42 
/* One entry of the canonical Huffman table built by build_huff(). */
typedef struct HuffEntry {
    uint8_t len;  /* code length in bits; 0 marks an unused symbol */
    uint16_t sym; /* the symbol value this code decodes to */
} HuffEntry;
47 
48 static int build_huff(UtvideoContext *c, const uint8_t *src, VLC *vlc,
49  VLC_MULTI *multi, int *fsym, unsigned nb_elems)
50 {
51  int i;
52  HuffEntry he[1024];
53  uint8_t bits[1024];
54  uint16_t codes_count[33] = { 0 };
55 
56  *fsym = -1;
57  for (i = 0; i < nb_elems; i++) {
58  if (src[i] == 0) {
59  *fsym = i;
60  return 0;
61  } else if (src[i] == 255) {
62  bits[i] = 0;
63  } else if (src[i] <= 32) {
64  bits[i] = src[i];
65  } else
66  return AVERROR_INVALIDDATA;
67 
68  codes_count[bits[i]]++;
69  }
70  if (codes_count[0] == nb_elems)
71  return AVERROR_INVALIDDATA;
72 
73  /* For Ut Video, longer codes are to the left of the tree and
74  * for codes with the same length the symbol is descending from
75  * left to right. So after the next loop --codes_count[i] will
76  * be the index of the first (lowest) symbol of length i when
77  * indexed by the position in the tree with left nodes being first. */
78  for (int i = 31; i >= 0; i--)
79  codes_count[i] += codes_count[i + 1];
80 
81  for (unsigned i = 0; i < nb_elems; i++)
82  he[--codes_count[bits[i]]] = (HuffEntry) { bits[i], i };
83 
84 #define VLC_BITS 11
85  return ff_vlc_init_multi_from_lengths(vlc, multi, VLC_BITS, nb_elems, codes_count[0],
86  &he[0].len, sizeof(*he),
87  &he[0].sym, sizeof(*he), 2, 0, 0, c->avctx);
88 }
89 
/* Decode one line of `width` samples of `b` bytes each into `dest`:
 * with the cached bitstream reader, use the multi-symbol VLC while more
 * than `end` samples remain, then finish the tail with the plain VLC.
 * When `use_pred` is set, symbols are decoded into the scratch buffer
 * c->buffer and resolved against `prev` via left prediction.
 * Requires buf, i, ret, gb, vlc, multi, use_pred, prev, dest, stride,
 * width and c in the caller's scope; jumps to `fail` on decode error. */
#define READ_PLANE(b, end) \
{ \
    buf = !use_pred ? dest : c->buffer; \
    i = 0; \
    for (; CACHED_BITSTREAM_READER && i < width-end && get_bits_left(&gb) > 0;) {\
        ret = get_vlc_multi(&gb, (uint8_t *)buf + i * b, multi.table, \
                            vlc.table, VLC_BITS, 3); \
        if (ret > 0) \
            i += ret; \
        if (ret <= 0) \
            goto fail; \
    } \
    for (; i < width && get_bits_left(&gb) > 0; i++) \
        buf[i] = get_vlc2(&gb, vlc.table, VLC_BITS, 3); \
    if (use_pred) { \
        if (b == 2) \
            c->llviddsp.add_left_pred_int16((uint16_t *)dest, (const uint16_t *)buf, 0x3ff, width, prev); \
        else \
            c->llviddsp.add_left_pred((uint8_t *)dest, (const uint8_t *)buf, width, prev); \
    } \
    prev = dest[width-1]; \
    dest += stride; \
}
113 
/**
 * Decode one 10-bit plane, slice by slice.
 *
 * @param c        decoder context
 * @param plane_no plane index (unused here, kept for symmetry with decode_plane)
 * @param dst      destination plane, uint16_t samples
 * @param stride   destination stride in uint16_t units
 * @param src      slice offset table followed by the slice bitstreams
 * @param huff     1024 Huffman code lengths for this plane
 * @param use_pred non-zero to apply left prediction on decoded values
 * @return 0 on success, negative AVERROR on failure
 */
static int decode_plane10(UtvideoContext *c, int plane_no,
                          uint16_t *dst, ptrdiff_t stride,
                          int width, int height,
                          const uint8_t *src, const uint8_t *huff,
                          int use_pred)
{
    int i, j, slice, pix, ret;
    int sstart, send;
    VLC_MULTI multi;
    VLC vlc;
    GetBitContext gb;
    int prev, fsym;

    if ((ret = build_huff(c, huff, &vlc, &multi, &fsym, 1024)) < 0) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return ret;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        /* no VLC tables were allocated in this case, so no cleanup needed */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint16_t *dest;

            sstart = send;
            send   = (height * (slice + 1) / c->slices);
            dest   = dst + sstart * stride;

            prev = 0x200; /* midpoint of the 10-bit range */
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
                    pix = fsym;
                    if (use_pred) {
                        prev += pix;
                        prev &= 0x3FF; /* wrap within 10 bits */
                        pix = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        return 0;
    }

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint16_t *dest, *buf;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send   = (height * (slice + 1) / c->slices);
        dest   = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end   = AV_RL32(src + slice * 4);
        slice_size       = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        /* the slice data is stored byte-swapped; clear padding, swap into
         * the scratch buffer, then read it with the bit reader */
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);

        prev = 0x200;
        for (j = sstart; j < send; j++)
            READ_PLANE(2, 3)
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    }

    ff_vlc_free(&vlc);
    ff_vlc_free_multi(&multi);

    return 0;
fail:
    ff_vlc_free(&vlc);
    ff_vlc_free_multi(&multi);
    return AVERROR_INVALIDDATA;
}
200 
201 static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt)
202 {
203  const int is_luma = (pix_fmt == AV_PIX_FMT_YUV420P) && !plane_no;
204 
205  if (interlaced)
206  return ~(1 + 2 * is_luma);
207 
208  return ~is_luma;
209 }
210 
/**
 * Decode one 8-bit plane, slice by slice.
 *
 * Handles both the Huffman-coded classic layout and, when c->pack is set,
 * the "packed" (UMxx) layout with separate control and packed-bit streams.
 *
 * @param c        decoder context
 * @param plane_no plane index (selects packed/control streams and cmask)
 * @param dst      destination plane
 * @param stride   destination stride in bytes
 * @param src      Huffman lengths (256 bytes), then slice offset table and
 *                 slice bitstreams (classic layout only)
 * @param use_pred non-zero to apply left prediction on decoded values
 * @return 0 on success, negative AVERROR on failure
 */
static int decode_plane(UtvideoContext *c, int plane_no,
                        uint8_t *dst, ptrdiff_t stride,
                        int width, int height,
                        const uint8_t *src, int use_pred)
{
    int i, j, slice, pix;
    int sstart, send;
    VLC_MULTI multi;
    VLC vlc;
    GetBitContext gb;
    int ret, prev, fsym;
    const int cmask = compute_cmask(plane_no, c->interlaced, c->avctx->pix_fmt);

    if (c->pack) {
        /* packed layout: one 3-bit control word per 8-pixel group selects
         * the bit width of the group's samples in the packed stream */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            GetBitContext cbit, pbit;
            uint8_t *dest, *p;

            ret = init_get_bits8_le(&cbit, c->control_stream[plane_no][slice], c->control_stream_size[plane_no][slice]);
            if (ret < 0)
                return ret;

            ret = init_get_bits8_le(&pbit, c->packed_stream[plane_no][slice], c->packed_stream_size[plane_no][slice]);
            if (ret < 0)
                return ret;

            sstart = send;
            send   = (height * (slice + 1) / c->slices) & cmask;
            dest   = dst + sstart * stride;

            /* 3 control bits are needed for every 8 output bytes */
            if (3 * ((dst + send * stride - dest + 7)/8) > get_bits_left(&cbit))
                return AVERROR_INVALIDDATA;

            for (p = dest; p < dst + send * stride; p += 8) {
                int bits = get_bits_le(&cbit, 3);

                if (bits == 0) {
                    /* whole group is zero */
                    *(uint64_t *) p = 0;
                } else {
                    /* samples are stored with bits+1 bits each; sub/add
                     * convert the stored offset form back to pixel values */
                    uint32_t sub = 0x80 >> (8 - (bits + 1)), add;
                    int k;

                    if ((bits + 1) * 8 > get_bits_left(&pbit))
                        return AVERROR_INVALIDDATA;

                    for (k = 0; k < 8; k++) {

                        p[k] = get_bits_le(&pbit, bits + 1);
                        add = (~p[k] & sub) << (8 - bits);
                        p[k] -= sub;
                        p[k] += add;
                    }
                }
            }
        }

        return 0;
    }

    if (build_huff(c, src, &vlc, &multi, &fsym, 256)) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return AVERROR_INVALIDDATA;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        /* no VLC tables were allocated in this case, so no cleanup needed */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint8_t *dest;

            sstart = send;
            send   = (height * (slice + 1) / c->slices) & cmask;
            dest   = dst + sstart * stride;

            prev = 0x80; /* midpoint of the 8-bit range */
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
                    pix = fsym;
                    if (use_pred) {
                        prev += (unsigned)pix;
                        pix = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        return 0;
    }

    src += 256; /* skip the Huffman length table */

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint8_t *dest, *buf;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send   = (height * (slice + 1) / c->slices) & cmask;
        dest   = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end   = AV_RL32(src + slice * 4);
        slice_size       = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        /* the slice data is stored byte-swapped; clear padding, swap into
         * the scratch buffer, then read it with the bit reader */
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);

        prev = 0x80;
        for (j = sstart; j < send; j++)
            READ_PLANE(1, 5)
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    }

    ff_vlc_free(&vlc);
    ff_vlc_free_multi(&multi);

    return 0;
fail:
    ff_vlc_free(&vlc);
    ff_vlc_free_multi(&multi);
    return AVERROR_INVALIDDATA;
}
345 
346 #undef A
347 #undef B
348 #undef C
349 
/**
 * Reverse median prediction on a progressive plane, slice by slice.
 *
 * Per slice: the first line uses left prediction (biased by 0x80), the
 * second line uses top prediction for its first pixel and median prediction
 * for the rest, and all later lines use median prediction throughout.
 *
 * @param rmode when non-zero, slice boundaries are rounded down to even
 *              lines (callers pass !plane_no for 4:2:0 luma)
 */
static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                  int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~rmode;

    for (slice = 0; slice < slices; slice++) {
        slice_start  = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;

        if (!slice_height)
            continue;
        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        bsrc += stride;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C = bsrc[-stride];
        bsrc[0] += C;
        A = bsrc[0];
        for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
            B = bsrc[i - stride];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[i];
        }
        if (width > 16)
            c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride + 16,
                                        bsrc + 16, width - 16, &A, &B);

        bsrc += stride;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            c->llviddsp.add_median_pred(bsrc, bsrc - stride,
                                        bsrc, width, &A, &B);
            bsrc += stride;
        }
    }
}
397 
398 /* UtVideo interlaced mode treats every two lines as a single one,
399  * so restoring function should take care of possible padding between
400  * two parts of the same "line".
401  */
/* UtVideo interlaced mode treats every two lines as a single one,
 * so restoring function should take care of possible padding between
 * two parts of the same "line".
 */
static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                     int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    /* field pairs: align slices to 2 lines, or 4 when rmode is set */
    const int cmask = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;

    for (slice = 0; slice < slices; slice++) {
        slice_start  = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1; /* height in line pairs */
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        /* second field line continues left prediction from the first */
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C = bsrc[-stride2];
        bsrc[0] += C;
        A = bsrc[0];
        for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
            B = bsrc[i - stride2];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[i];
        }
        if (width > 16)
            c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride2 + 16,
                                        bsrc + 16, width - 16, &A, &B);

        /* second field line: predicted from the end of the first field line */
        c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                    bsrc + stride, width, &A, &B);
        bsrc += stride2;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
                                        bsrc, width, &A, &B);
            c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                        bsrc + stride, width, &A, &B);
            bsrc += stride2;
        }
    }
}
456 
457 static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
458  int width, int height, int slices, int rmode)
459 {
460  int i, j, slice;
461  int A, B, C;
462  uint8_t *bsrc;
463  int slice_start, slice_height;
464  const int cmask = ~rmode;
465  int min_width = FFMIN(width, 32);
466 
467  for (slice = 0; slice < slices; slice++) {
468  slice_start = ((slice * height) / slices) & cmask;
469  slice_height = ((((slice + 1) * height) / slices) & cmask) -
470  slice_start;
471 
472  if (!slice_height)
473  continue;
474  bsrc = src + slice_start * stride;
475 
476  // first line - left neighbour prediction
477  bsrc[0] += 0x80;
478  c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
479  bsrc += stride;
480  if (slice_height <= 1)
481  continue;
482  for (j = 1; j < slice_height; j++) {
483  // second line - first element has top prediction, the rest uses gradient
484  bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
485  for (i = 1; i < min_width; i++) { /* dsp need align 32 */
486  A = bsrc[i - stride];
487  B = bsrc[i - (stride + 1)];
488  C = bsrc[i - 1];
489  bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
490  }
491  if (width > 32)
492  c->llviddsp.add_gradient_pred(bsrc + 32, stride, width - 32);
493  bsrc += stride;
494  }
495  }
496 }
497 
/* Interlaced variant of restore_gradient_planar: every two lines form one
 * logical line, so the "previous line" for prediction is stride2 away for
 * the first field line and the top neighbours of the second field line come
 * from the end of the first field line. */
static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                       int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    /* field pairs: align slices to 2 lines, or 4 when rmode is set */
    const int cmask = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;
    int min_width = FFMIN(width, 32);

    for (slice = 0; slice < slices; slice++) {
        slice_start  = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1; /* height in line pairs */
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        /* second field line continues left prediction from the first */
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest uses gradient
            bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
            for (i = 1; i < min_width; i++) { /* dsp need align 32 */
                A = bsrc[i - stride2];
                B = bsrc[i - (stride2 + 1)];
                C = bsrc[i - 1];
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            }
            if (width > 32)
                c->llviddsp.add_gradient_pred(bsrc + 32, stride2, width - 32);

            /* second field line: top/topleft neighbours wrap around to the
             * end of the first field line (same logical line) */
            A = bsrc[-stride];
            B = bsrc[-(1 + stride + stride - width)];
            C = bsrc[width - 1];
            bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
            for (i = 1; i < width; i++) {
                A = bsrc[i - stride];
                B = bsrc[i - (1 + stride)];
                C = bsrc[i - 1 + stride];
                bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
            }
            bsrc += stride2;
        }
    }
}
552 
554  int *got_frame, AVPacket *avpkt)
555 {
556  const uint8_t *buf = avpkt->data;
557  int buf_size = avpkt->size;
558  UtvideoContext *c = avctx->priv_data;
559  int i, j;
560  const uint8_t *plane_start[5];
561  int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
562  int ret;
563  GetByteContext gb;
564 
565  if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
566  return ret;
567 
568  /* parse plane structure to get frame flags and validate slice offsets */
569  bytestream2_init(&gb, buf, buf_size);
570 
571  if (c->pack) {
572  const uint8_t *packed_stream;
573  const uint8_t *control_stream;
574  GetByteContext pb;
575  uint32_t nb_cbs;
576  int left;
577 
578  c->frame_info = PRED_GRADIENT << 8;
579 
580  if (bytestream2_get_byte(&gb) != 1)
581  return AVERROR_INVALIDDATA;
582  bytestream2_skip(&gb, 3);
583  c->offset = bytestream2_get_le32(&gb);
584 
585  if (buf_size <= c->offset + 8LL)
586  return AVERROR_INVALIDDATA;
587 
588  bytestream2_init(&pb, buf + 8 + c->offset, buf_size - 8 - c->offset);
589 
590  nb_cbs = bytestream2_get_le32(&pb);
591  if (nb_cbs > c->offset)
592  return AVERROR_INVALIDDATA;
593 
594  packed_stream = buf + 8;
595  control_stream = packed_stream + (c->offset - nb_cbs);
596  left = control_stream - packed_stream;
597 
598  for (i = 0; i < c->planes; i++) {
599  for (j = 0; j < c->slices; j++) {
600  c->packed_stream[i][j] = packed_stream;
601  c->packed_stream_size[i][j] = bytestream2_get_le32(&pb);
602  if (c->packed_stream_size[i][j] > left)
603  return AVERROR_INVALIDDATA;
604  left -= c->packed_stream_size[i][j];
605  packed_stream += c->packed_stream_size[i][j];
606  }
607  }
608 
609  left = buf + buf_size - control_stream;
610 
611  for (i = 0; i < c->planes; i++) {
612  for (j = 0; j < c->slices; j++) {
613  c->control_stream[i][j] = control_stream;
614  c->control_stream_size[i][j] = bytestream2_get_le32(&pb);
615  if (c->control_stream_size[i][j] > left)
616  return AVERROR_INVALIDDATA;
617  left -= c->control_stream_size[i][j];
618  control_stream += c->control_stream_size[i][j];
619  }
620  }
621  } else if (c->pro) {
622  if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
623  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
624  return AVERROR_INVALIDDATA;
625  }
626  c->frame_info = bytestream2_get_le32u(&gb);
627  c->slices = ((c->frame_info >> 16) & 0xff) + 1;
628  for (i = 0; i < c->planes; i++) {
629  plane_start[i] = gb.buffer;
630  if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
631  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
632  return AVERROR_INVALIDDATA;
633  }
634  slice_start = 0;
635  slice_end = 0;
636  for (j = 0; j < c->slices; j++) {
637  slice_end = bytestream2_get_le32u(&gb);
638  if (slice_end < 0 || slice_end < slice_start ||
639  bytestream2_get_bytes_left(&gb) < slice_end + 1024LL) {
640  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
641  return AVERROR_INVALIDDATA;
642  }
643  slice_size = slice_end - slice_start;
644  slice_start = slice_end;
645  max_slice_size = FFMAX(max_slice_size, slice_size);
646  }
647  plane_size = slice_end;
648  bytestream2_skipu(&gb, plane_size);
649  bytestream2_skipu(&gb, 1024);
650  }
651  plane_start[c->planes] = gb.buffer;
652  } else {
653  for (i = 0; i < c->planes; i++) {
654  plane_start[i] = gb.buffer;
655  if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
656  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
657  return AVERROR_INVALIDDATA;
658  }
659  bytestream2_skipu(&gb, 256);
660  slice_start = 0;
661  slice_end = 0;
662  for (j = 0; j < c->slices; j++) {
663  slice_end = bytestream2_get_le32u(&gb);
664  if (slice_end < 0 || slice_end < slice_start ||
666  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
667  return AVERROR_INVALIDDATA;
668  }
669  slice_size = slice_end - slice_start;
670  slice_start = slice_end;
671  max_slice_size = FFMAX(max_slice_size, slice_size);
672  }
673  plane_size = slice_end;
674  bytestream2_skipu(&gb, plane_size);
675  }
676  plane_start[c->planes] = gb.buffer;
677  if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
678  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
679  return AVERROR_INVALIDDATA;
680  }
681  c->frame_info = bytestream2_get_le32u(&gb);
682  }
683  av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
684  c->frame_info);
685 
686  c->frame_pred = (c->frame_info >> 8) & 3;
687 
688  max_slice_size += 4*avctx->width;
689 
690  if (!c->pack) {
691  av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
692  max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
693 
694  if (!c->slice_bits) {
695  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
696  return AVERROR(ENOMEM);
697  }
698  }
699 
700  switch (c->avctx->pix_fmt) {
701  case AV_PIX_FMT_GBRP:
702  case AV_PIX_FMT_GBRAP:
703  for (i = 0; i < c->planes; i++) {
704  ret = decode_plane(c, i, frame->data[i],
705  frame->linesize[i], avctx->width,
706  avctx->height, plane_start[i],
707  c->frame_pred == PRED_LEFT);
708  if (ret)
709  return ret;
710  if (c->frame_pred == PRED_MEDIAN) {
711  if (!c->interlaced) {
712  restore_median_planar(c, frame->data[i],
713  frame->linesize[i], avctx->width,
714  avctx->height, c->slices, 0);
715  } else {
717  frame->linesize[i],
718  avctx->width, avctx->height, c->slices,
719  0);
720  }
721  } else if (c->frame_pred == PRED_GRADIENT) {
722  if (!c->interlaced) {
724  frame->linesize[i], avctx->width,
725  avctx->height, c->slices, 0);
726  } else {
728  frame->linesize[i],
729  avctx->width, avctx->height, c->slices,
730  0);
731  }
732  }
733  }
734  c->utdsp.restore_rgb_planes(frame->data[2], frame->data[0], frame->data[1],
735  frame->linesize[2], frame->linesize[0], frame->linesize[1],
736  avctx->width, avctx->height);
737  break;
738  case AV_PIX_FMT_GBRAP10:
739  case AV_PIX_FMT_GBRP10:
740  for (i = 0; i < c->planes; i++) {
741  ret = decode_plane10(c, i, (uint16_t *)frame->data[i],
742  frame->linesize[i] / 2, avctx->width,
743  avctx->height, plane_start[i],
744  plane_start[i + 1] - 1024,
745  c->frame_pred == PRED_LEFT);
746  if (ret)
747  return ret;
748  }
749  c->utdsp.restore_rgb_planes10((uint16_t *)frame->data[2], (uint16_t *)frame->data[0], (uint16_t *)frame->data[1],
750  frame->linesize[2] / 2, frame->linesize[0] / 2, frame->linesize[1] / 2,
751  avctx->width, avctx->height);
752  break;
753  case AV_PIX_FMT_YUV420P:
754  for (i = 0; i < 3; i++) {
755  ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
756  avctx->width >> !!i, avctx->height >> !!i,
757  plane_start[i], c->frame_pred == PRED_LEFT);
758  if (ret)
759  return ret;
760  if (c->frame_pred == PRED_MEDIAN) {
761  if (!c->interlaced) {
762  restore_median_planar(c, frame->data[i], frame->linesize[i],
763  avctx->width >> !!i, avctx->height >> !!i,
764  c->slices, !i);
765  } else {
766  restore_median_planar_il(c, frame->data[i], frame->linesize[i],
767  avctx->width >> !!i,
768  avctx->height >> !!i,
769  c->slices, !i);
770  }
771  } else if (c->frame_pred == PRED_GRADIENT) {
772  if (!c->interlaced) {
773  restore_gradient_planar(c, frame->data[i], frame->linesize[i],
774  avctx->width >> !!i, avctx->height >> !!i,
775  c->slices, !i);
776  } else {
777  restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
778  avctx->width >> !!i,
779  avctx->height >> !!i,
780  c->slices, !i);
781  }
782  }
783  }
784  break;
785  case AV_PIX_FMT_YUV422P:
786  for (i = 0; i < 3; i++) {
787  ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
788  avctx->width >> !!i, avctx->height,
789  plane_start[i], c->frame_pred == PRED_LEFT);
790  if (ret)
791  return ret;
792  if (c->frame_pred == PRED_MEDIAN) {
793  if (!c->interlaced) {
794  restore_median_planar(c, frame->data[i], frame->linesize[i],
795  avctx->width >> !!i, avctx->height,
796  c->slices, 0);
797  } else {
798  restore_median_planar_il(c, frame->data[i], frame->linesize[i],
799  avctx->width >> !!i, avctx->height,
800  c->slices, 0);
801  }
802  } else if (c->frame_pred == PRED_GRADIENT) {
803  if (!c->interlaced) {
804  restore_gradient_planar(c, frame->data[i], frame->linesize[i],
805  avctx->width >> !!i, avctx->height,
806  c->slices, 0);
807  } else {
808  restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
809  avctx->width >> !!i, avctx->height,
810  c->slices, 0);
811  }
812  }
813  }
814  break;
815  case AV_PIX_FMT_YUV444P:
816  for (i = 0; i < 3; i++) {
817  ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
818  avctx->width, avctx->height,
819  plane_start[i], c->frame_pred == PRED_LEFT);
820  if (ret)
821  return ret;
822  if (c->frame_pred == PRED_MEDIAN) {
823  if (!c->interlaced) {
824  restore_median_planar(c, frame->data[i], frame->linesize[i],
825  avctx->width, avctx->height,
826  c->slices, 0);
827  } else {
828  restore_median_planar_il(c, frame->data[i], frame->linesize[i],
829  avctx->width, avctx->height,
830  c->slices, 0);
831  }
832  } else if (c->frame_pred == PRED_GRADIENT) {
833  if (!c->interlaced) {
834  restore_gradient_planar(c, frame->data[i], frame->linesize[i],
835  avctx->width, avctx->height,
836  c->slices, 0);
837  } else {
838  restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
839  avctx->width, avctx->height,
840  c->slices, 0);
841  }
842  }
843  }
844  break;
846  for (i = 0; i < 3; i++) {
847  ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
848  avctx->width >> !!i, avctx->height >> !!i,
849  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
850  if (ret)
851  return ret;
852  }
853  break;
855  for (i = 0; i < 3; i++) {
856  ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
857  avctx->width >> !!i, avctx->height,
858  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
859  if (ret)
860  return ret;
861  }
862  break;
863  }
864 
865  frame->flags |= AV_FRAME_FLAG_KEY;
866  frame->pict_type = AV_PICTURE_TYPE_I;
867  if (c->interlaced)
869 
870  *got_frame = 1;
871 
872  /* always report that the buffer was completely consumed */
873  return buf_size;
874 }
875 
877 {
878  UtvideoContext * const c = avctx->priv_data;
879  int h_shift, v_shift;
880 
881  c->avctx = avctx;
882 
883  ff_utvideodsp_init(&c->utdsp);
884  ff_bswapdsp_init(&c->bdsp);
885  ff_llviddsp_init(&c->llviddsp);
886 
887  c->slice_bits_size = 0;
888 
889  switch (avctx->codec_tag) {
890  case MKTAG('U', 'L', 'R', 'G'):
891  c->planes = 3;
892  avctx->pix_fmt = AV_PIX_FMT_GBRP;
893  break;
894  case MKTAG('U', 'L', 'R', 'A'):
895  c->planes = 4;
896  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
897  break;
898  case MKTAG('U', 'L', 'Y', '0'):
899  c->planes = 3;
900  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
901  avctx->colorspace = AVCOL_SPC_BT470BG;
902  break;
903  case MKTAG('U', 'L', 'Y', '2'):
904  c->planes = 3;
905  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
906  avctx->colorspace = AVCOL_SPC_BT470BG;
907  break;
908  case MKTAG('U', 'L', 'Y', '4'):
909  c->planes = 3;
910  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
911  avctx->colorspace = AVCOL_SPC_BT470BG;
912  break;
913  case MKTAG('U', 'Q', 'Y', '0'):
914  c->planes = 3;
915  c->pro = 1;
916  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
917  break;
918  case MKTAG('U', 'Q', 'Y', '2'):
919  c->planes = 3;
920  c->pro = 1;
921  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
922  break;
923  case MKTAG('U', 'Q', 'R', 'G'):
924  c->planes = 3;
925  c->pro = 1;
926  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
927  break;
928  case MKTAG('U', 'Q', 'R', 'A'):
929  c->planes = 4;
930  c->pro = 1;
931  avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
932  break;
933  case MKTAG('U', 'L', 'H', '0'):
934  c->planes = 3;
935  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
936  avctx->colorspace = AVCOL_SPC_BT709;
937  break;
938  case MKTAG('U', 'L', 'H', '2'):
939  c->planes = 3;
940  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
941  avctx->colorspace = AVCOL_SPC_BT709;
942  break;
943  case MKTAG('U', 'L', 'H', '4'):
944  c->planes = 3;
945  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
946  avctx->colorspace = AVCOL_SPC_BT709;
947  break;
948  case MKTAG('U', 'M', 'Y', '2'):
949  c->planes = 3;
950  c->pack = 1;
951  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
952  avctx->colorspace = AVCOL_SPC_BT470BG;
953  break;
954  case MKTAG('U', 'M', 'H', '2'):
955  c->planes = 3;
956  c->pack = 1;
957  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
958  avctx->colorspace = AVCOL_SPC_BT709;
959  break;
960  case MKTAG('U', 'M', 'Y', '4'):
961  c->planes = 3;
962  c->pack = 1;
963  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
964  avctx->colorspace = AVCOL_SPC_BT470BG;
965  break;
966  case MKTAG('U', 'M', 'H', '4'):
967  c->planes = 3;
968  c->pack = 1;
969  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
970  avctx->colorspace = AVCOL_SPC_BT709;
971  break;
972  case MKTAG('U', 'M', 'R', 'G'):
973  c->planes = 3;
974  c->pack = 1;
975  avctx->pix_fmt = AV_PIX_FMT_GBRP;
976  break;
977  case MKTAG('U', 'M', 'R', 'A'):
978  c->planes = 4;
979  c->pack = 1;
980  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
981  break;
982  default:
983  av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
984  avctx->codec_tag);
985  return AVERROR_INVALIDDATA;
986  }
987 
988  c->buffer = av_calloc(avctx->width + 8, c->pro?2:1);
989  if (!c->buffer)
990  return AVERROR(ENOMEM);
991 
992  av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &h_shift, &v_shift);
993  if ((avctx->width & ((1<<h_shift)-1)) ||
994  (avctx->height & ((1<<v_shift)-1))) {
995  avpriv_request_sample(avctx, "Odd dimensions");
996  return AVERROR_PATCHWELCOME;
997  }
998 
999  if (c->pack && avctx->extradata_size >= 16) {
1000  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1001  avctx->extradata[3], avctx->extradata[2],
1002  avctx->extradata[1], avctx->extradata[0]);
1003  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1004  AV_RB32(avctx->extradata + 4));
1005  c->compression = avctx->extradata[8];
1006  if (c->compression != 2)
1007  avpriv_request_sample(avctx, "Unknown compression type");
1008  c->slices = avctx->extradata[9] + 1;
1009  } else if (!c->pro && avctx->extradata_size >= 16) {
1010  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1011  avctx->extradata[3], avctx->extradata[2],
1012  avctx->extradata[1], avctx->extradata[0]);
1013  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1014  AV_RB32(avctx->extradata + 4));
1015  c->frame_info_size = AV_RL32(avctx->extradata + 8);
1016  c->flags = AV_RL32(avctx->extradata + 12);
1017 
1018  if (c->frame_info_size != 4)
1019  avpriv_request_sample(avctx, "Frame info not 4 bytes");
1020  av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
1021  c->slices = (c->flags >> 24) + 1;
1022  c->compression = c->flags & 1;
1023  c->interlaced = c->flags & 0x800;
1024  } else if (c->pro && avctx->extradata_size == 8) {
1025  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1026  avctx->extradata[3], avctx->extradata[2],
1027  avctx->extradata[1], avctx->extradata[0]);
1028  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1029  AV_RB32(avctx->extradata + 4));
1030  c->interlaced = 0;
1031  c->frame_info_size = 4;
1032  } else {
1033  av_log(avctx, AV_LOG_ERROR,
1034  "Insufficient extradata size %d, should be at least 16\n",
1035  avctx->extradata_size);
1036  return AVERROR_INVALIDDATA;
1037  }
1038 
1039  return 0;
1040 }
1041 
1043 {
1044  UtvideoContext * const c = avctx->priv_data;
1045 
1046  av_freep(&c->slice_bits);
1047  av_freep(&c->buffer);
1048 
1049  return 0;
1050 }
1051 
1053  .p.name = "utvideo",
1054  CODEC_LONG_NAME("Ut Video"),
1055  .p.type = AVMEDIA_TYPE_VIDEO,
1056  .p.id = AV_CODEC_ID_UTVIDEO,
1057  .priv_data_size = sizeof(UtvideoContext),
1058  .init = decode_init,
1059  .close = decode_end,
1061  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1062 };
A
#define A(x)
Definition: vpx_arith.h:28
utvideo.h
bswapdsp.h
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1025
restore_gradient_planar_il
static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:498
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: utvideodec.c:876
GetByteContext
Definition: bytestream.h:33
HuffEntry::len
uint8_t len
Definition: exr.c:95
PRED_LEFT
@ PRED_LEFT
Definition: utvideo.h:39
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
decode_end
static av_cold int decode_end(AVCodecContext *avctx)
Definition: utvideodec.c:1042
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
pixdesc.h
AVPacket::data
uint8_t * data
Definition: packet.h:374
compute_cmask
static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt)
Definition: utvideodec.c:201
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:465
FFCodec
Definition: codec_internal.h:127
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
restore_gradient_planar
static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:457
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
thread.h
decode_plane10
static int decode_plane10(UtvideoContext *c, int plane_no, uint16_t *dst, ptrdiff_t stride, int width, int height, const uint8_t *src, const uint8_t *huff, int use_pred)
Definition: utvideodec.c:114
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:601
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
fail
#define fail()
Definition: checkasm.h:138
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:481
GetBitContext
Definition: get_bits.h:108
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
READ_PLANE
#define READ_PLANE(b, end)
Definition: utvideodec.c:90
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2964
PRED_GRADIENT
@ PRED_GRADIENT
Definition: utvideo.h:40
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:485
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:539
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
intreadwrite.h
VLC_MULTI
Definition: vlc.h:45
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2015
GetByteContext::buffer
const uint8_t * buffer
Definition: bytestream.h:34
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:365
ff_vlc_free_multi
void ff_vlc_free_multi(VLC_MULTI *vlc)
Definition: vlc.c:484
HuffEntry::sym
uint16_t sym
Definition: exr.c:96
bits
uint8_t bits
Definition: vp3data.h:128
B
#define B
Definition: huffyuv.h:42
get_bits_le
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:356
AV_CODEC_ID_UTVIDEO
@ AV_CODEC_ID_UTVIDEO
Definition: codec_id.h:205
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
PRED_MEDIAN
@ PRED_MEDIAN
Definition: utvideo.h:41
get_bits.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
ff_vlc_init_multi_from_lengths
int ff_vlc_init_multi_from_lengths(VLC *vlc, VLC_MULTI *multi, int nb_bits, int nb_elems, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc_multi()
Definition: vlc.c:430
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:466
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:375
codec_internal.h
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: utvideodec.c:553
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
VLC_BITS
#define VLC_BITS
height
#define height
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
restore_median_planar_il
static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:402
interlaced
uint8_t interlaced
Definition: mxfenc.c:2148
restore_median_planar
static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:350
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:270
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:538
ff_utvideo_decoder
const FFCodec ff_utvideo_decoder
Definition: utvideodec.c:1052
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
UtvideoContext
Definition: utvideo.h:64
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:617
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:654
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:636
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
build_huff
static int build_huff(UtvideoContext *c, const uint8_t *src, VLC *vlc, VLC_MULTI *multi, int *fsym, unsigned nb_elems)
Definition: utvideodec.c:48
mid_pred
#define mid_pred
Definition: mathops.h:98
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:489
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
ff_llviddsp_init
void ff_llviddsp_init(LLVidDSPContext *c)
Definition: lossless_videodsp.c:113
AVCodecContext
main external API structure.
Definition: avcodec.h:437
VLC
Definition: vlc.h:33
HuffEntry
Definition: exr.c:94
decode_plane
static int decode_plane(UtvideoContext *c, int plane_no, uint8_t *dst, ptrdiff_t stride, int width, int height, const uint8_t *src, int use_pred)
Definition: utvideodec.c:211
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
init_get_bits8_le
static int init_get_bits8_le(GetBitContext *s, const uint8_t *buffer, int byte_size)
Definition: get_bits.h:553
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:462
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:464
AVPacket
This structure stores compressed data.
Definition: packet.h:351
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:555
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
ff_utvideodsp_init
av_cold void ff_utvideodsp_init(UTVideoDSPContext *c)
Definition: utvideodsp.c:75
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:617
bytestream.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:597