FFmpeg source listing: libavcodec/ffv1dec.c
/*
 * FFV1 decoder
 *
 * Copyright (c) 2003-2013 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * FF Video Codec 1 (a lossless codec) decoder
 */
#include "libavutil/avassert.h"
#include "libavutil/crc.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timer.h"
#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
#include "rangecoder.h"
#include "golomb.h"
#include "mathops.h"
#include "ffv1.h"
41 
43  int is_signed)
44 {
45  if (get_rac(c, state + 0))
46  return 0;
47  else {
48  int i, e;
49  unsigned a;
50  e = 0;
51  while (get_rac(c, state + 1 + FFMIN(e, 9))) { // 1..10
52  e++;
53  if (e > 31)
54  return AVERROR_INVALIDDATA;
55  }
56 
57  a = 1;
58  for (i = e - 1; i >= 0; i--)
59  a += a + get_rac(c, state + 22 + FFMIN(i, 9)); // 22..31
60 
61  e = -(is_signed && get_rac(c, state + 11 + FFMIN(e, 10))); // 11..21
62  return (a ^ e) - e;
63  }
64 }
65 
66 static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
67 {
68  return get_symbol_inline(c, state, is_signed);
69 }
70 
71 static inline int get_vlc_symbol(GetBitContext *gb, VlcState *const state,
72  int bits)
73 {
74  int k, i, v, ret;
75 
76  i = state->count;
77  k = 0;
78  while (i < state->error_sum) { // FIXME: optimize
79  k++;
80  i += i;
81  }
82 
83  v = get_sr_golomb(gb, k, 12, bits);
84  ff_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
85  v, state->bias, state->error_sum, state->drift, state->count, k);
86 
87  v ^= ((2 * state->drift + state->count) >> 31);
88 
89  ret = fold(v + state->bias, bits);
90 
92 
93  return ret;
94 }
95 
97 {
98  if (s->ac != AC_GOLOMB_RICE) {
99  RangeCoder *const c = &s->c;
100  if (c->overread > MAX_OVERREAD)
101  return AVERROR_INVALIDDATA;
102  } else {
103  if (get_bits_left(&s->gb) < 1)
104  return AVERROR_INVALIDDATA;
105  }
106  return 0;
107 }
108 
109 #define TYPE int16_t
110 #define RENAME(name) name
111 #include "ffv1dec_template.c"
112 #undef TYPE
113 #undef RENAME
114 
115 #define TYPE int32_t
116 #define RENAME(name) name ## 32
117 #include "ffv1dec_template.c"
118 
120  int w, int h, int stride, int plane_index,
121  int pixel_stride)
122 {
123  int x, y;
124  int16_t *sample[2];
125  sample[0] = s->sample_buffer + 3;
126  sample[1] = s->sample_buffer + w + 6 + 3;
127 
128  s->run_index = 0;
129 
130  memset(s->sample_buffer, 0, 2 * (w + 6) * sizeof(*s->sample_buffer));
131 
132  for (y = 0; y < h; y++) {
133  int16_t *temp = sample[0]; // FIXME: try a normal buffer
134 
135  sample[0] = sample[1];
136  sample[1] = temp;
137 
138  sample[1][-1] = sample[0][0];
139  sample[0][w] = sample[0][w - 1];
140 
141 // { START_TIMER
142  if (s->avctx->bits_per_raw_sample <= 8) {
143  int ret = decode_line(s, w, sample, plane_index, 8);
144  if (ret < 0)
145  return ret;
146  for (x = 0; x < w; x++)
147  src[x*pixel_stride + stride * y] = sample[1][x];
148  } else {
149  int ret = decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
150  if (ret < 0)
151  return ret;
152  if (s->packed_at_lsb) {
153  for (x = 0; x < w; x++) {
154  ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x];
155  }
156  } else {
157  for (x = 0; x < w; x++) {
158  ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x] << (16 - s->avctx->bits_per_raw_sample) | ((uint16_t **)sample)[1][x] >> (2 * s->avctx->bits_per_raw_sample - 16);
159  }
160  }
161  }
162 // STOP_TIMER("decode-line") }
163  }
164  return 0;
165 }
166 
168 {
169  RangeCoder *c = &fs->c;
171  unsigned ps, i, context_count;
172  memset(state, 128, sizeof(state));
173 
174  av_assert0(f->version > 2);
175 
176  fs->slice_x = get_symbol(c, state, 0) * f->width ;
177  fs->slice_y = get_symbol(c, state, 0) * f->height;
178  fs->slice_width = (get_symbol(c, state, 0) + 1) * f->width + fs->slice_x;
179  fs->slice_height = (get_symbol(c, state, 0) + 1) * f->height + fs->slice_y;
180 
181  fs->slice_x /= f->num_h_slices;
182  fs->slice_y /= f->num_v_slices;
183  fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
184  fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
185  if ((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
186  return -1;
187  if ( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
188  || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
189  return -1;
190 
191  for (i = 0; i < f->plane_count; i++) {
192  PlaneContext * const p = &fs->plane[i];
193  int idx = get_symbol(c, state, 0);
194  if (idx >= (unsigned)f->quant_table_count) {
195  av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
196  return -1;
197  }
198  p->quant_table_index = idx;
199  memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
200  context_count = f->context_count[idx];
201 
202  if (p->context_count < context_count) {
203  av_freep(&p->state);
204  av_freep(&p->vlc_state);
205  }
207  }
208 
209  ps = get_symbol(c, state, 0);
210  if (ps == 1) {
211  f->cur->interlaced_frame = 1;
212  f->cur->top_field_first = 1;
213  } else if (ps == 2) {
214  f->cur->interlaced_frame = 1;
215  f->cur->top_field_first = 0;
216  } else if (ps == 3) {
217  f->cur->interlaced_frame = 0;
218  }
219  f->cur->sample_aspect_ratio.num = get_symbol(c, state, 0);
220  f->cur->sample_aspect_ratio.den = get_symbol(c, state, 0);
221 
222  if (av_image_check_sar(f->width, f->height,
223  f->cur->sample_aspect_ratio) < 0) {
224  av_log(f->avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
225  f->cur->sample_aspect_ratio.num,
226  f->cur->sample_aspect_ratio.den);
227  f->cur->sample_aspect_ratio = (AVRational){ 0, 1 };
228  }
229 
230  if (fs->version > 3) {
231  fs->slice_reset_contexts = get_rac(c, state);
232  fs->slice_coding_mode = get_symbol(c, state, 0);
233  if (fs->slice_coding_mode != 1) {
234  fs->slice_rct_by_coef = get_symbol(c, state, 0);
235  fs->slice_rct_ry_coef = get_symbol(c, state, 0);
236  if ((uint64_t)fs->slice_rct_by_coef + (uint64_t)fs->slice_rct_ry_coef > 4) {
237  av_log(f->avctx, AV_LOG_ERROR, "slice_rct_y_coef out of range\n");
238  return AVERROR_INVALIDDATA;
239  }
240  }
241  }
242 
243  return 0;
244 }
245 
246 static int decode_slice(AVCodecContext *c, void *arg)
247 {
248  FFV1Context *fs = *(void **)arg;
249  FFV1Context *f = fs->avctx->priv_data;
250  int width, height, x, y, ret;
251  const int ps = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step;
252  AVFrame * const p = f->cur;
253  int i, si;
254 
255  for( si=0; fs != f->slice_context[si]; si ++)
256  ;
257 
258  if(f->fsrc && !p->key_frame)
259  ff_thread_await_progress(&f->last_picture, si, 0);
260 
261  if(f->fsrc && !p->key_frame) {
262  FFV1Context *fssrc = f->fsrc->slice_context[si];
263  FFV1Context *fsdst = f->slice_context[si];
264  av_assert1(fsdst->plane_count == fssrc->plane_count);
265  av_assert1(fsdst == fs);
266 
267  if (!p->key_frame)
268  fsdst->slice_damaged |= fssrc->slice_damaged;
269 
270  for (i = 0; i < f->plane_count; i++) {
271  PlaneContext *psrc = &fssrc->plane[i];
272  PlaneContext *pdst = &fsdst->plane[i];
273 
274  av_free(pdst->state);
275  av_free(pdst->vlc_state);
276  memcpy(pdst, psrc, sizeof(*pdst));
277  pdst->state = NULL;
278  pdst->vlc_state = NULL;
279 
280  if (fssrc->ac) {
282  memcpy(pdst->state, psrc->state, CONTEXT_SIZE * psrc->context_count);
283  } else {
284  pdst->vlc_state = av_malloc_array(sizeof(*pdst->vlc_state), psrc->context_count);
285  memcpy(pdst->vlc_state, psrc->vlc_state, sizeof(*pdst->vlc_state) * psrc->context_count);
286  }
287  }
288  }
289 
290  fs->slice_rct_by_coef = 1;
291  fs->slice_rct_ry_coef = 1;
292 
293  if (f->version > 2) {
294  if (ff_ffv1_init_slice_state(f, fs) < 0)
295  return AVERROR(ENOMEM);
296  if (decode_slice_header(f, fs) < 0) {
297  fs->slice_x = fs->slice_y = fs->slice_height = fs->slice_width = 0;
298  fs->slice_damaged = 1;
299  return AVERROR_INVALIDDATA;
300  }
301  }
302  if ((ret = ff_ffv1_init_slice_state(f, fs)) < 0)
303  return ret;
304  if (f->cur->key_frame || fs->slice_reset_contexts)
306 
307  width = fs->slice_width;
308  height = fs->slice_height;
309  x = fs->slice_x;
310  y = fs->slice_y;
311 
312  if (fs->ac == AC_GOLOMB_RICE) {
313  if (f->version == 3 && f->micro_version > 1 || f->version > 3)
314  get_rac(&fs->c, (uint8_t[]) { 129 });
315  fs->ac_byte_count = f->version > 2 || (!x && !y) ? fs->c.bytestream - fs->c.bytestream_start - 1 : 0;
316  init_get_bits(&fs->gb,
317  fs->c.bytestream_start + fs->ac_byte_count,
318  (fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count) * 8);
319  }
320 
321  av_assert1(width && height);
322  if (f->colorspace == 0 && (f->chroma_planes || !fs->transparency)) {
323  const int chroma_width = AV_CEIL_RSHIFT(width, f->chroma_h_shift);
324  const int chroma_height = AV_CEIL_RSHIFT(height, f->chroma_v_shift);
325  const int cx = x >> f->chroma_h_shift;
326  const int cy = y >> f->chroma_v_shift;
327  decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0, 1);
328 
329  if (f->chroma_planes) {
330  decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1, 1);
331  decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1, 1);
332  }
333  if (fs->transparency)
334  decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], (f->version >= 4 && !f->chroma_planes) ? 1 : 2, 1);
335  } else if (f->colorspace == 0) {
336  decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0] , width, height, p->linesize[0], 0, 2);
337  decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0] + 1, width, height, p->linesize[0], 1, 2);
338  } else if (f->use32bit) {
339  uint8_t *planes[4] = { p->data[0] + ps * x + y * p->linesize[0],
340  p->data[1] + ps * x + y * p->linesize[1],
341  p->data[2] + ps * x + y * p->linesize[2],
342  p->data[3] + ps * x + y * p->linesize[3] };
343  decode_rgb_frame32(fs, planes, width, height, p->linesize);
344  } else {
345  uint8_t *planes[4] = { p->data[0] + ps * x + y * p->linesize[0],
346  p->data[1] + ps * x + y * p->linesize[1],
347  p->data[2] + ps * x + y * p->linesize[2],
348  p->data[3] + ps * x + y * p->linesize[3] };
349  decode_rgb_frame(fs, planes, width, height, p->linesize);
350  }
351  if (fs->ac != AC_GOLOMB_RICE && f->version > 2) {
352  int v;
353  get_rac(&fs->c, (uint8_t[]) { 129 });
354  v = fs->c.bytestream_end - fs->c.bytestream - 2 - 5*f->ec;
355  if (v) {
356  av_log(f->avctx, AV_LOG_ERROR, "bytestream end mismatching by %d\n", v);
357  fs->slice_damaged = 1;
358  }
359  }
360 
361  emms_c();
362 
363  ff_thread_report_progress(&f->picture, si, 0);
364 
365  return 0;
366 }
367 
368 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
369 {
370  int v;
371  int i = 0;
373 
374  memset(state, 128, sizeof(state));
375 
376  for (v = 0; i < 128; v++) {
377  unsigned len = get_symbol(c, state, 0) + 1U;
378 
379  if (len > 128 - i || !len)
380  return AVERROR_INVALIDDATA;
381 
382  while (len--) {
383  quant_table[i] = scale * v;
384  i++;
385  }
386  }
387 
388  for (i = 1; i < 128; i++)
389  quant_table[256 - i] = -quant_table[i];
390  quant_table[128] = -quant_table[127];
391 
392  return 2 * v - 1;
393 }
394 
396  int16_t quant_table[MAX_CONTEXT_INPUTS][256])
397 {
398  int i;
399  int context_count = 1;
400 
401  for (i = 0; i < 5; i++) {
403  if (ret < 0)
404  return ret;
405  context_count *= ret;
406  if (context_count > 32768U) {
407  return AVERROR_INVALIDDATA;
408  }
409  }
410  return (context_count + 1) / 2;
411 }
412 
414 {
415  RangeCoder *const c = &f->c;
417  int i, j, k, ret;
418  uint8_t state2[32][CONTEXT_SIZE];
419  unsigned crc = 0;
420 
421  memset(state2, 128, sizeof(state2));
422  memset(state, 128, sizeof(state));
423 
424  ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
425  ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
426 
427  f->version = get_symbol(c, state, 0);
428  if (f->version < 2) {
429  av_log(f->avctx, AV_LOG_ERROR, "Invalid version in global header\n");
430  return AVERROR_INVALIDDATA;
431  }
432  if (f->version > 2) {
433  c->bytestream_end -= 4;
434  f->micro_version = get_symbol(c, state, 0);
435  if (f->micro_version < 0)
436  return AVERROR_INVALIDDATA;
437  }
438  f->ac = get_symbol(c, state, 0);
439 
440  if (f->ac == AC_RANGE_CUSTOM_TAB) {
441  for (i = 1; i < 256; i++)
442  f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
443  }
444 
445  f->colorspace = get_symbol(c, state, 0); //YUV cs type
446  f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
447  f->chroma_planes = get_rac(c, state);
448  f->chroma_h_shift = get_symbol(c, state, 0);
449  f->chroma_v_shift = get_symbol(c, state, 0);
450  f->transparency = get_rac(c, state);
451  f->plane_count = 1 + (f->chroma_planes || f->version<4) + f->transparency;
452  f->num_h_slices = 1 + get_symbol(c, state, 0);
453  f->num_v_slices = 1 + get_symbol(c, state, 0);
454 
455  if (f->chroma_h_shift > 4U || f->chroma_v_shift > 4U) {
456  av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
457  f->chroma_h_shift, f->chroma_v_shift);
458  return AVERROR_INVALIDDATA;
459  }
460 
461  if (f->num_h_slices > (unsigned)f->width || !f->num_h_slices ||
462  f->num_v_slices > (unsigned)f->height || !f->num_v_slices
463  ) {
464  av_log(f->avctx, AV_LOG_ERROR, "slice count invalid\n");
465  return AVERROR_INVALIDDATA;
466  }
467 
468  f->quant_table_count = get_symbol(c, state, 0);
469  if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES || !f->quant_table_count) {
470  av_log(f->avctx, AV_LOG_ERROR, "quant table count %d is invalid\n", f->quant_table_count);
471  f->quant_table_count = 0;
472  return AVERROR_INVALIDDATA;
473  }
474 
475  for (i = 0; i < f->quant_table_count; i++) {
476  f->context_count[i] = read_quant_tables(c, f->quant_tables[i]);
477  if (f->context_count[i] < 0) {
478  av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
479  return AVERROR_INVALIDDATA;
480  }
481  }
483  return ret;
484 
485  for (i = 0; i < f->quant_table_count; i++)
486  if (get_rac(c, state)) {
487  for (j = 0; j < f->context_count[i]; j++)
488  for (k = 0; k < CONTEXT_SIZE; k++) {
489  int pred = j ? f->initial_states[i][j - 1][k] : 128;
490  f->initial_states[i][j][k] =
491  (pred + get_symbol(c, state2[k], 1)) & 0xFF;
492  }
493  }
494 
495  if (f->version > 2) {
496  f->ec = get_symbol(c, state, 0);
497  if (f->micro_version > 2)
498  f->intra = get_symbol(c, state, 0);
499  }
500 
501  if (f->version > 2) {
502  unsigned v;
504  f->avctx->extradata, f->avctx->extradata_size);
505  if (v || f->avctx->extradata_size < 4) {
506  av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", v);
507  return AVERROR_INVALIDDATA;
508  }
509  crc = AV_RB32(f->avctx->extradata + f->avctx->extradata_size - 4);
510  }
511 
512  if (f->avctx->debug & FF_DEBUG_PICT_INFO)
513  av_log(f->avctx, AV_LOG_DEBUG,
514  "global: ver:%d.%d, coder:%d, colorspace: %d bpr:%d chroma:%d(%d:%d), alpha:%d slices:%dx%d qtabs:%d ec:%d intra:%d CRC:0x%08X\n",
515  f->version, f->micro_version,
516  f->ac,
517  f->colorspace,
518  f->avctx->bits_per_raw_sample,
519  f->chroma_planes, f->chroma_h_shift, f->chroma_v_shift,
520  f->transparency,
521  f->num_h_slices, f->num_v_slices,
522  f->quant_table_count,
523  f->ec,
524  f->intra,
525  crc
526  );
527  return 0;
528 }
529 
531 {
533  int i, j, context_count = -1; //-1 to avoid warning
534  RangeCoder *const c = &f->slice_context[0]->c;
535 
536  memset(state, 128, sizeof(state));
537 
538  if (f->version < 2) {
540  unsigned v= get_symbol(c, state, 0);
541  if (v >= 2) {
542  av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
543  return AVERROR_INVALIDDATA;
544  }
545  f->version = v;
546  f->ac = get_symbol(c, state, 0);
547 
548  if (f->ac == AC_RANGE_CUSTOM_TAB) {
549  for (i = 1; i < 256; i++) {
550  int st = get_symbol(c, state, 1) + c->one_state[i];
551  if (st < 1 || st > 255) {
552  av_log(f->avctx, AV_LOG_ERROR, "invalid state transition %d\n", st);
553  return AVERROR_INVALIDDATA;
554  }
555  f->state_transition[i] = st;
556  }
557  }
558 
559  colorspace = get_symbol(c, state, 0); //YUV cs type
560  bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
565  if (colorspace == 0 && f->avctx->skip_alpha)
566  transparency = 0;
567 
568  if (f->plane_count) {
569  if (colorspace != f->colorspace ||
570  bits_per_raw_sample != f->avctx->bits_per_raw_sample ||
571  chroma_planes != f->chroma_planes ||
572  chroma_h_shift != f->chroma_h_shift ||
573  chroma_v_shift != f->chroma_v_shift ||
574  transparency != f->transparency) {
575  av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
576  return AVERROR_INVALIDDATA;
577  }
578  }
579 
580  if (chroma_h_shift > 4U || chroma_v_shift > 4U) {
581  av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
583  return AVERROR_INVALIDDATA;
584  }
585 
586  f->colorspace = colorspace;
587  f->avctx->bits_per_raw_sample = bits_per_raw_sample;
588  f->chroma_planes = chroma_planes;
589  f->chroma_h_shift = chroma_h_shift;
590  f->chroma_v_shift = chroma_v_shift;
591  f->transparency = transparency;
592 
593  f->plane_count = 2 + f->transparency;
594  }
595 
596  if (f->colorspace == 0) {
597  if (!f->transparency && !f->chroma_planes) {
598  if (f->avctx->bits_per_raw_sample <= 8)
599  f->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
600  else if (f->avctx->bits_per_raw_sample == 9) {
601  f->packed_at_lsb = 1;
602  f->avctx->pix_fmt = AV_PIX_FMT_GRAY9;
603  } else if (f->avctx->bits_per_raw_sample == 10) {
604  f->packed_at_lsb = 1;
605  f->avctx->pix_fmt = AV_PIX_FMT_GRAY10;
606  } else if (f->avctx->bits_per_raw_sample == 12) {
607  f->packed_at_lsb = 1;
608  f->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
609  } else if (f->avctx->bits_per_raw_sample == 16) {
610  f->packed_at_lsb = 1;
611  f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
612  } else if (f->avctx->bits_per_raw_sample < 16) {
613  f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
614  } else
615  return AVERROR(ENOSYS);
616  } else if (f->transparency && !f->chroma_planes) {
617  if (f->avctx->bits_per_raw_sample <= 8)
618  f->avctx->pix_fmt = AV_PIX_FMT_YA8;
619  else
620  return AVERROR(ENOSYS);
621  } else if (f->avctx->bits_per_raw_sample<=8 && !f->transparency) {
622  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
623  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P; break;
624  case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P; break;
625  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P; break;
626  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
627  case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
628  case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
629  }
630  } else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
631  switch(16*f->chroma_h_shift + f->chroma_v_shift) {
632  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
633  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
634  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
635  }
636  } else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) {
637  f->packed_at_lsb = 1;
638  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
639  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
640  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
641  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
642  }
643  } else if (f->avctx->bits_per_raw_sample == 9 && f->transparency) {
644  f->packed_at_lsb = 1;
645  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
646  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P9; break;
647  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P9; break;
648  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P9; break;
649  }
650  } else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) {
651  f->packed_at_lsb = 1;
652  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
653  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
654  case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P10; break;
655  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
656  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
657  }
658  } else if (f->avctx->bits_per_raw_sample == 10 && f->transparency) {
659  f->packed_at_lsb = 1;
660  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
661  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P10; break;
662  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P10; break;
663  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P10; break;
664  }
665  } else if (f->avctx->bits_per_raw_sample == 12 && !f->transparency) {
666  f->packed_at_lsb = 1;
667  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
668  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P12; break;
669  case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P12; break;
670  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P12; break;
671  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P12; break;
672  }
673  } else if (f->avctx->bits_per_raw_sample == 14 && !f->transparency) {
674  f->packed_at_lsb = 1;
675  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
676  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P14; break;
677  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P14; break;
678  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P14; break;
679  }
680  } else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency){
681  f->packed_at_lsb = 1;
682  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
683  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
684  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
685  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
686  }
687  } else if (f->avctx->bits_per_raw_sample == 16 && f->transparency){
688  f->packed_at_lsb = 1;
689  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
690  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P16; break;
691  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P16; break;
692  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16; break;
693  }
694  }
695  } else if (f->colorspace == 1) {
696  if (f->chroma_h_shift || f->chroma_v_shift) {
697  av_log(f->avctx, AV_LOG_ERROR,
698  "chroma subsampling not supported in this colorspace\n");
699  return AVERROR(ENOSYS);
700  }
701  if ( f->avctx->bits_per_raw_sample <= 8 && !f->transparency)
702  f->avctx->pix_fmt = AV_PIX_FMT_0RGB32;
703  else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency)
704  f->avctx->pix_fmt = AV_PIX_FMT_RGB32;
705  else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency)
706  f->avctx->pix_fmt = AV_PIX_FMT_GBRP9;
707  else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency)
708  f->avctx->pix_fmt = AV_PIX_FMT_GBRP10;
709  else if (f->avctx->bits_per_raw_sample == 10 && f->transparency)
710  f->avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
711  else if (f->avctx->bits_per_raw_sample == 12 && !f->transparency)
712  f->avctx->pix_fmt = AV_PIX_FMT_GBRP12;
713  else if (f->avctx->bits_per_raw_sample == 12 && f->transparency)
714  f->avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
715  else if (f->avctx->bits_per_raw_sample == 14 && !f->transparency)
716  f->avctx->pix_fmt = AV_PIX_FMT_GBRP14;
717  else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency) {
718  f->avctx->pix_fmt = AV_PIX_FMT_GBRP16;
719  f->use32bit = 1;
720  }
721  else if (f->avctx->bits_per_raw_sample == 16 && f->transparency) {
722  f->avctx->pix_fmt = AV_PIX_FMT_GBRAP16;
723  f->use32bit = 1;
724  }
725  } else {
726  av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
727  return AVERROR(ENOSYS);
728  }
729  if (f->avctx->pix_fmt == AV_PIX_FMT_NONE) {
730  av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
731  return AVERROR(ENOSYS);
732  }
733 
734  ff_dlog(f->avctx, "%d %d %d\n",
735  f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
736  if (f->version < 2) {
737  context_count = read_quant_tables(c, f->quant_table);
738  if (context_count < 0) {
739  av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
740  return AVERROR_INVALIDDATA;
741  }
742  f->slice_count = f->max_slice_count;
743  } else if (f->version < 3) {
744  f->slice_count = get_symbol(c, state, 0);
745  } else {
746  const uint8_t *p = c->bytestream_end;
747  for (f->slice_count = 0;
748  f->slice_count < MAX_SLICES && 3 + 5*!!f->ec < p - c->bytestream_start;
749  f->slice_count++) {
750  int trailer = 3 + 5*!!f->ec;
751  int size = AV_RB24(p-trailer);
752  if (size + trailer > p - c->bytestream_start)
753  break;
754  p -= size + trailer;
755  }
756  }
757  if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0 || f->slice_count > f->max_slice_count) {
758  av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid (max=%d)\n", f->slice_count, f->max_slice_count);
759  return AVERROR_INVALIDDATA;
760  }
761 
762  for (j = 0; j < f->slice_count; j++) {
763  FFV1Context *fs = f->slice_context[j];
764  fs->ac = f->ac;
765  fs->packed_at_lsb = f->packed_at_lsb;
766 
767  fs->slice_damaged = 0;
768 
769  if (f->version == 2) {
770  fs->slice_x = get_symbol(c, state, 0) * f->width ;
771  fs->slice_y = get_symbol(c, state, 0) * f->height;
772  fs->slice_width = (get_symbol(c, state, 0) + 1) * f->width + fs->slice_x;
773  fs->slice_height = (get_symbol(c, state, 0) + 1) * f->height + fs->slice_y;
774 
775  fs->slice_x /= f->num_h_slices;
776  fs->slice_y /= f->num_v_slices;
777  fs->slice_width = fs->slice_width / f->num_h_slices - fs->slice_x;
778  fs->slice_height = fs->slice_height / f->num_v_slices - fs->slice_y;
779  if ((unsigned)fs->slice_width > f->width ||
780  (unsigned)fs->slice_height > f->height)
781  return AVERROR_INVALIDDATA;
782  if ( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
783  || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
784  return AVERROR_INVALIDDATA;
785  }
786 
787  for (i = 0; i < f->plane_count; i++) {
788  PlaneContext *const p = &fs->plane[i];
789 
790  if (f->version == 2) {
791  int idx = get_symbol(c, state, 0);
792  if (idx >= (unsigned)f->quant_table_count) {
793  av_log(f->avctx, AV_LOG_ERROR,
794  "quant_table_index out of range\n");
795  return AVERROR_INVALIDDATA;
796  }
797  p->quant_table_index = idx;
798  memcpy(p->quant_table, f->quant_tables[idx],
799  sizeof(p->quant_table));
800  context_count = f->context_count[idx];
801  } else {
802  memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
803  }
804 
805  if (f->version <= 2) {
807  if (p->context_count < context_count) {
808  av_freep(&p->state);
809  av_freep(&p->vlc_state);
810  }
812  }
813  }
814  }
815  return 0;
816 }
817 
819 {
821  int ret;
822 
823  if ((ret = ff_ffv1_common_init(avctx)) < 0)
824  return ret;
825 
826  if (avctx->extradata_size > 0 && (ret = read_extra_header(f)) < 0)
827  return ret;
828 
829  if ((ret = ff_ffv1_init_slice_contexts(f)) < 0)
830  return ret;
831 
833 
834  return 0;
835 }
836 
837 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
838 {
839  uint8_t *buf = avpkt->data;
840  int buf_size = avpkt->size;
842  RangeCoder *const c = &f->slice_context[0]->c;
843  int i, ret;
844  uint8_t keystate = 128;
845  uint8_t *buf_p;
846  AVFrame *p;
847 
848  if (f->last_picture.f)
849  ff_thread_release_buffer(avctx, &f->last_picture);
850  FFSWAP(ThreadFrame, f->picture, f->last_picture);
851 
852  f->cur = p = f->picture.f;
853 
854  if (f->version < 3 && avctx->field_order > AV_FIELD_PROGRESSIVE) {
855  /* we have interlaced material flagged in container */
856  p->interlaced_frame = 1;
858  p->top_field_first = 1;
859  }
860 
861  f->avctx = avctx;
862  ff_init_range_decoder(c, buf, buf_size);
863  ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
864 
865  p->pict_type = AV_PICTURE_TYPE_I; //FIXME I vs. P
866  if (get_rac(c, &keystate)) {
867  p->key_frame = 1;
868  f->key_frame_ok = 0;
869  if ((ret = read_header(f)) < 0)
870  return ret;
871  f->key_frame_ok = 1;
872  } else {
873  if (!f->key_frame_ok) {
875  "Cannot decode non-keyframe without valid keyframe\n");
876  return AVERROR_INVALIDDATA;
877  }
878  p->key_frame = 0;
879  }
880 
881  if ((ret = ff_thread_get_buffer(avctx, &f->picture, AV_GET_BUFFER_FLAG_REF)) < 0)
882  return ret;
883 
885  av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d bps:%d\n",
886  f->version, p->key_frame, f->ac, f->ec, f->slice_count, f->avctx->bits_per_raw_sample);
887 
889 
890  buf_p = buf + buf_size;
891  for (i = f->slice_count - 1; i >= 0; i--) {
892  FFV1Context *fs = f->slice_context[i];
893  int trailer = 3 + 5*!!f->ec;
894  int v;
895 
896  if (i || f->version > 2) {
897  if (trailer > buf_p - buf) v = INT_MAX;
898  else v = AV_RB24(buf_p-trailer) + trailer;
899  } else v = buf_p - c->bytestream_start;
900  if (buf_p - c->bytestream_start < v) {
901  av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
902  ff_thread_report_progress(&f->picture, INT_MAX, 0);
903  return AVERROR_INVALIDDATA;
904  }
905  buf_p -= v;
906 
907  if (f->ec) {
908  unsigned crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, v);
909  if (crc) {
910  int64_t ts = avpkt->pts != AV_NOPTS_VALUE ? avpkt->pts : avpkt->dts;
911  av_log(f->avctx, AV_LOG_ERROR, "slice CRC mismatch %X!", crc);
912  if (ts != AV_NOPTS_VALUE && avctx->pkt_timebase.num) {
913  av_log(f->avctx, AV_LOG_ERROR, "at %f seconds\n", ts*av_q2d(avctx->pkt_timebase));
914  } else if (ts != AV_NOPTS_VALUE) {
915  av_log(f->avctx, AV_LOG_ERROR, "at %"PRId64"\n", ts);
916  } else {
917  av_log(f->avctx, AV_LOG_ERROR, "\n");
918  }
919  fs->slice_damaged = 1;
920  }
921  if (avctx->debug & FF_DEBUG_PICT_INFO) {
922  av_log(avctx, AV_LOG_DEBUG, "slice %d, CRC: 0x%08"PRIX32"\n", i, AV_RB32(buf_p + v - 4));
923  }
924  }
925 
926  if (i) {
927  ff_init_range_decoder(&fs->c, buf_p, v);
928  } else
929  fs->c.bytestream_end = buf_p + v;
930 
931  fs->avctx = avctx;
932  fs->cur = p;
933  }
934 
936  decode_slice,
937  &f->slice_context[0],
938  NULL,
939  f->slice_count,
940  sizeof(void*));
941 
942  for (i = f->slice_count - 1; i >= 0; i--) {
943  FFV1Context *fs = f->slice_context[i];
944  int j;
945  if (fs->slice_damaged && f->last_picture.f->data[0]) {
947  const uint8_t *src[4];
948  uint8_t *dst[4];
949  ff_thread_await_progress(&f->last_picture, INT_MAX, 0);
950  for (j = 0; j < desc->nb_components; j++) {
951  int pixshift = desc->comp[j].depth > 8;
952  int sh = (j == 1 || j == 2) ? f->chroma_h_shift : 0;
953  int sv = (j == 1 || j == 2) ? f->chroma_v_shift : 0;
954  dst[j] = p->data[j] + p->linesize[j] *
955  (fs->slice_y >> sv) + ((fs->slice_x >> sh) << pixshift);
956  src[j] = f->last_picture.f->data[j] + f->last_picture.f->linesize[j] *
957  (fs->slice_y >> sv) + ((fs->slice_x >> sh) << pixshift);
958 
959  }
960  if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
961  desc->flags & FF_PSEUDOPAL) {
962  dst[1] = p->data[1];
963  src[1] = f->last_picture.f->data[1];
964  }
965  av_image_copy(dst, p->linesize, src,
966  f->last_picture.f->linesize,
967  avctx->pix_fmt,
968  fs->slice_width,
969  fs->slice_height);
970  }
971  }
972  ff_thread_report_progress(&f->picture, INT_MAX, 0);
973 
974  f->picture_number++;
975 
976  if (f->last_picture.f)
977  ff_thread_release_buffer(avctx, &f->last_picture);
978  f->cur = NULL;
979  if ((ret = av_frame_ref(data, f->picture.f)) < 0)
980  return ret;
981 
982  *got_frame = 1;
983 
984  return buf_size;
985 }
986 
#if HAVE_THREADS
/**
 * Frame-threading copy constructor: duplicate per-thread allocations
 * (initial states, frames, slice contexts) in the new thread's context.
 */
static int init_thread_copy(AVCodecContext *avctx)
{
    FFV1Context *f = avctx->priv_data;
    int i, ret;

    f->picture.f       = NULL;
    f->last_picture.f  = NULL;
    f->sample_buffer   = NULL;
    f->max_slice_count = 0;
    f->slice_count     = 0;

    for (i = 0; i < f->quant_table_count; i++) {
        av_assert0(f->version > 1);
        f->initial_states[i] = av_memdup(f->initial_states[i],
                                         f->context_count[i] * sizeof(*f->initial_states[i]));
    }

    f->picture.f      = av_frame_alloc();
    f->last_picture.f = av_frame_alloc();

    if ((ret = ff_ffv1_init_slice_contexts(f)) < 0)
        return ret;

    return 0;
}
#endif
1014 
1015 static void copy_fields(FFV1Context *fsdst, FFV1Context *fssrc, FFV1Context *fsrc)
1016 {
1017  fsdst->version = fsrc->version;
1018  fsdst->micro_version = fsrc->micro_version;
1019  fsdst->chroma_planes = fsrc->chroma_planes;
1022  fsdst->transparency = fsrc->transparency;
1023  fsdst->plane_count = fsrc->plane_count;
1024  fsdst->ac = fsrc->ac;
1025  fsdst->colorspace = fsrc->colorspace;
1026 
1027  fsdst->ec = fsrc->ec;
1028  fsdst->intra = fsrc->intra;
1029  fsdst->slice_damaged = fssrc->slice_damaged;
1030  fsdst->key_frame_ok = fsrc->key_frame_ok;
1031 
1033  fsdst->packed_at_lsb = fsrc->packed_at_lsb;
1034  fsdst->slice_count = fsrc->slice_count;
1035  if (fsrc->version<3){
1036  fsdst->slice_x = fssrc->slice_x;
1037  fsdst->slice_y = fssrc->slice_y;
1038  fsdst->slice_width = fssrc->slice_width;
1039  fsdst->slice_height = fssrc->slice_height;
1040  }
1041 }
1042 
#if HAVE_THREADS
/**
 * Frame-threading context update: copy the source decoder state into the
 * destination while preserving the destination's own allocations
 * (pictures, initial states, slice context pointers).
 */
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    FFV1Context *fsrc = src->priv_data;
    FFV1Context *fdst = dst->priv_data;
    int i, ret;

    if (dst == src)
        return 0;

    {
        /* save destination-owned resources across the struct copy */
        ThreadFrame picture = fdst->picture, last_picture = fdst->last_picture;
        uint8_t (*initial_states[MAX_QUANT_TABLES])[32];
        struct FFV1Context *slice_context[MAX_SLICES];
        memcpy(initial_states, fdst->initial_states, sizeof(fdst->initial_states));
        memcpy(slice_context,  fdst->slice_context , sizeof(fdst->slice_context));

        memcpy(fdst, fsrc, sizeof(*fdst));
        memcpy(fdst->initial_states, initial_states, sizeof(fdst->initial_states));
        memcpy(fdst->slice_context,  slice_context , sizeof(fdst->slice_context));
        fdst->picture      = picture;
        fdst->last_picture = last_picture;
        for (i = 0; i<fdst->num_h_slices * fdst->num_v_slices; i++) {
            FFV1Context *fssrc = fsrc->slice_context[i];
            FFV1Context *fsdst = fdst->slice_context[i];
            copy_fields(fsdst, fssrc, fsrc);
        }
        av_assert0(!fdst->plane[0].state);
        av_assert0(!fdst->sample_buffer);
    }

    av_assert1(fdst->max_slice_count == fsrc->max_slice_count);


    ff_thread_release_buffer(dst, &fdst->picture);
    if (fsrc->picture.f->data[0]) {
        if ((ret = ff_thread_ref_frame(&fdst->picture, &fsrc->picture)) < 0)
            return ret;
    }

    fdst->fsrc = fsrc;

    return 0;
}
#endif
1088 
1090  .name = "ffv1",
1091  .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
1092  .type = AVMEDIA_TYPE_VIDEO,
1093  .id = AV_CODEC_ID_FFV1,
1094  .priv_data_size = sizeof(FFV1Context),
1095  .init = decode_init,
1096  .close = ff_ffv1_close,
1097  .decode = decode_frame,
1099  .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
1100  .capabilities = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/ |
1102  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP
1103 };
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:430
read_extra_header
static int read_extra_header(FFV1Context *f)
Definition: ffv1dec.c:413
AV_PIX_FMT_GBRAP16
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:409
AVCodec
AVCodec.
Definition: avcodec.h:3481
stride
int stride
Definition: mace.c:144
FFV1Context::chroma_v_shift
int chroma_v_shift
Definition: ffv1.h:91
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
FFV1Context::key_frame_ok
int key_frame_ok
Definition: ffv1.h:119
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: avcodec.h:1545
update_vlc_state
static void update_vlc_state(VlcState *const state, const int v)
Definition: ffv1.h:162
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:246
opt.h
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:143
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
is_input_end
static int is_input_end(FFV1Context *s)
Definition: ffv1dec.c:96
FFV1Context::context_count
int context_count[MAX_QUANT_TABLES]
Definition: ffv1.h:106
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
MAX_OVERREAD
#define MAX_OVERREAD
Definition: lagarithrac.h:51
FFV1Context::ec
int ec
Definition: ffv1.h:116
FFV1Context::gb
GetBitContext gb
Definition: ffv1.h:83
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:422
get_sr_golomb
static int get_sr_golomb(GetBitContext *gb, int k, int limit, int esc_len)
read signed golomb rice code (ffv1).
Definition: golomb.h:529
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
pixdesc.h
ff_ffv1_common_init
av_cold int ff_ffv1_common_init(AVCodecContext *avctx)
Definition: ffv1.c:42
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:429
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
FFV1Context::last_picture
ThreadFrame last_picture
Definition: ffv1.h:96
AVPacket::data
uint8_t * data
Definition: avcodec.h:1477
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:424
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:2222
FFV1Context::slice_x
int slice_x
Definition: ffv1.h:135
AVFrame::top_field_first
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:447
last_picture
enum AVPictureType last_picture
Definition: movenc.c:68
data
const char data[16]
Definition: mxf.c:91
ff_ffv1_decoder
AVCodec ff_ffv1_decoder
Definition: ffv1dec.c:1089
rangecoder.h
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
PlaneContext::state
uint8_t(* state)[CONTEXT_SIZE]
Definition: ffv1.h:72
FFV1Context::num_h_slices
int num_h_slices
Definition: ffv1.h:132
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
FFV1Context::slice_context
struct FFV1Context * slice_context[MAX_SLICES]
Definition: ffv1.h:128
read_quant_table
static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
Definition: ffv1dec.c:368
ff_ffv1_init_slice_state
av_cold int ff_ffv1_init_slice_state(FFV1Context *f, FFV1Context *fs)
Definition: ffv1.c:67
AC_RANGE_CUSTOM_TAB
#define AC_RANGE_CUSTOM_TAB
Definition: ffv1.h:58
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:425
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
ThreadFrame::f
AVFrame * f
Definition: thread.h:35
FFV1Context::chroma_h_shift
int chroma_h_shift
Definition: ffv1.h:91
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2651
AV_PIX_FMT_GRAY9
#define AV_PIX_FMT_GRAY9
Definition: pixfmt.h:367
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
decode_frame
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: ffv1dec.c:837
av_memdup
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
Definition: mem.c:283
crc.h
golomb.h
exp golomb vlc stuff
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:421
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:405
U
#define U(x)
Definition: vp56_arith.h:37
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:403
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:431
GetBitContext
Definition: get_bits.h:61
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:385
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:373
av_noinline
#define av_noinline
Definition: attributes.h:66
CONTEXT_SIZE
#define CONTEXT_SIZE
Definition: ffv1.h:51
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:371
get_symbol_inline
static av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed)
Definition: ffv1dec.c:42
FFV1Context::chroma_planes
int chroma_planes
Definition: ffv1.h:90
PlaneContext::context_count
int context_count
Definition: ffv1.h:71
AVRational::num
int num
Numerator.
Definition: rational.h:59
src
#define src
Definition: vp8dsp.c:254
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
state
static struct @313 state
buf
void * buf
Definition: avisynth_c.h:766
av_cold
#define av_cold
Definition: attributes.h:84
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:399
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:557
FFV1Context::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffv1.h:122
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:407
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:1667
ff_ffv1_clear_slice_state
void ff_ffv1_clear_slice_state(FFV1Context *f, FFV1Context *fs)
Definition: ffv1.c:182
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:408
FFV1Context::slice_count
int slice_count
Definition: ffv1.h:129
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:400
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1176
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
FFV1Context::plane
PlaneContext plane[MAX_PLANES]
Definition: ffv1.h:103
FFV1Context::max_slice_count
int max_slice_count
Definition: ffv1.h:130
FFV1Context::slice_damaged
int slice_damaged
Definition: ffv1.h:118
bits
uint8_t bits
Definition: vp3data.h:202
FFV1Context::intra
int intra
Definition: ffv1.h:117
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
FFV1Context::fsrc
struct FFV1Context * fsrc
Definition: ffv1.h:97
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:384
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
read_quant_tables
static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256])
Definition: ffv1dec.c:395
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:398
get_bits.h
fold
static av_always_inline int fold(int diff, int bits)
Definition: ffv1.h:151
FFV1Context::ac
int ac
1=range coder <-> 0=golomb rice
Definition: ffv1.h:101
get_vlc_symbol
static int get_vlc_symbol(GetBitContext *gb, VlcState *const state, int bits)
Definition: ffv1dec.c:71
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
FFV1Context::plane_count
int plane_count
Definition: ffv1.h:100
f
#define f(width, name)
Definition: cbs_vp9.c:255
decode_slice_header
static int decode_slice_header(FFV1Context *f, FFV1Context *fs)
Definition: ffv1dec.c:167
arg
const char * arg
Definition: jacosubdec.c:66
AV_PIX_FMT_GRAY10
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:368
if
if(ret)
Definition: filter_design.txt:179
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1037
FFV1Context::slice_height
int slice_height
Definition: ffv1.h:134
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:406
copy_fields
static void copy_fields(FFV1Context *fsdst, FFV1Context *fssrc, FFV1Context *fsrc)
Definition: ffv1dec.c:1015
read_header
static int read_header(FFV1Context *f)
Definition: ffv1dec.c:530
get_symbol
static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
Definition: ffv1dec.c:66
NULL
#define NULL
Definition: coverity.c:32
PlaneContext::vlc_state
VlcState * vlc_state
Definition: ffv1.h:73
AC_GOLOMB_RICE
#define AC_GOLOMB_RICE
Definition: ffv1.h:56
fs
#define fs(width, name, subs,...)
Definition: cbs_vp9.c:259
FFV1Context::num_v_slices
int num_v_slices
Definition: ffv1.h:131
FFV1Context::colorspace
int colorspace
Definition: ffv1.h:110
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1600
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:389
mathops.h
timer.h
PlaneContext
Definition: ffv1.h:68
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:227
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:964
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:388
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
update_thread_context
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread. If the codec allocates writable tables in its init()
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:402
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
VlcState
Definition: ffv1.h:61
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
FFV1Context::slice_width
int slice_width
Definition: ffv1.h:133
ff_init_range_decoder
av_cold void ff_init_range_decoder(RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: rangecoder.c:53
AV_CODEC_ID_FFV1
@ AV_CODEC_ID_FFV1
Definition: avcodec.h:251
desc
const char * desc
Definition: nvenc.c:68
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
planes
static const struct @314 planes[]
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
AVPacket::size
int size
Definition: avcodec.h:1478
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
AVCodecInternal::allocate_progress
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:151
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:392
ff_ffv1_close
av_cold int ff_ffv1_close(AVCodecContext *avctx)
Definition: ffv1.c:210
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3119
sample
#define sample
Definition: flacdsp_template.c:44
init_thread_copy
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an init_thread_copy() which re-allocates them for other threads. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little speed gain at this point but it should work. If there are inter-frame dependencies
FFV1Context::picture
ThreadFrame picture
Definition: ffv1.h:96
size
int size
Definition: twinvq_data.h:11134
ff_build_rac_states
void ff_build_rac_states(RangeCoder *c, int factor, int max_p)
Definition: rangecoder.c:68
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:92
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:394
FFV1Context::sample_buffer
int16_t * sample_buffer
Definition: ffv1.h:111
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: avcodec.h:1476
height
#define height
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:360
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
av_crc_get_table
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
Definition: crc.c:374
AV_FIELD_TT
@ AV_FIELD_TT
Definition: avcodec.h:1546
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1041
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:426
quant_table
static float quant_table[96]
Definition: binkaudio.c:43
PlaneContext::quant_table_index
int quant_table_index
Definition: ffv1.h:70
FFV1Context::initial_states
uint8_t(*[MAX_QUANT_TABLES] initial_states)[32]
Definition: ffv1.h:108
AVFrame::interlaced_frame
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:442
decode_plane
static int decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index, int pixel_stride)
Definition: ffv1dec.c:119
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1470
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
av_flatten
#define av_flatten
Definition: attributes.h:90
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:404
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
FFV1Context::slice_y
int slice_y
Definition: ffv1.h:136
ffv1.h
uint8_t
uint8_t
Definition: audio_convert.c:194
AVCodec::name
const char * name
Name of the codec implementation.
Definition: avcodec.h:3488
len
int len
Definition: vorbis_enc_data.h:452
get_rac
static int get_rac(RangeCoder *c, uint8_t *const state)
Definition: rangecoder.h:136
AV_CRC_32_IEEE
@ AV_CRC_32_IEEE
Definition: crc.h:53
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:386
MAX_CONTEXT_INPUTS
#define MAX_CONTEXT_INPUTS
Definition: ffv1.h:54
FFV1Context::packed_at_lsb
int packed_at_lsb
Definition: ffv1.h:123
avcodec.h
FFV1Context::avctx
AVCodecContext * avctx
Definition: ffv1.h:81
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:364
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:423
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:391
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:396
ff_ffv1_allocate_initial_states
int ff_ffv1_allocate_initial_states(FFV1Context *f)
Definition: ffv1.c:167
AVCodecContext
main external API structure.
Definition: avcodec.h:1565
MAX_SLICES
#define MAX_SLICES
Definition: dxva2_hevc.c:29
ThreadFrame
Definition: thread.h:34
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:2864
av_image_copy
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:387
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: ffv1dec.c:818
av_crc
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
Definition: crc.c:392
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
temp
else temp
Definition: vf_mcdeint.c:256
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:2650
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:1861
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
MAX_QUANT_TABLES
#define MAX_QUANT_TABLES
Definition: ffv1.h:53
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
FFV1Context
Definition: ffv1.h:79
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
FFV1Context::transparency
int transparency
Definition: ffv1.h:92
AVPacket
This structure stores compressed data.
Definition: avcodec.h:1454
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:1592
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AV_FIELD_TB
@ AV_FIELD_TB
Definition: avcodec.h:1548
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
ffv1dec_template.c
ff_ffv1_init_slice_contexts
av_cold int ff_ffv1_init_slice_contexts(FFV1Context *f)
Definition: ffv1.c:117
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
FF_PSEUDOPAL
#define FF_PSEUDOPAL
Definition: internal.h:369
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
FFV1Context::micro_version
int micro_version
Definition: ffv1.h:88
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:393
h
h
Definition: vp9dsp_template.c:2038
RangeCoder
Definition: mss3.c:61
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:397
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:287
AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:369
PlaneContext::quant_table
int16_t quant_table[MAX_CONTEXT_INPUTS][256]
Definition: ffv1.h:69
AV_RB24
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
Definition: bytestream.h:93
AV_PIX_FMT_FLAG_PAL
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
FFV1Context::version
int version
Definition: ffv1.h:87
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:176
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:395
ff_thread_release_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an so the codec calls ff_thread_report set AVCodecInternal allocate_progress The frames must then be freed with ff_thread_release_buffer(). Otherwise leave it at zero and decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere