FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
h264.c
Go to the documentation of this file.
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #define UNCHECKED_BITSTREAM_READER 1
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/display.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/stereo3d.h"
35 #include "libavutil/timer.h"
36 #include "internal.h"
37 #include "cabac.h"
38 #include "cabac_functions.h"
39 #include "error_resilience.h"
40 #include "avcodec.h"
41 #include "h264.h"
42 #include "h264data.h"
43 #include "h264chroma.h"
44 #include "h264_mvpred.h"
45 #include "golomb.h"
46 #include "mathops.h"
47 #include "me_cmp.h"
48 #include "mpegutils.h"
49 #include "rectangle.h"
50 #include "svq3.h"
51 #include "thread.h"
52 #include "vdpau_compat.h"
53 
54 const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
55 
57 {
58  H264Context *h = avctx->priv_data;
59  return h ? h->sps.num_reorder_frames : 0;
60 }
61 
62 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
63  int (*mv)[2][4][2],
64  int mb_x, int mb_y, int mb_intra, int mb_skipped)
65 {
66  H264Context *h = opaque;
67  H264SliceContext *sl = &h->slice_ctx[0];
68 
69  sl->mb_x = mb_x;
70  sl->mb_y = mb_y;
71  sl->mb_xy = mb_x + mb_y * h->mb_stride;
72  memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
73  av_assert1(ref >= 0);
74  /* FIXME: It is possible albeit uncommon that slice references
75  * differ between slices. We take the easy approach and ignore
76  * it for now. If this turns out to have any relevance in
77  * practice then correct remapping should be added. */
78  if (ref >= sl->ref_count[0])
79  ref = 0;
80  if (!sl->ref_list[0][ref].data[0]) {
81  av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
82  ref = 0;
83  }
84  if ((sl->ref_list[0][ref].reference&3) != 3) {
85  av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
86  return;
87  }
88  fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
89  2, 2, 2, ref, 1);
90  fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
91  fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
92  pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
93  sl->mb_mbaff =
94  sl->mb_field_decoding_flag = 0;
96 }
97 
99  int y, int height)
100 {
101  AVCodecContext *avctx = h->avctx;
102  const AVFrame *src = h->cur_pic.f;
103  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
104  int vshift = desc->log2_chroma_h;
105  const int field_pic = h->picture_structure != PICT_FRAME;
106  if (field_pic) {
107  height <<= 1;
108  y <<= 1;
109  }
110 
111  height = FFMIN(height, avctx->height - y);
112 
113  if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
114  return;
115 
116  if (avctx->draw_horiz_band) {
118  int i;
119 
120  offset[0] = y * src->linesize[0];
121  offset[1] =
122  offset[2] = (y >> vshift) * src->linesize[1];
123  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
124  offset[i] = 0;
125 
126  emms_c();
127 
128  avctx->draw_horiz_band(avctx, src, offset,
129  y, h->picture_structure, height);
130  }
131 }
132 
133 /**
134  * Check if the top & left blocks are available if needed and
135  * change the dc mode so it only uses the available blocks.
136  */
138 {
139  static const int8_t top[12] = {
140  -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
141  };
142  static const int8_t left[12] = {
143  0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
144  };
145  int i;
146 
147  if (!(sl->top_samples_available & 0x8000)) {
148  for (i = 0; i < 4; i++) {
149  int status = top[sl->intra4x4_pred_mode_cache[scan8[0] + i]];
150  if (status < 0) {
152  "top block unavailable for requested intra4x4 mode %d at %d %d\n",
153  status, sl->mb_x, sl->mb_y);
154  return AVERROR_INVALIDDATA;
155  } else if (status) {
156  sl->intra4x4_pred_mode_cache[scan8[0] + i] = status;
157  }
158  }
159  }
160 
161  if ((sl->left_samples_available & 0x8888) != 0x8888) {
162  static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
163  for (i = 0; i < 4; i++)
164  if (!(sl->left_samples_available & mask[i])) {
165  int status = left[sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i]];
166  if (status < 0) {
168  "left block unavailable for requested intra4x4 mode %d at %d %d\n",
169  status, sl->mb_x, sl->mb_y);
170  return AVERROR_INVALIDDATA;
171  } else if (status) {
172  sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status;
173  }
174  }
175  }
176 
177  return 0;
178 } // FIXME cleanup like ff_h264_check_intra_pred_mode
179 
180 /**
181  * Check if the top & left blocks are available if needed and
182  * change the dc mode so it only uses the available blocks.
183  */
185  int mode, int is_chroma)
186 {
187  static const int8_t top[4] = { LEFT_DC_PRED8x8, 1, -1, -1 };
188  static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
189 
190  if (mode > 3U) {
192  "out of range intra chroma pred mode at %d %d\n",
193  sl->mb_x, sl->mb_y);
194  return AVERROR_INVALIDDATA;
195  }
196 
197  if (!(sl->top_samples_available & 0x8000)) {
198  mode = top[mode];
199  if (mode < 0) {
201  "top block unavailable for requested intra mode at %d %d\n",
202  sl->mb_x, sl->mb_y);
203  return AVERROR_INVALIDDATA;
204  }
205  }
206 
207  if ((sl->left_samples_available & 0x8080) != 0x8080) {
208  mode = left[mode];
209  if (mode < 0) {
211  "left block unavailable for requested intra mode at %d %d\n",
212  sl->mb_x, sl->mb_y);
213  return AVERROR_INVALIDDATA;
214  }
215  if (is_chroma && (sl->left_samples_available & 0x8080)) {
216  // mad cow disease mode, aka MBAFF + constrained_intra_pred
217  mode = ALZHEIMER_DC_L0T_PRED8x8 +
218  (!(sl->left_samples_available & 0x8000)) +
219  2 * (mode == DC_128_PRED8x8);
220  }
221  }
222 
223  return mode;
224 }
225 
227  const uint8_t *src,
228  int *dst_length, int *consumed, int length)
229 {
230  int i, si, di;
231  uint8_t *dst;
232 
233  // src[0]&0x80; // forbidden bit
234  h->nal_ref_idc = src[0] >> 5;
235  h->nal_unit_type = src[0] & 0x1F;
236 
237  src++;
238  length--;
239 
240 #define STARTCODE_TEST \
241  if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
242  if (src[i + 2] != 3 && src[i + 2] != 0) { \
243  /* startcode, so we must be past the end */ \
244  length = i; \
245  } \
246  break; \
247  }
248 
249 #if HAVE_FAST_UNALIGNED
250 #define FIND_FIRST_ZERO \
251  if (i > 0 && !src[i]) \
252  i--; \
253  while (src[i]) \
254  i++
255 
256 #if HAVE_FAST_64BIT
257  for (i = 0; i + 1 < length; i += 9) {
258  if (!((~AV_RN64A(src + i) &
259  (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
260  0x8000800080008080ULL))
261  continue;
262  FIND_FIRST_ZERO;
264  i -= 7;
265  }
266 #else
267  for (i = 0; i + 1 < length; i += 5) {
268  if (!((~AV_RN32A(src + i) &
269  (AV_RN32A(src + i) - 0x01000101U)) &
270  0x80008080U))
271  continue;
272  FIND_FIRST_ZERO;
274  i -= 3;
275  }
276 #endif
277 #else
278  for (i = 0; i + 1 < length; i += 2) {
279  if (src[i])
280  continue;
281  if (i > 0 && src[i - 1] == 0)
282  i--;
284  }
285 #endif
286 
288  dst = sl->rbsp_buffer;
289 
290  if (!dst)
291  return NULL;
292 
293  if(i>=length-1){ //no escaped 0
294  *dst_length= length;
295  *consumed= length+1; //+1 for the header
296  if(h->avctx->flags2 & AV_CODEC_FLAG2_FAST){
297  return src;
298  }else{
299  memcpy(dst, src, length);
300  return dst;
301  }
302  }
303 
304  memcpy(dst, src, i);
305  si = di = i;
306  while (si + 2 < length) {
307  // remove escapes (very rare 1:2^22)
308  if (src[si + 2] > 3) {
309  dst[di++] = src[si++];
310  dst[di++] = src[si++];
311  } else if (src[si] == 0 && src[si + 1] == 0 && src[si + 2] != 0) {
312  if (src[si + 2] == 3) { // escape
313  dst[di++] = 0;
314  dst[di++] = 0;
315  si += 3;
316  continue;
317  } else // next start code
318  goto nsc;
319  }
320 
321  dst[di++] = src[si++];
322  }
323  while (si < length)
324  dst[di++] = src[si++];
325 
326 nsc:
327  memset(dst + di, 0, AV_INPUT_BUFFER_PADDING_SIZE);
328 
329  *dst_length = di;
330  *consumed = si + 1; // +1 for the header
331  /* FIXME store exact number of bits in the getbitcontext
332  * (it is needed for decoding) */
333  return dst;
334 }
335 
336 /**
337  * Identify the exact end of the bitstream
338  * @return the length of the trailing, or 0 if damaged
339  */
341 {
342  int v = *src;
343  int r;
344 
345  ff_tlog(h->avctx, "rbsp trailing %X\n", v);
346 
347  for (r = 1; r < 9; r++) {
348  if (v & 1)
349  return r;
350  v >>= 1;
351  }
352  return 0;
353 }
354 
356 {
357  int i;
358 
361  av_freep(&h->cbp_table);
362  av_freep(&h->mvd_table[0]);
363  av_freep(&h->mvd_table[1]);
364  av_freep(&h->direct_table);
367  h->slice_table = NULL;
368  av_freep(&h->list_counts);
369 
370  av_freep(&h->mb2b_xy);
371  av_freep(&h->mb2br_xy);
372 
377 
378  for (i = 0; i < h->nb_slice_ctx; i++) {
379  H264SliceContext *sl = &h->slice_ctx[i];
380 
381  av_freep(&sl->dc_val_base);
382  av_freep(&sl->er.mb_index2xy);
384  av_freep(&sl->er.er_temp_buffer);
385 
388  av_freep(&sl->top_borders[0]);
389  av_freep(&sl->top_borders[1]);
390 
393  sl->top_borders_allocated[0] = 0;
394  sl->top_borders_allocated[1] = 0;
395  }
396 }
397 
399 {
400  const int big_mb_num = h->mb_stride * (h->mb_height + 1);
401  const int row_mb_num = 2*h->mb_stride*FFMAX(h->avctx->thread_count, 1);
402  int x, y;
403 
405  row_mb_num, 8 * sizeof(uint8_t), fail)
407 
409  big_mb_num * 48 * sizeof(uint8_t), fail)
411  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
413  big_mb_num * sizeof(uint16_t), fail)
415  big_mb_num * sizeof(uint8_t), fail)
417  row_mb_num, 16 * sizeof(uint8_t), fail);
419  row_mb_num, 16 * sizeof(uint8_t), fail);
420  h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
421  h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
422 
424  4 * big_mb_num * sizeof(uint8_t), fail);
426  big_mb_num * sizeof(uint8_t), fail)
427 
428  memset(h->slice_table_base, -1,
429  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
430  h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
431 
433  big_mb_num * sizeof(uint32_t), fail);
435  big_mb_num * sizeof(uint32_t), fail);
436  for (y = 0; y < h->mb_height; y++)
437  for (x = 0; x < h->mb_width; x++) {
438  const int mb_xy = x + y * h->mb_stride;
439  const int b_xy = 4 * x + 4 * y * h->b_stride;
440 
441  h->mb2b_xy[mb_xy] = b_xy;
442  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
443  }
444 
445  if (!h->dequant4_coeff[0])
447 
448  return 0;
449 
450 fail:
452  return AVERROR(ENOMEM);
453 }
454 
455 /**
456  * Init context
457  * Allocate buffers which are not shared amongst multiple threads.
458  */
460 {
461  ERContext *er = &sl->er;
462  int mb_array_size = h->mb_height * h->mb_stride;
463  int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
464  int c_size = h->mb_stride * (h->mb_height + 1);
465  int yc_size = y_size + 2 * c_size;
466  int x, y, i;
467 
468  sl->ref_cache[0][scan8[5] + 1] =
469  sl->ref_cache[0][scan8[7] + 1] =
470  sl->ref_cache[0][scan8[13] + 1] =
471  sl->ref_cache[1][scan8[5] + 1] =
472  sl->ref_cache[1][scan8[7] + 1] =
473  sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
474 
475  if (sl != h->slice_ctx) {
476  memset(er, 0, sizeof(*er));
477  } else
478  if (CONFIG_ERROR_RESILIENCE) {
479 
480  /* init ER */
481  er->avctx = h->avctx;
483  er->opaque = h;
484  er->quarter_sample = 1;
485 
486  er->mb_num = h->mb_num;
487  er->mb_width = h->mb_width;
488  er->mb_height = h->mb_height;
489  er->mb_stride = h->mb_stride;
490  er->b8_stride = h->mb_width * 2 + 1;
491 
492  // error resilience code looks cleaner with this
494  (h->mb_num + 1) * sizeof(int), fail);
495 
496  for (y = 0; y < h->mb_height; y++)
497  for (x = 0; x < h->mb_width; x++)
498  er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
499 
500  er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
501  h->mb_stride + h->mb_width;
502 
504  mb_array_size * sizeof(uint8_t), fail);
505 
507  h->mb_height * h->mb_stride, fail);
508 
510  yc_size * sizeof(int16_t), fail);
511  er->dc_val[0] = sl->dc_val_base + h->mb_width * 2 + 2;
512  er->dc_val[1] = sl->dc_val_base + y_size + h->mb_stride + 1;
513  er->dc_val[2] = er->dc_val[1] + c_size;
514  for (i = 0; i < yc_size; i++)
515  sl->dc_val_base[i] = 1024;
516  }
517 
518  return 0;
519 
520 fail:
521  return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
522 }
523 
524 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
525  int parse_extradata);
526 
528 {
529  AVCodecContext *avctx = h->avctx;
530  int ret;
531 
532  if (!buf || size <= 0)
533  return -1;
534 
535  if (buf[0] == 1) {
536  int i, cnt, nalsize;
537  const unsigned char *p = buf;
538 
539  h->is_avc = 1;
540 
541  if (size < 7) {
542  av_log(avctx, AV_LOG_ERROR,
543  "avcC %d too short\n", size);
544  return AVERROR_INVALIDDATA;
545  }
546  /* sps and pps in the avcC always have length coded with 2 bytes,
547  * so put a fake nal_length_size = 2 while parsing them */
548  h->nal_length_size = 2;
549  // Decode sps from avcC
550  cnt = *(p + 5) & 0x1f; // Number of sps
551  p += 6;
552  for (i = 0; i < cnt; i++) {
553  nalsize = AV_RB16(p) + 2;
554  if(nalsize > size - (p-buf))
555  return AVERROR_INVALIDDATA;
556  ret = decode_nal_units(h, p, nalsize, 1);
557  if (ret < 0) {
558  av_log(avctx, AV_LOG_ERROR,
559  "Decoding sps %d from avcC failed\n", i);
560  return ret;
561  }
562  p += nalsize;
563  }
564  // Decode pps from avcC
565  cnt = *(p++); // Number of pps
566  for (i = 0; i < cnt; i++) {
567  nalsize = AV_RB16(p) + 2;
568  if(nalsize > size - (p-buf))
569  return AVERROR_INVALIDDATA;
570  ret = decode_nal_units(h, p, nalsize, 1);
571  if (ret < 0) {
572  av_log(avctx, AV_LOG_ERROR,
573  "Decoding pps %d from avcC failed\n", i);
574  return ret;
575  }
576  p += nalsize;
577  }
578  // Store right nal length size that will be used to parse all other nals
579  h->nal_length_size = (buf[4] & 0x03) + 1;
580  } else {
581  h->is_avc = 0;
582  ret = decode_nal_units(h, buf, size, 1);
583  if (ret < 0)
584  return ret;
585  }
586  return size;
587 }
588 
590 {
591  int i;
592 
593  h->avctx = avctx;
594  h->backup_width = -1;
595  h->backup_height = -1;
597  h->dequant_coeff_pps = -1;
598  h->current_sps_id = -1;
599  h->cur_chroma_format_idc = -1;
600 
602  h->slice_context_count = 1;
603  h->workaround_bugs = avctx->workaround_bugs;
604  h->flags = avctx->flags;
605  h->prev_poc_msb = 1 << 16;
606  h->x264_build = -1;
607  h->recovery_frame = -1;
608  h->frame_recovered = 0;
609  h->prev_frame_num = -1;
611 
612  h->next_outputed_poc = INT_MIN;
613  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
614  h->last_pocs[i] = INT_MIN;
615 
617 
619 
621  h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
622  if (!h->slice_ctx) {
623  h->nb_slice_ctx = 0;
624  return AVERROR(ENOMEM);
625  }
626 
627  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
628  h->DPB[i].f = av_frame_alloc();
629  if (!h->DPB[i].f)
630  return AVERROR(ENOMEM);
631  }
632 
633  h->cur_pic.f = av_frame_alloc();
634  if (!h->cur_pic.f)
635  return AVERROR(ENOMEM);
636 
638  if (!h->last_pic_for_ec.f)
639  return AVERROR(ENOMEM);
640 
641  for (i = 0; i < h->nb_slice_ctx; i++)
642  h->slice_ctx[i].h264 = h;
643 
644  return 0;
645 }
646 
648 {
649  H264Context *h = avctx->priv_data;
650  int ret;
651 
652  ret = h264_init_context(avctx, h);
653  if (ret < 0)
654  return ret;
655 
656  /* set defaults */
657  if (!avctx->has_b_frames)
658  h->low_delay = 1;
659 
661 
663 
664  if (avctx->codec_id == AV_CODEC_ID_H264) {
665  if (avctx->ticks_per_frame == 1) {
666  if(h->avctx->time_base.den < INT_MAX/2) {
667  h->avctx->time_base.den *= 2;
668  } else
669  h->avctx->time_base.num /= 2;
670  }
671  avctx->ticks_per_frame = 2;
672  }
673 
674  if (avctx->extradata_size > 0 && avctx->extradata) {
675  ret = ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size);
676  if (ret < 0) {
678  return ret;
679  }
680  }
681 
685  h->low_delay = 0;
686  }
687 
688  avctx->internal->allocate_progress = 1;
689 
691 
692  if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
693  h->enable_er = 0;
694 
695  if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
696  av_log(avctx, AV_LOG_WARNING,
697  "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
698  "Use it at your own risk\n");
699  }
700 
701  return 0;
702 }
703 
705 {
706  H264Context *h = avctx->priv_data;
707  int ret;
708 
709  if (!avctx->internal->is_copy)
710  return 0;
711 
712  memset(h, 0, sizeof(*h));
713 
714  ret = h264_init_context(avctx, h);
715  if (ret < 0)
716  return ret;
717 
718  h->context_initialized = 0;
719 
720  return 0;
721 }
722 
723 /**
724  * Run setup operations that must be run after slice header decoding.
725  * This includes finding the next displayed frame.
726  *
727  * @param h h264 master context
728  * @param setup_finished enough NALs have been read that we can call
729  * ff_thread_finish_setup()
730  */
731 static void decode_postinit(H264Context *h, int setup_finished)
732 {
734  H264Picture *cur = h->cur_pic_ptr;
735  int i, pics, out_of_order, out_idx;
736 
737  h->cur_pic_ptr->f->pict_type = h->pict_type;
738 
739  if (h->next_output_pic)
740  return;
741 
742  if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
743  /* FIXME: if we have two PAFF fields in one packet, we can't start
744  * the next thread here. If we have one field per packet, we can.
745  * The check in decode_nal_units() is not good enough to find this
746  * yet, so we assume the worst for now. */
747  // if (setup_finished)
748  // ff_thread_finish_setup(h->avctx);
749  if (cur->field_poc[0] == INT_MAX && cur->field_poc[1] == INT_MAX)
750  return;
751  if (h->avctx->hwaccel || h->missing_fields <=1)
752  return;
753  }
754 
755  cur->f->interlaced_frame = 0;
756  cur->f->repeat_pict = 0;
757 
758  /* Signal interlacing information externally. */
759  /* Prioritize picture timing SEI information over used
760  * decoding process if it exists. */
761 
762  if (h->sps.pic_struct_present_flag) {
763  switch (h->sei_pic_struct) {
765  break;
768  cur->f->interlaced_frame = 1;
769  break;
772  if (FIELD_OR_MBAFF_PICTURE(h))
773  cur->f->interlaced_frame = 1;
774  else
775  // try to flag soft telecine progressive
777  break;
780  /* Signal the possibility of telecined film externally
781  * (pic_struct 5,6). From these hints, let the applications
782  * decide if they apply deinterlacing. */
783  cur->f->repeat_pict = 1;
784  break;
786  cur->f->repeat_pict = 2;
787  break;
789  cur->f->repeat_pict = 4;
790  break;
791  }
792 
793  if ((h->sei_ct_type & 3) &&
795  cur->f->interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
796  } else {
797  /* Derive interlacing flag from used decoding process. */
799  }
801 
802  if (cur->field_poc[0] != cur->field_poc[1]) {
803  /* Derive top_field_first from field pocs. */
804  cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
805  } else {
806  if (cur->f->interlaced_frame || h->sps.pic_struct_present_flag) {
807  /* Use picture timing SEI information. Even if it is a
808  * information of a past frame, better than nothing. */
811  cur->f->top_field_first = 1;
812  else
813  cur->f->top_field_first = 0;
814  } else {
815  /* Most likely progressive */
816  cur->f->top_field_first = 0;
817  }
818  }
819 
820  if (h->sei_frame_packing_present &&
825  AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f);
826  if (stereo) {
827  switch (h->frame_packing_arrangement_type) {
828  case 0:
829  stereo->type = AV_STEREO3D_CHECKERBOARD;
830  break;
831  case 1:
832  stereo->type = AV_STEREO3D_COLUMNS;
833  break;
834  case 2:
835  stereo->type = AV_STEREO3D_LINES;
836  break;
837  case 3:
838  if (h->quincunx_subsampling)
840  else
841  stereo->type = AV_STEREO3D_SIDEBYSIDE;
842  break;
843  case 4:
844  stereo->type = AV_STEREO3D_TOPBOTTOM;
845  break;
846  case 5:
848  break;
849  case 6:
850  stereo->type = AV_STEREO3D_2D;
851  break;
852  }
853 
854  if (h->content_interpretation_type == 2)
855  stereo->flags = AV_STEREO3D_FLAG_INVERT;
856  }
857  }
858 
861  double angle = h->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
862  AVFrameSideData *rotation = av_frame_new_side_data(cur->f,
864  sizeof(int32_t) * 9);
865  if (rotation) {
866  av_display_rotation_set((int32_t *)rotation->data, angle);
867  av_display_matrix_flip((int32_t *)rotation->data,
868  h->sei_hflip, h->sei_vflip);
869  }
870  }
871 
874  sizeof(uint8_t));
875 
876  if (sd) {
879  }
880  }
881 
882  if (h->a53_caption) {
885  h->a53_caption_size);
886  if (sd)
887  memcpy(sd->data, h->a53_caption, h->a53_caption_size);
888  av_freep(&h->a53_caption);
889  h->a53_caption_size = 0;
891  }
892 
893  cur->mmco_reset = h->mmco_reset;
894  h->mmco_reset = 0;
895 
896  // FIXME do something with unavailable reference frames
897 
898  /* Sort B-frames into display order */
899 
903  h->low_delay = 0;
904  }
905 
909  h->low_delay = 0;
910  }
911 
912  for (i = 0; 1; i++) {
913  if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
914  if(i)
915  h->last_pocs[i-1] = cur->poc;
916  break;
917  } else if(i) {
918  h->last_pocs[i-1]= h->last_pocs[i];
919  }
920  }
921  out_of_order = MAX_DELAYED_PIC_COUNT - i;
922  if( cur->f->pict_type == AV_PICTURE_TYPE_B
924  out_of_order = FFMAX(out_of_order, 1);
925  if (out_of_order == MAX_DELAYED_PIC_COUNT) {
926  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
927  for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
928  h->last_pocs[i] = INT_MIN;
929  h->last_pocs[0] = cur->poc;
930  cur->mmco_reset = 1;
931  } else if(h->avctx->has_b_frames < out_of_order && !h->sps.bitstream_restriction_flag){
932  av_log(h->avctx, AV_LOG_VERBOSE, "Increasing reorder buffer to %d\n", out_of_order);
933  h->avctx->has_b_frames = out_of_order;
934  h->low_delay = 0;
935  }
936 
937  pics = 0;
938  while (h->delayed_pic[pics])
939  pics++;
940 
942 
943  h->delayed_pic[pics++] = cur;
944  if (cur->reference == 0)
945  cur->reference = DELAYED_PIC_REF;
946 
947  out = h->delayed_pic[0];
948  out_idx = 0;
949  for (i = 1; h->delayed_pic[i] &&
950  !h->delayed_pic[i]->f->key_frame &&
951  !h->delayed_pic[i]->mmco_reset;
952  i++)
953  if (h->delayed_pic[i]->poc < out->poc) {
954  out = h->delayed_pic[i];
955  out_idx = i;
956  }
957  if (h->avctx->has_b_frames == 0 &&
958  (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
959  h->next_outputed_poc = INT_MIN;
960  out_of_order = out->poc < h->next_outputed_poc;
961 
962  if (out_of_order || pics > h->avctx->has_b_frames) {
963  out->reference &= ~DELAYED_PIC_REF;
964  // for frame threading, the owner must be the second field's thread or
965  // else the first thread can release the picture and reuse it unsafely
966  for (i = out_idx; h->delayed_pic[i]; i++)
967  h->delayed_pic[i] = h->delayed_pic[i + 1];
968  }
969  if (!out_of_order && pics > h->avctx->has_b_frames) {
970  h->next_output_pic = out;
971  if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
972  h->next_outputed_poc = INT_MIN;
973  } else
974  h->next_outputed_poc = out->poc;
975  } else {
976  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
977  }
978 
979  if (h->next_output_pic) {
980  if (h->next_output_pic->recovered) {
981  // We have reached an recovery point and all frames after it in
982  // display order are "recovered".
984  }
986  }
987 
988  if (setup_finished && !h->avctx->hwaccel) {
990 
992  h->setup_finished = 1;
993  }
994 }
995 
997 {
998  int list, i;
999  int luma_def, chroma_def;
1000 
1001  sl->use_weight = 0;
1002  sl->use_weight_chroma = 0;
1004  if (h->sps.chroma_format_idc)
1006 
1007  if (sl->luma_log2_weight_denom > 7U) {
1008  av_log(h->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is out of range\n", sl->luma_log2_weight_denom);
1009  sl->luma_log2_weight_denom = 0;
1010  }
1011  if (sl->chroma_log2_weight_denom > 7U) {
1012  av_log(h->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %d is out of range\n", sl->chroma_log2_weight_denom);
1013  sl->chroma_log2_weight_denom = 0;
1014  }
1015 
1016  luma_def = 1 << sl->luma_log2_weight_denom;
1017  chroma_def = 1 << sl->chroma_log2_weight_denom;
1018 
1019  for (list = 0; list < 2; list++) {
1020  sl->luma_weight_flag[list] = 0;
1021  sl->chroma_weight_flag[list] = 0;
1022  for (i = 0; i < sl->ref_count[list]; i++) {
1023  int luma_weight_flag, chroma_weight_flag;
1024 
1025  luma_weight_flag = get_bits1(&sl->gb);
1026  if (luma_weight_flag) {
1027  sl->luma_weight[i][list][0] = get_se_golomb(&sl->gb);
1028  sl->luma_weight[i][list][1] = get_se_golomb(&sl->gb);
1029  if (sl->luma_weight[i][list][0] != luma_def ||
1030  sl->luma_weight[i][list][1] != 0) {
1031  sl->use_weight = 1;
1032  sl->luma_weight_flag[list] = 1;
1033  }
1034  } else {
1035  sl->luma_weight[i][list][0] = luma_def;
1036  sl->luma_weight[i][list][1] = 0;
1037  }
1038 
1039  if (h->sps.chroma_format_idc) {
1040  chroma_weight_flag = get_bits1(&sl->gb);
1041  if (chroma_weight_flag) {
1042  int j;
1043  for (j = 0; j < 2; j++) {
1044  sl->chroma_weight[i][list][j][0] = get_se_golomb(&sl->gb);
1045  sl->chroma_weight[i][list][j][1] = get_se_golomb(&sl->gb);
1046  if (sl->chroma_weight[i][list][j][0] != chroma_def ||
1047  sl->chroma_weight[i][list][j][1] != 0) {
1048  sl->use_weight_chroma = 1;
1049  sl->chroma_weight_flag[list] = 1;
1050  }
1051  }
1052  } else {
1053  int j;
1054  for (j = 0; j < 2; j++) {
1055  sl->chroma_weight[i][list][j][0] = chroma_def;
1056  sl->chroma_weight[i][list][j][1] = 0;
1057  }
1058  }
1059  }
1060  }
1061  if (sl->slice_type_nos != AV_PICTURE_TYPE_B)
1062  break;
1063  }
1064  sl->use_weight = sl->use_weight || sl->use_weight_chroma;
1065  return 0;
1066 }
1067 
1068 /**
1069  * instantaneous decoder refresh.
1070  */
1071 static void idr(H264Context *h)
1072 {
1073  int i;
1075  h->prev_frame_num =
1076  h->prev_frame_num_offset = 0;
1077  h->prev_poc_msb = 1<<16;
1078  h->prev_poc_lsb = 0;
1079  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
1080  h->last_pocs[i] = INT_MIN;
1081 }
1082 
1083 /* forget old pics after a seek */
1085 {
1086  int i, j;
1087 
1088  h->next_outputed_poc = INT_MIN;
1089  h->prev_interlaced_frame = 1;
1090  idr(h);
1091 
1092  h->prev_frame_num = -1;
1093  if (h->cur_pic_ptr) {
1094  h->cur_pic_ptr->reference = 0;
1095  for (j=i=0; h->delayed_pic[i]; i++)
1096  if (h->delayed_pic[i] != h->cur_pic_ptr)
1097  h->delayed_pic[j++] = h->delayed_pic[i];
1098  h->delayed_pic[j] = NULL;
1099  }
1101 
1102  h->first_field = 0;
1103  ff_h264_reset_sei(h);
1104  h->recovery_frame = -1;
1105  h->frame_recovered = 0;
1106  h->current_slice = 0;
1107  h->mmco_reset = 1;
1108  for (i = 0; i < h->nb_slice_ctx; i++)
1109  h->slice_ctx[i].list_count = 0;
1110 }
1111 
1112 /* forget old pics after a seek */
1113 static void flush_dpb(AVCodecContext *avctx)
1114 {
1115  H264Context *h = avctx->priv_data;
1116  int i;
1117 
1118  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
1119 
1121 
1122  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
1123  ff_h264_unref_picture(h, &h->DPB[i]);
1124  h->cur_pic_ptr = NULL;
1126 
1127  h->mb_y = 0;
1128 
1130  h->context_initialized = 0;
1131 }
1132 
1133 int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
1134 {
1135  const int max_frame_num = 1 << h->sps.log2_max_frame_num;
1136  int field_poc[2];
1137 
1139  if (h->frame_num < h->prev_frame_num)
1140  h->frame_num_offset += max_frame_num;
1141 
1142  if (h->sps.poc_type == 0) {
1143  const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
1144 
1145  if (h->poc_lsb < h->prev_poc_lsb &&
1146  h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
1147  h->poc_msb = h->prev_poc_msb + max_poc_lsb;
1148  else if (h->poc_lsb > h->prev_poc_lsb &&
1149  h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2)
1150  h->poc_msb = h->prev_poc_msb - max_poc_lsb;
1151  else
1152  h->poc_msb = h->prev_poc_msb;
1153  field_poc[0] =
1154  field_poc[1] = h->poc_msb + h->poc_lsb;
1155  if (h->picture_structure == PICT_FRAME)
1156  field_poc[1] += h->delta_poc_bottom;
1157  } else if (h->sps.poc_type == 1) {
1158  int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
1159  int i;
1160 
1161  if (h->sps.poc_cycle_length != 0)
1162  abs_frame_num = h->frame_num_offset + h->frame_num;
1163  else
1164  abs_frame_num = 0;
1165 
1166  if (h->nal_ref_idc == 0 && abs_frame_num > 0)
1167  abs_frame_num--;
1168 
1169  expected_delta_per_poc_cycle = 0;
1170  for (i = 0; i < h->sps.poc_cycle_length; i++)
1171  // FIXME integrate during sps parse
1172  expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
1173 
1174  if (abs_frame_num > 0) {
1175  int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
1176  int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
1177 
1178  expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
1179  for (i = 0; i <= frame_num_in_poc_cycle; i++)
1180  expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
1181  } else
1182  expectedpoc = 0;
1183 
1184  if (h->nal_ref_idc == 0)
1185  expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
1186 
1187  field_poc[0] = expectedpoc + h->delta_poc[0];
1188  field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
1189 
1190  if (h->picture_structure == PICT_FRAME)
1191  field_poc[1] += h->delta_poc[1];
1192  } else {
1193  int poc = 2 * (h->frame_num_offset + h->frame_num);
1194 
1195  if (!h->nal_ref_idc)
1196  poc--;
1197 
1198  field_poc[0] = poc;
1199  field_poc[1] = poc;
1200  }
1201 
1203  pic_field_poc[0] = field_poc[0];
1205  pic_field_poc[1] = field_poc[1];
1206  *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]);
1207 
1208  return 0;
1209 }
1210 
1211 /**
1212  * Compute profile from profile_idc and constraint_set?_flags.
1213  *
1214  * @param sps SPS
1215  *
1216  * @return profile as defined by FF_PROFILE_H264_*
1217  */
1219 {
1220  int profile = sps->profile_idc;
1221 
1222  switch (sps->profile_idc) {
1224  // constraint_set1_flag set to 1
1225  profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
1226  break;
1230  // constraint_set3_flag set to 1
1231  profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
1232  break;
1233  }
1234 
1235  return profile;
1236 }
1237 
1239 {
1240  int ref_count[2], list_count;
1241  int num_ref_idx_active_override_flag;
1242 
1243  // set defaults, might be overridden a few lines later
1244  ref_count[0] = h->pps.ref_count[0];
1245  ref_count[1] = h->pps.ref_count[1];
1246 
1247  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
1248  unsigned max[2];
1249  max[0] = max[1] = h->picture_structure == PICT_FRAME ? 15 : 31;
1250 
1251  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1252  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
1253  num_ref_idx_active_override_flag = get_bits1(&sl->gb);
1254 
1255  if (num_ref_idx_active_override_flag) {
1256  ref_count[0] = get_ue_golomb(&sl->gb) + 1;
1257  if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
1258  ref_count[1] = get_ue_golomb(&sl->gb) + 1;
1259  } else
1260  // full range is spec-ok in this case, even for frames
1261  ref_count[1] = 1;
1262  }
1263 
1264  if (ref_count[0]-1 > max[0] || ref_count[1]-1 > max[1]){
1265  av_log(h->avctx, AV_LOG_ERROR, "reference overflow %u > %u or %u > %u\n", ref_count[0]-1, max[0], ref_count[1]-1, max[1]);
1266  sl->ref_count[0] = sl->ref_count[1] = 0;
1267  sl->list_count = 0;
1268  return AVERROR_INVALIDDATA;
1269  }
1270 
1271  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1272  list_count = 2;
1273  else
1274  list_count = 1;
1275  } else {
1276  list_count = 0;
1277  ref_count[0] = ref_count[1] = 0;
1278  }
1279 
1280  if (list_count != sl->list_count ||
1281  ref_count[0] != sl->ref_count[0] ||
1282  ref_count[1] != sl->ref_count[1]) {
1283  sl->ref_count[0] = ref_count[0];
1284  sl->ref_count[1] = ref_count[1];
1285  sl->list_count = list_count;
1286  return 1;
1287  }
1288 
1289  return 0;
1290 }
1291 
1292 static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
1293 
1295  const uint8_t *ptr, int dst_length,
1296  int i, int next_avc)
1297 {
1298  if ((h->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc &&
1299  buf[i] == 0x00 && buf[i + 1] == 0x00 &&
1300  buf[i + 2] == 0x01 && buf[i + 3] == 0xE0)
1302 
1303  if (!(h->workaround_bugs & FF_BUG_TRUNCATED))
1304  while (dst_length > 0 && ptr[dst_length - 1] == 0)
1305  dst_length--;
1306 
1307  if (!dst_length)
1308  return 0;
1309 
1310  return 8 * dst_length - decode_rbsp_trailing(h, ptr + dst_length - 1);
1311 }
1312 
1313 static int get_last_needed_nal(H264Context *h, const uint8_t *buf, int buf_size)
1314 {
1315  int next_avc = h->is_avc ? 0 : buf_size;
1316  int nal_index = 0;
1317  int buf_index = 0;
1318  int nals_needed = 0;
1319  int first_slice = 0;
1320 
1321  while(1) {
1322  GetBitContext gb;
1323  int nalsize = 0;
1324  int dst_length, bit_length, consumed;
1325  const uint8_t *ptr;
1326 
1327  if (buf_index >= next_avc) {
1328  nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
1329  if (nalsize < 0)
1330  break;
1331  next_avc = buf_index + nalsize;
1332  } else {
1333  buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
1334  if (buf_index >= buf_size)
1335  break;
1336  if (buf_index >= next_avc)
1337  continue;
1338  }
1339 
1340  ptr = ff_h264_decode_nal(h, &h->slice_ctx[0], buf + buf_index, &dst_length, &consumed,
1341  next_avc - buf_index);
1342 
1343  if (!ptr || dst_length < 0)
1344  return AVERROR_INVALIDDATA;
1345 
1346  buf_index += consumed;
1347 
1348  bit_length = get_bit_length(h, buf, ptr, dst_length,
1349  buf_index, next_avc);
1350  nal_index++;
1351 
1352  /* packets can sometimes contain multiple PPS/SPS,
1353  * e.g. two PAFF field pictures in one packet, or a demuxer
1354  * which splits NALs strangely if so, when frame threading we
1355  * can't start the next thread until we've read all of them */
1356  switch (h->nal_unit_type) {
1357  case NAL_SPS:
1358  case NAL_PPS:
1359  nals_needed = nal_index;
1360  break;
1361  case NAL_DPA:
1362  case NAL_IDR_SLICE:
1363  case NAL_SLICE:
1364  init_get_bits(&gb, ptr, bit_length);
1365  if (!get_ue_golomb(&gb) ||
1366  !first_slice ||
1367  first_slice != h->nal_unit_type)
1368  nals_needed = nal_index;
1369  if (!first_slice)
1370  first_slice = h->nal_unit_type;
1371  }
1372  }
1373 
1374  return nals_needed;
1375 }
1376 
1377 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
1378  int parse_extradata)
1379 {
1380  AVCodecContext *const avctx = h->avctx;
1381  H264SliceContext *sl;
1382  int buf_index;
1383  unsigned context_count;
1384  int next_avc;
1385  int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
1386  int nal_index;
1387  int idr_cleared=0;
1388  int ret = 0;
1389 
1390  h->nal_unit_type= 0;
1391 
1392  if(!h->slice_context_count)
1393  h->slice_context_count= 1;
1395  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
1396  h->current_slice = 0;
1397  if (!h->first_field)
1398  h->cur_pic_ptr = NULL;
1399  ff_h264_reset_sei(h);
1400  }
1401 
1402  if (h->nal_length_size == 4) {
1403  if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
1404  h->is_avc = 0;
1405  }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
1406  h->is_avc = 1;
1407  }
1408 
1409  if (avctx->active_thread_type & FF_THREAD_FRAME)
1410  nals_needed = get_last_needed_nal(h, buf, buf_size);
1411 
1412  {
1413  buf_index = 0;
1414  context_count = 0;
1415  next_avc = h->is_avc ? 0 : buf_size;
1416  nal_index = 0;
1417  for (;;) {
1418  int consumed;
1419  int dst_length;
1420  int bit_length;
1421  const uint8_t *ptr;
1422  int nalsize = 0;
1423  int err;
1424 
1425  if (buf_index >= next_avc) {
1426  nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
1427  if (nalsize < 0)
1428  break;
1429  next_avc = buf_index + nalsize;
1430  } else {
1431  buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
1432  if (buf_index >= buf_size)
1433  break;
1434  if (buf_index >= next_avc)
1435  continue;
1436  }
1437 
1438  sl = &h->slice_ctx[context_count];
1439 
1440  ptr = ff_h264_decode_nal(h, sl, buf + buf_index, &dst_length,
1441  &consumed, next_avc - buf_index);
1442  if (!ptr || dst_length < 0) {
1443  ret = -1;
1444  goto end;
1445  }
1446 
1447  bit_length = get_bit_length(h, buf, ptr, dst_length,
1448  buf_index + consumed, next_avc);
1449 
1450  if (h->avctx->debug & FF_DEBUG_STARTCODE)
1452  "NAL %d/%d at %d/%d length %d\n",
1453  h->nal_unit_type, h->nal_ref_idc, buf_index, buf_size, dst_length);
1454 
1455  if (h->is_avc && (nalsize != consumed) && nalsize)
1457  "AVC: Consumed only %d bytes instead of %d\n",
1458  consumed, nalsize);
1459 
1460  buf_index += consumed;
1461  nal_index++;
1462 
1463  if (avctx->skip_frame >= AVDISCARD_NONREF &&
1464  h->nal_ref_idc == 0 &&
1465  h->nal_unit_type != NAL_SEI)
1466  continue;
1467 
1468 again:
1469  /* Ignore per frame NAL unit type during extradata
1470  * parsing. Decoding slices is not possible in codec init
1471  * with frame-mt */
1472  if (parse_extradata) {
1473  switch (h->nal_unit_type) {
1474  case NAL_IDR_SLICE:
1475  case NAL_SLICE:
1476  case NAL_DPA:
1477  case NAL_DPB:
1478  case NAL_DPC:
1480  "Ignoring NAL %d in global header/extradata\n",
1481  h->nal_unit_type);
1482  // fall through to next case
1483  case NAL_AUXILIARY_SLICE:
1485  }
1486  }
1487 
1488  err = 0;
1489 
1490  switch (h->nal_unit_type) {
1491  case NAL_IDR_SLICE:
1492  if ((ptr[0] & 0xFC) == 0x98) {
1493  av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
1494  h->next_outputed_poc = INT_MIN;
1495  ret = -1;
1496  goto end;
1497  }
1498  if (h->nal_unit_type != NAL_IDR_SLICE) {
1500  "Invalid mix of idr and non-idr slices\n");
1501  ret = -1;
1502  goto end;
1503  }
1504  if(!idr_cleared) {
1505  if (h->current_slice && (avctx->active_thread_type & FF_THREAD_SLICE)) {
1506  av_log(h, AV_LOG_ERROR, "invalid mixed IDR / non IDR frames cannot be decoded in slice multithreading mode\n");
1507  ret = AVERROR_INVALIDDATA;
1508  goto end;
1509  }
1510  idr(h); // FIXME ensure we don't lose some frames if there is reordering
1511  }
1512  idr_cleared = 1;
1513  h->has_recovery_point = 1;
1514  case NAL_SLICE:
1515  init_get_bits(&sl->gb, ptr, bit_length);
1516 
1517  if ( nals_needed >= nal_index
1518  || (!(avctx->active_thread_type & FF_THREAD_FRAME) && !context_count))
1519  h->au_pps_id = -1;
1520 
1521  if ((err = ff_h264_decode_slice_header(h, sl)))
1522  break;
1523 
1524  if (h->sei_recovery_frame_cnt >= 0) {
1526  h->valid_recovery_point = 1;
1527 
1528  if ( h->recovery_frame < 0
1529  || av_mod_uintp2(h->recovery_frame - h->frame_num, h->sps.log2_max_frame_num) > h->sei_recovery_frame_cnt) {
1530  h->recovery_frame = av_mod_uintp2(h->frame_num + h->sei_recovery_frame_cnt, h->sps.log2_max_frame_num);
1531 
1532  if (!h->valid_recovery_point)
1533  h->recovery_frame = h->frame_num;
1534  }
1535  }
1536 
1537  h->cur_pic_ptr->f->key_frame |=
1538  (h->nal_unit_type == NAL_IDR_SLICE);
1539 
1540  if (h->nal_unit_type == NAL_IDR_SLICE ||
1541  h->recovery_frame == h->frame_num) {
1542  h->recovery_frame = -1;
1543  h->cur_pic_ptr->recovered = 1;
1544  }
1545  // If we have an IDR, all frames after it in decoded order are
1546  // "recovered".
1547  if (h->nal_unit_type == NAL_IDR_SLICE)
1549  h->frame_recovered |= 3*!!(avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL);
1550  h->frame_recovered |= 3*!!(avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT);
1551 #if 1
1553 #else
1555 #endif
1556 
1557  if (h->current_slice == 1) {
1558  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS))
1559  decode_postinit(h, nal_index >= nals_needed);
1560 
1561  if (h->avctx->hwaccel &&
1562  (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
1563  goto end;
1564 #if FF_API_CAP_VDPAU
1565  if (CONFIG_H264_VDPAU_DECODER &&
1568 #endif
1569  }
1570 
1571  if (sl->redundant_pic_count == 0) {
1572  if (avctx->hwaccel) {
1573  ret = avctx->hwaccel->decode_slice(avctx,
1574  &buf[buf_index - consumed],
1575  consumed);
1576  if (ret < 0)
1577  goto end;
1578 #if FF_API_CAP_VDPAU
1579  } else if (CONFIG_H264_VDPAU_DECODER &&
1582  start_code,
1583  sizeof(start_code));
1585  &buf[buf_index - consumed],
1586  consumed);
1587 #endif
1588  } else
1589  context_count++;
1590  }
1591  break;
1592  case NAL_DPA:
1593  case NAL_DPB:
1594  case NAL_DPC:
1595  avpriv_request_sample(avctx, "data partitioning");
1596  break;
1597  case NAL_SEI:
1598  init_get_bits(&h->gb, ptr, bit_length);
1599  ret = ff_h264_decode_sei(h);
1600  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1601  goto end;
1602  break;
1603  case NAL_SPS:
1604  init_get_bits(&h->gb, ptr, bit_length);
1605  if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
1606  break;
1607  if (h->is_avc ? nalsize : 1) {
1609  "SPS decoding failure, trying again with the complete NAL\n");
1610  if (h->is_avc)
1611  av_assert0(next_avc - buf_index + consumed == nalsize);
1612  if ((next_avc - buf_index + consumed - 1) >= INT_MAX/8)
1613  break;
1614  init_get_bits(&h->gb, &buf[buf_index + 1 - consumed],
1615  8*(next_avc - buf_index + consumed - 1));
1616  if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
1617  break;
1618  }
1619  init_get_bits(&h->gb, ptr, bit_length);
1621 
1622  break;
1623  case NAL_PPS:
1624  init_get_bits(&h->gb, ptr, bit_length);
1625  ret = ff_h264_decode_picture_parameter_set(h, bit_length);
1626  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1627  goto end;
1628  break;
1629  case NAL_AUD:
1630  case NAL_END_SEQUENCE:
1631  case NAL_END_STREAM:
1632  case NAL_FILLER_DATA:
1633  case NAL_SPS_EXT:
1634  case NAL_AUXILIARY_SLICE:
1635  break;
1636  case NAL_FF_IGNORE:
1637  break;
1638  default:
1639  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
1640  h->nal_unit_type, bit_length);
1641  }
1642 
1643  if (context_count == h->max_contexts) {
1644  ret = ff_h264_execute_decode_slices(h, context_count);
1645  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1646  goto end;
1647  context_count = 0;
1648  }
1649 
1650  if (err < 0 || err == SLICE_SKIPED) {
1651  if (err < 0)
1652  av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
1653  sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0;
1654  } else if (err == SLICE_SINGLETHREAD) {
1655  if (context_count > 1) {
1656  ret = ff_h264_execute_decode_slices(h, context_count - 1);
1657  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1658  goto end;
1659  context_count = 0;
1660  }
1661  /* Slice could not be decoded in parallel mode, restart. Note
1662  * that rbsp_buffer is not transferred, but since we no longer
1663  * run in parallel mode this should not be an issue. */
1664  sl = &h->slice_ctx[0];
1665  goto again;
1666  }
1667  }
1668  }
1669  if (context_count) {
1670  ret = ff_h264_execute_decode_slices(h, context_count);
1671  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1672  goto end;
1673  }
1674 
1675  ret = 0;
1676 end:
1677  /* clean up */
1678  if (h->cur_pic_ptr && !h->droppable) {
1681  }
1682 
1683  return (ret < 0) ? ret : buf_index;
1684 }
1685 
1686 /**
1687  * Return the number of bytes consumed for building the current frame.
1688  */
1689 static int get_consumed_bytes(int pos, int buf_size)
1690 {
1691  if (pos == 0)
1692  pos = 1; // avoid infinite loops (I doubt that is needed but...)
1693  if (pos + 10 > buf_size)
1694  pos = buf_size; // oops ;)
1695 
1696  return pos;
1697 }
1698 
1700 {
1701  AVFrame *src = srcp->f;
1702  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format);
1703  int i;
1704  int ret = av_frame_ref(dst, src);
1705  if (ret < 0)
1706  return ret;
1707 
1708  av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(h), 0);
1709 
1710  h->backup_width = h->avctx->width;
1711  h->backup_height = h->avctx->height;
1712  h->backup_pix_fmt = h->avctx->pix_fmt;
1713 
1714  h->avctx->width = dst->width;
1715  h->avctx->height = dst->height;
1716  h->avctx->pix_fmt = dst->format;
1717 
1718  if (srcp->sei_recovery_frame_cnt == 0)
1719  dst->key_frame = 1;
1720  if (!srcp->crop)
1721  return 0;
1722 
1723  for (i = 0; i < desc->nb_components; i++) {
1724  int hshift = (i > 0) ? desc->log2_chroma_w : 0;
1725  int vshift = (i > 0) ? desc->log2_chroma_h : 0;
1726  int off = ((srcp->crop_left >> hshift) << h->pixel_shift) +
1727  (srcp->crop_top >> vshift) * dst->linesize[i];
1728  dst->data[i] += off;
1729  }
1730  return 0;
1731 }
1732 
1733 static int is_extra(const uint8_t *buf, int buf_size)
1734 {
1735  int cnt= buf[5]&0x1f;
1736  const uint8_t *p= buf+6;
1737  while(cnt--){
1738  int nalsize= AV_RB16(p) + 2;
1739  if(nalsize > buf_size - (p-buf) || p[2]!=0x67)
1740  return 0;
1741  p += nalsize;
1742  }
1743  cnt = *(p++);
1744  if(!cnt)
1745  return 0;
1746  while(cnt--){
1747  int nalsize= AV_RB16(p) + 2;
1748  if(nalsize > buf_size - (p-buf) || p[2]!=0x68)
1749  return 0;
1750  p += nalsize;
1751  }
1752  return 1;
1753 }
1754 
1755 static int h264_decode_frame(AVCodecContext *avctx, void *data,
1756  int *got_frame, AVPacket *avpkt)
1757 {
1758  const uint8_t *buf = avpkt->data;
1759  int buf_size = avpkt->size;
1760  H264Context *h = avctx->priv_data;
1761  AVFrame *pict = data;
1762  int buf_index = 0;
1763  H264Picture *out;
1764  int i, out_idx;
1765  int ret;
1766 
1767  h->flags = avctx->flags;
1768  h->setup_finished = 0;
1769 
1770  if (h->backup_width != -1) {
1771  avctx->width = h->backup_width;
1772  h->backup_width = -1;
1773  }
1774  if (h->backup_height != -1) {
1775  avctx->height = h->backup_height;
1776  h->backup_height = -1;
1777  }
1778  if (h->backup_pix_fmt != AV_PIX_FMT_NONE) {
1779  avctx->pix_fmt = h->backup_pix_fmt;
1781  }
1782 
1784 
1785  /* end of stream, output what is still in the buffers */
1786  if (buf_size == 0) {
1787  out:
1788 
1789  h->cur_pic_ptr = NULL;
1790  h->first_field = 0;
1791 
1792  // FIXME factorize this with the output code below
1793  out = h->delayed_pic[0];
1794  out_idx = 0;
1795  for (i = 1;
1796  h->delayed_pic[i] &&
1797  !h->delayed_pic[i]->f->key_frame &&
1798  !h->delayed_pic[i]->mmco_reset;
1799  i++)
1800  if (h->delayed_pic[i]->poc < out->poc) {
1801  out = h->delayed_pic[i];
1802  out_idx = i;
1803  }
1804 
1805  for (i = out_idx; h->delayed_pic[i]; i++)
1806  h->delayed_pic[i] = h->delayed_pic[i + 1];
1807 
1808  if (out) {
1809  out->reference &= ~DELAYED_PIC_REF;
1810  ret = output_frame(h, pict, out);
1811  if (ret < 0)
1812  return ret;
1813  *got_frame = 1;
1814  }
1815 
1816  return buf_index;
1817  }
1819  int side_size;
1820  uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
1821  if (is_extra(side, side_size))
1822  ff_h264_decode_extradata(h, side, side_size);
1823  }
1824  if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
1825  if (is_extra(buf, buf_size))
1826  return ff_h264_decode_extradata(h, buf, buf_size);
1827  }
1828 
1829  buf_index = decode_nal_units(h, buf, buf_size, 0);
1830  if (buf_index < 0)
1831  return AVERROR_INVALIDDATA;
1832 
1833  if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
1834  av_assert0(buf_index <= buf_size);
1835  goto out;
1836  }
1837 
1838  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
1839  if (avctx->skip_frame >= AVDISCARD_NONREF ||
1840  buf_size >= 4 && !memcmp("Q264", buf, 4))
1841  return buf_size;
1842  av_log(avctx, AV_LOG_ERROR, "no frame!\n");
1843  return AVERROR_INVALIDDATA;
1844  }
1845 
1846  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
1847  (h->mb_y >= h->mb_height && h->mb_height)) {
1848  if (avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)
1849  decode_postinit(h, 1);
1850 
1851  if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
1852  return ret;
1853 
1854  /* Wait for second field. */
1855  *got_frame = 0;
1856  if (h->next_output_pic && (
1857  h->next_output_pic->recovered)) {
1858  if (!h->next_output_pic->recovered)
1860 
1861  if (!h->avctx->hwaccel &&
1862  (h->next_output_pic->field_poc[0] == INT_MAX ||
1863  h->next_output_pic->field_poc[1] == INT_MAX)
1864  ) {
1865  int p;
1866  AVFrame *f = h->next_output_pic->f;
1867  int field = h->next_output_pic->field_poc[0] == INT_MAX;
1868  uint8_t *dst_data[4];
1869  int linesizes[4];
1870  const uint8_t *src_data[4];
1871 
1872  av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);
1873 
1874  for (p = 0; p<4; p++) {
1875  dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
1876  src_data[p] = f->data[p] + field *f->linesize[p];
1877  linesizes[p] = 2*f->linesize[p];
1878  }
1879 
1880  av_image_copy(dst_data, linesizes, src_data, linesizes,
1881  f->format, f->width, f->height>>1);
1882  }
1883 
1884  ret = output_frame(h, pict, h->next_output_pic);
1885  if (ret < 0)
1886  return ret;
1887  *got_frame = 1;
1888  if (CONFIG_MPEGVIDEO) {
1889  ff_print_debug_info2(h->avctx, pict, NULL,
1893  &h->low_delay,
1894  h->mb_width, h->mb_height, h->mb_stride, 1);
1895  }
1896  }
1897  }
1898 
1899  av_assert0(pict->buf[0] || !*got_frame);
1900 
1902 
1903  return get_consumed_bytes(buf_index, buf_size);
1904 }
1905 
1907 {
1908  int i;
1909 
1911 
1912  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
1913  ff_h264_unref_picture(h, &h->DPB[i]);
1914  av_frame_free(&h->DPB[i].f);
1915  }
1916  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
1917 
1918  h->cur_pic_ptr = NULL;
1919 
1920  for (i = 0; i < h->nb_slice_ctx; i++)
1921  av_freep(&h->slice_ctx[i].rbsp_buffer);
1922  av_freep(&h->slice_ctx);
1923  h->nb_slice_ctx = 0;
1924 
1925  for (i = 0; i < MAX_SPS_COUNT; i++)
1926  av_freep(h->sps_buffers + i);
1927 
1928  for (i = 0; i < MAX_PPS_COUNT; i++)
1929  av_freep(h->pps_buffers + i);
1930 }
1931 
1933 {
1934  H264Context *h = avctx->priv_data;
1935 
1938 
1940  av_frame_free(&h->cur_pic.f);
1943 
1944  return 0;
1945 }
1946 
1947 #define OFFSET(x) offsetof(H264Context, x)
1948 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1949 static const AVOption h264_options[] = {
1950  {"is_avc", "is avc", offsetof(H264Context, is_avc), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, 0},
1951  {"nal_length_size", "nal_length_size", offsetof(H264Context, nal_length_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0},
1952  { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD },
1953  { NULL },
1954 };
1955 
1956 static const AVClass h264_class = {
1957  .class_name = "H264 Decoder",
1958  .item_name = av_default_item_name,
1959  .option = h264_options,
1960  .version = LIBAVUTIL_VERSION_INT,
1961 };
1962 
1963 static const AVProfile profiles[] = {
1964  { FF_PROFILE_H264_BASELINE, "Baseline" },
1965  { FF_PROFILE_H264_CONSTRAINED_BASELINE, "Constrained Baseline" },
1966  { FF_PROFILE_H264_MAIN, "Main" },
1967  { FF_PROFILE_H264_EXTENDED, "Extended" },
1968  { FF_PROFILE_H264_HIGH, "High" },
1969  { FF_PROFILE_H264_HIGH_10, "High 10" },
1970  { FF_PROFILE_H264_HIGH_10_INTRA, "High 10 Intra" },
1971  { FF_PROFILE_H264_HIGH_422, "High 4:2:2" },
1972  { FF_PROFILE_H264_HIGH_422_INTRA, "High 4:2:2 Intra" },
1973  { FF_PROFILE_H264_HIGH_444, "High 4:4:4" },
1974  { FF_PROFILE_H264_HIGH_444_PREDICTIVE, "High 4:4:4 Predictive" },
1975  { FF_PROFILE_H264_HIGH_444_INTRA, "High 4:4:4 Intra" },
1976  { FF_PROFILE_H264_CAVLC_444, "CAVLC 4:4:4" },
1977  { FF_PROFILE_UNKNOWN },
1978 };
1979 
1981  .name = "h264",
1982  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
1983  .type = AVMEDIA_TYPE_VIDEO,
1984  .id = AV_CODEC_ID_H264,
1985  .priv_data_size = sizeof(H264Context),
1987  .close = h264_decode_end,
1989  .capabilities = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
1992  .flush = flush_dpb,
1994  .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
1995  .profiles = NULL_IF_CONFIG_SMALL(profiles),
1996  .priv_class = &h264_class,
1997 };
1998 
1999 #if CONFIG_H264_VDPAU_DECODER && FF_API_VDPAU
2000 static const AVClass h264_vdpau_class = {
2001  .class_name = "H264 VDPAU Decoder",
2002  .item_name = av_default_item_name,
2003  .option = h264_options,
2004  .version = LIBAVUTIL_VERSION_INT,
2005 };
2006 
2007 AVCodec ff_h264_vdpau_decoder = {
2008  .name = "h264_vdpau",
2009  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
2010  .type = AVMEDIA_TYPE_VIDEO,
2011  .id = AV_CODEC_ID_H264,
2012  .priv_data_size = sizeof(H264Context),
2014  .close = h264_decode_end,
2017  .flush = flush_dpb,
2018  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
2019  AV_PIX_FMT_NONE},
2020  .profiles = NULL_IF_CONFIG_SMALL(profiles),
2021  .priv_class = &h264_vdpau_class,
2022 };
2023 #endif
int chroma_format_idc
Definition: h264.h:178
struct H264Context * h264
Definition: h264.h:363
#define ff_tlog(ctx,...)
Definition: internal.h:54
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:47
#define FF_PROFILE_H264_HIGH_10
Definition: avcodec.h:3152
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:1511
int ff_h264_check_intra_pred_mode(const H264Context *h, H264SliceContext *sl, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:184
void ff_h264_flush_change(H264Context *h)
Definition: h264.c:1084
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:3640
int workaround_bugs
Definition: h264.h:554
float v
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, int parse_extradata)
Definition: h264.c:1377
#define DC_128_PRED8x8
Definition: h264pred.h:76
GetBitContext gb
Definition: h264.h:524
int sei_recovery_frame_cnt
Definition: h264.h:344
Views are packed per line, as if interlaced.
Definition: stereo3d.h:97
#define AV_NUM_DATA_POINTERS
Definition: frame.h:172
enum AVPixelFormat backup_pix_fmt
Definition: h264.h:546
int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count)
Call decode_slice() for each context.
Definition: h264_slice.c:2534
5: top field, bottom field, top field repeated, in that order
Definition: h264.h:152
int low_delay
Definition: h264.h:550
int mb_num
Definition: h264.h:621
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2129
mpeg2/4 4:2:0, h264 default for 4:2:0
Definition: pixfmt.h:561
This structure describes decoded (raw) audio or video data.
Definition: frame.h:171
#define FF_ALLOCZ_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)
Definition: internal.h:156
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264.h:486
AVOption.
Definition: opt.h:255
static const AVClass h264_class
Definition: h264.c:1956
int delta_poc[2]
Definition: h264.h:649
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
static void flush(AVCodecContext *avctx)
Views are alternated temporally.
Definition: stereo3d.h:66
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:183
int luma_weight[48][2][2]
Definition: h264.h:390
int quincunx_subsampling
Definition: h264.h:735
int edge_emu_buffer_allocated
Definition: h264.h:474
int a53_caption_size
Definition: h264.h:749
3: top field, bottom field, in that order
Definition: h264.h:150
#define FF_PROFILE_H264_HIGH_444
Definition: avcodec.h:3156
#define H264_MAX_PICTURE_COUNT
Definition: h264.h:46
int first_field
Definition: h264.h:591
misc image utilities
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:87
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
#define LIBAVUTIL_VERSION_INT
Definition: version.h:62
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:441
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
uint16_t * cbp_table
Definition: h264.h:596
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
Definition: h264.c:647
const uint8_t * ff_h264_decode_nal(H264Context *h, H264SliceContext *sl, const uint8_t *src, int *dst_length, int *consumed, int length)
Decode a network abstraction layer unit.
Definition: h264.c:226
7: frame doubling
Definition: h264.h:154
#define MAX_PPS_COUNT
Definition: h264.h:50
Sequence parameter set.
Definition: h264.h:174
int mb_y
Definition: h264.h:618
int bitstream_restriction_flag
Definition: h264.h:214
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:243
#define FMO
Definition: h264.h:62
int num
numerator
Definition: rational.h:44
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:362
int bipred_scratchpad_allocated
Definition: h264.h:473
static int get_last_needed_nal(H264Context *h, const uint8_t *buf, int buf_size)
Definition: h264.c:1313
int size
Definition: avcodec.h:1424
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
Definition: diracdec.c:74
AVBufferPool * mb_type_pool
Definition: h264.h:831
int crop
Definition: h264.h:346
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264.c:98
int16_t(*[2] motion_val)[2]
Definition: h264.h:317
int flags
Definition: h264.h:553
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1722
int mb_height
Definition: h264.h:619
H264Picture * delayed_pic[MAX_DELAYED_PIC_COUNT+2]
Definition: h264.h:670
int is_avc
Used to parse AVC variant of h264.
Definition: h264.h:632
AVBufferPool * ref_index_pool
Definition: h264.h:833
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:126
void ff_h264_free_tables(H264Context *h)
Definition: h264.c:355
int ff_h264_get_profile(SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264.c:1218
int sei_reguserdata_afd_present
User data registered by Rec.
Definition: h264.h:747
void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:327
H264Context.
Definition: h264.h:517
AVFrame * f
Definition: h264.h:310
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries...
Definition: avcodec.h:822
int prev_poc_msb
poc_msb of the last reference pic for POC type 0
Definition: h264.h:651
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_RB16
Definition: bytestream.h:87
#define FF_PROFILE_H264_MAIN
Definition: avcodec.h:3149
4: bottom field, top field, in that order
Definition: h264.h:151
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264.h:797
AVCodec.
Definition: avcodec.h:3472
int picture_structure
Definition: h264.h:590
Definition: h264.h:117
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264.h:387
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
int profile_idc
Definition: h264.h:176
unsigned current_sps_id
id of the current SPS
Definition: h264.h:575
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:460
int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
Definition: h264.c:56
int ff_set_ref_count(H264Context *h, H264SliceContext *sl)
Definition: h264.c:1238
Definition: h264.h:118
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1631
uint8_t * chroma_pred_mode_table
Definition: h264.h:599
int setup_finished
Definition: h264.h:814
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3226
#define AV_RN32A(p)
Definition: intreadwrite.h:526
BYTE int const BYTE * srcp
Definition: avisynth_c.h:676
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2922
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:882
static int decode_init_thread_copy(AVCodecContext *avctx)
Definition: h264.c:704
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
Definition: h264.h:119
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
uint32_t(*[6] dequant4_coeff)[16]
Definition: h264.h:583
uint8_t * a53_caption
Definition: h264.h:750
uint8_t
#define av_cold
Definition: attributes.h:74
int prev_frame_num_offset
for POC type 2
Definition: h264.h:654
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
int offset_for_non_ref_pic
Definition: h264.h:184
mode
Definition: f_perms.c:27
AVOptions.
void ff_h264_reset_sei(H264Context *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:37
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:123
#define FF_PROFILE_H264_EXTENDED
Definition: avcodec.h:3150
static int h264_init_context(AVCodecContext *avctx, H264Context *h)
Definition: h264.c:589
int poc
frame POC
Definition: h264.h:329
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
AVCodec ff_h264_decoder
Definition: h264.c:1980
Multithreading support functions.
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
static int find_start_code(const uint8_t *buf, int buf_size, int buf_index, int next_avc)
Definition: h264.h:1179
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:365
#define FF_PROFILE_UNKNOWN
Definition: avcodec.h:3116
#define FF_PROFILE_H264_CONSTRAINED
Definition: avcodec.h:3144
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1617
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:87
uint8_t(*[2] top_borders)[(16 *3)*2]
Definition: h264.h:472
#define FF_PROFILE_H264_HIGH_444_INTRA
Definition: avcodec.h:3158
int frame_recovered
Initial frame has been completely recovered.
Definition: h264.h:804
Structure to hold side data for an AVFrame.
Definition: frame.h:134
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:34
uint8_t * data
Definition: avcodec.h:1423
static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
Identify the exact end of the bitstream.
Definition: h264.c:340
#define AV_CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
Definition: avcodec.h:893
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
AVDictionary * metadata
metadata.
Definition: frame.h:543
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:367
#define MAX_DELAYED_PIC_COUNT
Definition: h264.h:54
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:785
ptrdiff_t size
Definition: opengl_enc.c:101
#define FF_PROFILE_H264_HIGH_422_INTRA
Definition: avcodec.h:3155
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
high precision timer, useful to profile code
int recovered
picture at IDR or recovery point + recovery count
Definition: h264.h:342
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:89
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2244
#define av_log(a,...)
int sei_vflip
Definition: h264.h:742
unsigned int rbsp_buffer_size
Definition: h264.h:511
int last_pocs[MAX_DELAYED_PIC_COUNT]
Definition: h264.h:671
H.264 / AVC / MPEG4 part10 codec.
#define U(x)
Definition: vp56_arith.h:37
int frame_num
Definition: h264.h:650
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:818
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264.h:526
int width
width and height of the video frame
Definition: frame.h:220
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1812
int flags
Additional information about the frame packing.
Definition: stereo3d.h:132
static int get_ue_golomb(GetBitContext *gb)
read unsigned exp golomb code.
Definition: golomb.h:53
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
static int get_consumed_bytes(int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264.c:1689
int16_t * dc_val_base
Definition: h264.h:468
int poc_type
pic_order_cnt_type
Definition: h264.h:181
int context_initialized
Definition: h264.h:552
int profile
Definition: mxfenc.c:1806
static const uint16_t mask[17]
Definition: lzw.c:38
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2901
ERContext er
Definition: h264.h:365
int nal_unit_type
Definition: h264.h:627
av_default_item_name
Definition: h264.h:115
int num_reorder_frames
Definition: h264.h:215
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:100
#define AVERROR(e)
Definition: error.h:43
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
int backup_height
Definition: h264.h:545
#define ALZHEIMER_DC_L0T_PRED8x8
Definition: h264pred.h:79
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:175
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3052
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: h264.h:400
int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
Definition: h264.c:1133
const char * r
Definition: vf_curves.c:107
int backup_width
Backup frame properties: needed, because they can be different between returned frame and last decode...
Definition: h264.h:544
static void flush_dpb(AVCodecContext *avctx)
Definition: h264.c:1113
int capabilities
Codec capabilities.
Definition: avcodec.h:3491
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
static const AVOption h264_options[]
Definition: h264.c:1949
int ff_pred_weight_table(H264Context *h, H264SliceContext *sl)
Definition: h264.c:996
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
PPS pps
current pps
Definition: h264.h:577
#define FF_BUG_TRUNCATED
Definition: avcodec.h:2806
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:600
int prev_interlaced_frame
Complement sei_pic_struct SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced...
Definition: h264.h:727
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1597
#define FF_BUG_AUTODETECT
autodetection
Definition: avcodec.h:2787
ThreadFrame tf
Definition: h264.h:311
0: frame
Definition: h264.h:147
simple assert() macros that are a bit more flexible than ISO C assert().
#define PICT_TOP_FIELD
Definition: mpegutils.h:33
GLsizei GLsizei * length
Definition: opengl_enc.c:115
const char * name
Name of the codec implementation.
Definition: avcodec.h:3479
int direct_spatial_mv_pred
Definition: h264.h:444
void ff_init_cabac_states(void)
Definition: cabac.c:69
unsigned int top_samples_available
Definition: h264.h:417
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264.h:1023
int valid_recovery_point
Are the SEI recovery points looking valid.
Definition: h264.h:781
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:35
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
uint8_t * list_counts
Array of list_count per MB specifying the slice type.
Definition: h264.h:593
#define FFMAX(a, b)
Definition: common.h:79
Libavcodec external API header.
#define fail()
Definition: checkasm.h:57
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:920
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:288
int * mb_index2xy
int offset_for_top_to_bottom_field
Definition: h264.h:185
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264.h:91
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264.h:369
int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
Decode a slice header.
Definition: h264_slice.c:1151
static const uint8_t scan8[16 *3+3]
Definition: h264.h:1007
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:214
int crop_left
Definition: h264.h:347
uint8_t * error_status_table
int use_weight
Definition: h264.h:383
uint8_t * direct_table
Definition: h264.h:601
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:2856
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264.h:633
useful rectangle filling function
uint8_t * data[3]
Definition: h264.h:352
void ff_vdpau_h264_picture_start(H264Context *h)
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:71
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: h264.c:62
int sei_anticlockwise_rotation
Definition: h264.h:741
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:1755
Definition: h264.h:114
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:242
int frame_num_offset
for POC type 2
Definition: h264.h:653
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:482
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2890
FPA sei_fpa
Definition: h264.h:783
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
int x264_build
Definition: h264.h:616
uint32_t * mb2br_xy
Definition: h264.h:571
uint8_t * er_temp_buffer
#define OFFSET(x)
Definition: h264.c:1947
#define FFMIN(a, b)
Definition: common.h:81
uint16_t * slice_table
slice_table_base + 2*mb_stride + 1
Definition: h264.h:586
#define H264_MAX_THREADS
Definition: h264.h:47
float y
int poc_cycle_length
num_ref_frames_in_pic_order_cnt_cycle
Definition: h264.h:186
int reference
Definition: h264.h:341
int sei_frame_packing_present
frame_packing_arrangment SEI message
Definition: h264.h:732
int width
picture width / height.
Definition: avcodec.h:1681
int redundant_pic_count
Definition: h264.h:437
int nb_slice_ctx
Definition: h264.h:532
#define FF_PROFILE_H264_HIGH_10_INTRA
Definition: avcodec.h:3153
uint32_t * mb_type
Definition: h264.h:320
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:474
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
SPS sps
current sps
Definition: h264.h:576
int32_t
PPS * pps_buffers[MAX_PPS_COUNT]
Definition: h264.h:639
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:114
int sei_hflip
Definition: h264.h:742
#define MAX_SPS_COUNT
Definition: h264.h:49
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
Decode PPS.
Definition: h264_ps.c:589
Context Adaptive Binary Arithmetic Coder inline functions.
int mmco_reset
Definition: h264.h:680
H264SliceContext * slice_ctx
Definition: h264.h:531
int poc_lsb
Definition: h264.h:646
int reference
Definition: h264.h:355
static int h264_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: h264.c:1755
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1640
int top_borders_allocated[2]
Definition: h264.h:475
uint8_t active_format_description
Definition: h264.h:748
int chroma_log2_weight_denom
Definition: h264.h:386
static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
Definition: h264.c:1699
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
#define PART_NOT_AVAILABLE
Definition: h264.h:562
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:3033
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG2 field pics)
Definition: avcodec.h:2036
uint8_t * edge_emu_buffer
Definition: h264.h:471
int dequant_coeff_pps
reinit tables when pps changes
Definition: h264.h:641
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:924
SPS * sps_buffers[MAX_SPS_COUNT]
Definition: h264.h:638
static const int8_t mv[256][2]
Definition: 4xm.c:77
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:232
short offset_for_ref_frame[256]
Definition: h264.h:213
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:127
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init context Allocate buffers which are not shared amongst multiple threads.
Definition: h264.c:459
int mb_stride
Definition: h264.h:620
AVCodecContext * avctx
Definition: h264.h:519
AVS_Value src
Definition: avisynth_c.h:482
H264 / AVC / MPEG4 part10 codec data table
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:3045
static int get_bit_length(H264Context *h, const uint8_t *buf, const uint8_t *ptr, int dst_length, int i, int next_avc)
Definition: h264.c:1294
1: top field
Definition: h264.h:148
enum AVCodecID codec_id
Definition: avcodec.h:1519
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:507
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264.h:655
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:199
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:3437
int next_outputed_poc
Definition: h264.h:673
int ff_h264_decode_sei(H264Context *h)
Decode SEI.
Definition: h264_sei.c:419
int poc_msb
Definition: h264.h:647
int field_poc[2]
top/bottom POC
Definition: h264.h:328
#define AV_CODEC_FLAG2_FAST
Definition: avcodec.h:803
int debug
debug
Definition: avcodec.h:2842
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
#define FF_PROFILE_H264_HIGH_444_PREDICTIVE
Definition: avcodec.h:3157
int max_contexts
Max number of threads / contexts.
Definition: h264.h:700
int recovery_frame
recovery_frame is the frame_num at which the next frame should be fully constructed.
Definition: h264.h:791
main external API structure.
Definition: avcodec.h:1502
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
static void decode_postinit(H264Context *h, int setup_finished)
Run setup operations that must be run after slice header decoding.
Definition: h264.c:731
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:398
2: bottom field
Definition: h264.h:149
uint8_t * data
Definition: frame.h:136
int ff_h264_check_intra4x4_pred_mode(const H264Context *h, H264SliceContext *sl)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:137
void * buf
Definition: avisynth_c.h:553
int frame_packing_arrangement_type
Definition: h264.h:733
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegvideo.c:1570
int8_t * qscale_table
Definition: h264.h:314
int extradata_size
Definition: avcodec.h:1618
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstre...
Definition: pixfmt.h:105
int constraint_set_flags
constraint_set[0-3]_flag
Definition: h264.h:230
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:304
SEI_PicStructType sei_pic_struct
pic_struct in picture timing SEI message
Definition: h264.h:719
BYTE int const BYTE int int int height
Definition: avisynth_c.h:676
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:433
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:3044
int slice_flags
slice flags
Definition: avcodec.h:2034
static int get_avc_nalsize(H264Context *h, const uint8_t *buf, int buf_size, int *buf_index)
Definition: h264.h:1189
Describe the class of an AVClass context structure.
Definition: log.h:67
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:589
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264.h:481
static av_cold int h264_decode_end(AVCodecContext *avctx)
Definition: h264.c:1932
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:250
Definition: h264.h:120
int8_t * ref_index[2]
Definition: h264.h:326
int use_weight_chroma
Definition: h264.h:384
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:410
int pixel_shift
0 for 8-bit H264, 1 for high-bit-depth H264
Definition: h264.h:534
int mmco_reset
MMCO_RESET set this 1.
Definition: h264.h:331
H264Picture * cur_pic_ptr
Definition: h264.h:527
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264.h:435
int enable_er
Definition: h264.h:828
int frame_packing_arrangement_cancel_flag
is previous arrangement canceled, -1 if never received
Definition: h264.h:264
#define FF_PROFILE_H264_CAVLC_444
Definition: avcodec.h:3159
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:115
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
Definition: h264.h:182
6: bottom field, top field, bottom field repeated, in that order
Definition: h264.h:153
#define FF_PROFILE_H264_INTRA
Definition: avcodec.h:3145
static int is_extra(const uint8_t *buf, int buf_size)
Definition: h264.c:1733
AVCodecContext * avctx
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:209
static const uint8_t start_code[]
Definition: h264.c:1292
Views are on top of each other.
Definition: stereo3d.h:55
int pic_struct_present_flag
Definition: h264.h:221
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:32
unsigned int list_count
Definition: h264.h:461
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
Definition: h264.c:1906
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
int has_recovery_point
Definition: h264.h:806
Views are next to each other.
Definition: stereo3d.h:45
#define MAX_MBPAIR_SIZE
Definition: h264.h:56
static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt)
Definition: ccaption_dec.c:523
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264.c:1071
int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
Decode SPS.
Definition: h264_ps.c:303
discard all non reference
Definition: avcodec.h:685
AVBufferPool * qscale_table_pool
Definition: h264.h:830
H264Picture * next_output_pic
Definition: h264.h:672
int slice_context_count
Definition: h264.h:702
AVBufferPool * motion_val_pool
Definition: h264.h:832
#define FF_PROFILE_H264_HIGH
Definition: avcodec.h:3151
#define SLICE_SINGLETHREAD
Definition: h264.h:1222
common internal api header.
if(ret< 0)
Definition: vf_mcdeint.c:280
int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
Definition: h264.c:527
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264.h:802
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:129
uint16_t * slice_table_base
Definition: h264.h:643
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
Definition: h264.h:180
int missing_fields
Definition: h264.h:808
int16_t * dc_val[3]
H.264 / AVC / MPEG4 part10 motion vector predicion.
Bi-dir predicted.
Definition: avutil.h:268
const char * ff_h264_sei_stereo_mode(H264Context *h)
Get stereo_mode string from the h264 frame_packing_arrangement.
Definition: h264_sei.c:489
AVProfile.
Definition: avcodec.h:3460
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:2786
int cur_chroma_format_idc
Definition: h264.h:820
int8_t * intra4x4_pred_mode
Definition: h264.h:401
unsigned properties
Definition: avcodec.h:3435
int den
denominator
Definition: rational.h:45
uint8_t * rbsp_buffer
Definition: h264.h:510
int sei_ct_type
Bit set of clock types for fields/frames in picture timing SEI message.
Definition: h264.h:757
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:636
#define FF_PROFILE_H264_CONSTRAINED_BASELINE
Definition: avcodec.h:3148
void * priv_data
Definition: avcodec.h:1544
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:154
#define PICT_FRAME
Definition: mpegutils.h:35
int prev_poc_lsb
poc_lsb of the last reference pic for POC type 0
Definition: h264.h:652
int8_t ref_cache[2][5 *8]
Definition: h264.h:487
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:743
Definition: h264.h:113
#define SLICE_SKIPED
Definition: h264.h:1223
#define VD
Definition: h264.c:1948
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:372
const uint16_t ff_h264_mb_sizes[4]
Definition: h264.c:54
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1552
#define FF_PROFILE_H264_BASELINE
Definition: avcodec.h:3147
int luma_log2_weight_denom
Definition: h264.h:385
int chroma_weight[48][2][2][2]
Definition: h264.h:391
Views are packed in a checkerboard-like structure per pixel.
Definition: stereo3d.h:76
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: avcodec.h:2822
H264Picture cur_pic
Definition: h264.h:528
int sei_display_orientation_present
display orientation SEI message
Definition: h264.h:740
int content_interpretation_type
Definition: h264.h:734
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:237
Views are packed per column.
Definition: stereo3d.h:107
int mb_width
Definition: h264.h:619
enum AVPictureType pict_type
Definition: h264.h:710
int current_slice
current slice number, used to initialize slice_num of each thread/context
Definition: h264.h:692
#define FF_PROFILE_H264_HIGH_422
Definition: avcodec.h:3154
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1604
uint32_t * mb2b_xy
Definition: h264.h:570
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264.h:462
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> out
int delta_poc_bottom
Definition: h264.h:648
H264Picture last_pic_for_ec
Definition: h264.h:529
int au_pps_id
pps_id of current access unit
Definition: h264.h:579
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:228
int height
Definition: frame.h:220
int crop_top
Definition: h264.h:348
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:831
unsigned int left_samples_available
Definition: h264.h:419
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:500
#define av_freep(p)
static int init_thread_copy(AVCodecContext *avctx)
Definition: alac.c:646
uint8_t * av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:324
void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
int8_t * intra4x4_pred_mode
Definition: h264.h:556
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3654
8: frame tripling
Definition: h264.h:155
#define AV_RN64A(p)
Definition: intreadwrite.h:530
int mb_field_decoding_flag
Definition: h264.h:434
uint8_t(* non_zero_count)[48]
Definition: h264.h:559
exp golomb vlc stuff
uint8_t * bipred_scratchpad
Definition: h264.h:470
AVPixelFormat
Pixel format.
Definition: pixfmt.h:61
This structure stores compressed data.
Definition: avcodec.h:1400
int sei_recovery_frame_cnt
recovery_frame_cnt from SEI message
Definition: h264.h:776
int droppable
Definition: h264.h:548
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:857
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
Definition: avcodec.h:2820
#define STARTCODE_TEST
int nal_ref_idc
Definition: h264.h:626
GetBitContext gb
Definition: h264.h:364
for(j=16;j >0;--j)
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:138
int b_stride
Definition: h264.h:572
Context Adaptive Binary Arithmetic Coder.
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264.h:388
static const AVProfile profiles[]
Definition: h264.c:1963
void ff_h264_init_dequant_tables(H264Context *h)
Definition: h264_slice.c:367