FFmpeg
h264_slice.c
Go to the documentation of this file.
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #include "libavutil/avassert.h"
29 #include "libavutil/display.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/stereo3d.h"
32 #include "internal.h"
33 #include "cabac.h"
34 #include "cabac_functions.h"
35 #include "error_resilience.h"
36 #include "avcodec.h"
37 #include "h264.h"
38 #include "h264dec.h"
39 #include "h264data.h"
40 #include "h264chroma.h"
41 #include "h264_mvpred.h"
42 #include "h264_ps.h"
43 #include "golomb.h"
44 #include "mathops.h"
45 #include "mpegutils.h"
46 #include "mpegvideo.h"
47 #include "rectangle.h"
48 #include "thread.h"
49 
/* 4x4 coefficient scan order used for field-coded macroblocks.
 * Each entry encodes a block position as x + y*4; the array is declared
 * with one extra element ([16+1]) as an overread guard. */
static const uint8_t field_scan[16+1] = {
    0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
    0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
    2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
    3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
};
56 
/* 8x8 coefficient scan order used for field-coded macroblocks.
 * Each entry encodes a block position as x + y*8; declared [64+1] so one
 * trailing element serves as an overread guard. */
static const uint8_t field_scan8x8[64+1] = {
    0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
    1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
    2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
    0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
    2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
    2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
    2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
    3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
    3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
    4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
    4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
    5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
    5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
    7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
    6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
    7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
};
75 
/* Field 8x8 scan reordered for CAVLC coefficient parsing (interleaved
 * variant of field_scan8x8); entries encode x + y*8, with one guard
 * element at the end. */
static const uint8_t field_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
    2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
    3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
    5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
    0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
    1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
    3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
    5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
    0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
    1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
    3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
    5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
    1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
    1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
    3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
    6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
};
94 
// zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)]
/* Progressive 8x8 zigzag scan reordered for CAVLC parsing per the mapping
 * above; entries encode x + y*8, with one guard element at the end. */
static const uint8_t zigzag_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
    4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
    3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
    2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
    1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
    3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
    2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
    3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
    0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
    2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
    1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
    4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
    0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
    1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
    0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
    5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
};
114 
115 static void release_unused_pictures(H264Context *h, int remove_current)
116 {
117  int i;
118 
119  /* release non reference frames */
120  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
121  if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
122  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
123  ff_h264_unref_picture(h, &h->DPB[i]);
124  }
125  }
126 }
127 
128 static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
129 {
130  const H264Context *h = sl->h264;
131  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
132 
133  av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated, 16 * 6 * alloc_size);
134  // edge emu needs blocksize + filter length - 1
135  // (= 21x21 for H.264)
136  av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated, alloc_size * 2 * 21);
137 
139  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
141  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
142 
143  if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
144  !sl->top_borders[0] || !sl->top_borders[1]) {
147  av_freep(&sl->top_borders[0]);
148  av_freep(&sl->top_borders[1]);
149 
152  sl->top_borders_allocated[0] = 0;
153  sl->top_borders_allocated[1] = 0;
154  return AVERROR(ENOMEM);
155  }
156 
157  return 0;
158 }
159 
161 {
162  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
163  const int mb_array_size = h->mb_stride * h->mb_height;
164  const int b4_stride = h->mb_width * 4 + 1;
165  const int b4_array_size = b4_stride * h->mb_height * 4;
166 
167  h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
169  h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
170  sizeof(uint32_t), av_buffer_allocz);
171  h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
172  sizeof(int16_t), av_buffer_allocz);
173  h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
174 
175  if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
176  !h->ref_index_pool) {
181  return AVERROR(ENOMEM);
182  }
183 
184  return 0;
185 }
186 
188 {
189  int i, ret = 0;
190 
191  av_assert0(!pic->f->data[0]);
192 
193  pic->tf.f = pic->f;
194  ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
196  if (ret < 0)
197  goto fail;
198 
199  if (h->avctx->hwaccel) {
200  const AVHWAccel *hwaccel = h->avctx->hwaccel;
202  if (hwaccel->frame_priv_data_size) {
204  if (!pic->hwaccel_priv_buf)
205  return AVERROR(ENOMEM);
207  }
208  }
209  if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
210  int h_chroma_shift, v_chroma_shift;
212  &h_chroma_shift, &v_chroma_shift);
213 
214  for(i=0; i<AV_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
215  memset(pic->f->data[1] + pic->f->linesize[1]*i,
216  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
217  memset(pic->f->data[2] + pic->f->linesize[2]*i,
218  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
219  }
220  }
221 
222  if (!h->qscale_table_pool) {
223  ret = init_table_pools(h);
224  if (ret < 0)
225  goto fail;
226  }
227 
230  if (!pic->qscale_table_buf || !pic->mb_type_buf)
231  goto fail;
232 
233  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
234  pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
235 
236  for (i = 0; i < 2; i++) {
239  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
240  goto fail;
241 
242  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
243  pic->ref_index[i] = pic->ref_index_buf[i]->data;
244  }
245 
246  pic->pps_buf = av_buffer_ref(h->ps.pps_ref);
247  if (!pic->pps_buf)
248  goto fail;
249  pic->pps = (const PPS*)pic->pps_buf->data;
250 
251  pic->mb_width = h->mb_width;
252  pic->mb_height = h->mb_height;
253  pic->mb_stride = h->mb_stride;
254 
255  return 0;
256 fail:
257  ff_h264_unref_picture(h, pic);
258  return (ret < 0) ? ret : AVERROR(ENOMEM);
259 }
260 
262 {
263  int i;
264 
265  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
266  if (!h->DPB[i].f->buf[0])
267  return i;
268  }
269  return AVERROR_INVALIDDATA;
270 }
271 
272 
/* True when pointer a lies inside the range starting at b spanning `size`
 * elements of b's pointee type (half-open interval, compared as void*). */
#define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))

/* Map a picture pointer aimed into old_ctx's DPB onto the corresponding
 * slot of new_ctx's DPB; pointers outside the DPB (or NULL) map to NULL. */
#define REBASE_PICTURE(pic, new_ctx, old_ctx) \
    (((pic) && (pic) >= (old_ctx)->DPB && \
      (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ? \
     &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)
279 
281  H264Context *new_base,
282  H264Context *old_base)
283 {
284  int i;
285 
286  for (i = 0; i < count; i++) {
287  av_assert1(!from[i] ||
288  IN_RANGE(from[i], old_base, 1) ||
289  IN_RANGE(from[i], old_base->DPB, H264_MAX_PICTURE_COUNT));
290  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
291  }
292 }
293 
295 
297  const AVCodecContext *src)
298 {
299  H264Context *h = dst->priv_data, *h1 = src->priv_data;
300  int inited = h->context_initialized, err = 0;
301  int need_reinit = 0;
302  int i, ret;
303 
304  if (dst == src)
305  return 0;
306 
307  // We can't fail if SPS isn't set at it breaks current skip_frame code
308  //if (!h1->ps.sps)
309  // return AVERROR_INVALIDDATA;
310 
311  if (inited &&
312  (h->width != h1->width ||
313  h->height != h1->height ||
314  h->mb_width != h1->mb_width ||
315  h->mb_height != h1->mb_height ||
316  !h->ps.sps ||
317  h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma ||
318  h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
319  h->ps.sps->colorspace != h1->ps.sps->colorspace)) {
320  need_reinit = 1;
321  }
322 
323  /* copy block_offset since frame_start may not be called */
324  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
325 
326  // SPS/PPS
327  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
328  av_buffer_unref(&h->ps.sps_list[i]);
329  if (h1->ps.sps_list[i]) {
330  h->ps.sps_list[i] = av_buffer_ref(h1->ps.sps_list[i]);
331  if (!h->ps.sps_list[i])
332  return AVERROR(ENOMEM);
333  }
334  }
335  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
336  av_buffer_unref(&h->ps.pps_list[i]);
337  if (h1->ps.pps_list[i]) {
338  h->ps.pps_list[i] = av_buffer_ref(h1->ps.pps_list[i]);
339  if (!h->ps.pps_list[i])
340  return AVERROR(ENOMEM);
341  }
342  }
343 
345  h->ps.pps = NULL;
346  h->ps.sps = NULL;
347  if (h1->ps.pps_ref) {
348  h->ps.pps_ref = av_buffer_ref(h1->ps.pps_ref);
349  if (!h->ps.pps_ref)
350  return AVERROR(ENOMEM);
351  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
352  h->ps.sps = h->ps.pps->sps;
353  }
354 
355  if (need_reinit || !inited) {
356  h->width = h1->width;
357  h->height = h1->height;
358  h->mb_height = h1->mb_height;
359  h->mb_width = h1->mb_width;
360  h->mb_num = h1->mb_num;
361  h->mb_stride = h1->mb_stride;
362  h->b_stride = h1->b_stride;
363  h->x264_build = h1->x264_build;
364 
365  if (h->context_initialized || h1->context_initialized) {
366  if ((err = h264_slice_header_init(h)) < 0) {
367  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
368  return err;
369  }
370  }
371 
372  /* copy block_offset since frame_start may not be called */
373  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
374  }
375 
376  h->avctx->coded_height = h1->avctx->coded_height;
377  h->avctx->coded_width = h1->avctx->coded_width;
378  h->avctx->width = h1->avctx->width;
379  h->avctx->height = h1->avctx->height;
380  h->width_from_caller = h1->width_from_caller;
381  h->height_from_caller = h1->height_from_caller;
382  h->coded_picture_number = h1->coded_picture_number;
383  h->first_field = h1->first_field;
384  h->picture_structure = h1->picture_structure;
385  h->mb_aff_frame = h1->mb_aff_frame;
386  h->droppable = h1->droppable;
387 
388  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
389  ff_h264_unref_picture(h, &h->DPB[i]);
390  if (h1->DPB[i].f->buf[0] &&
391  (ret = ff_h264_ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
392  return ret;
393  }
394 
395  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
397  if (h1->cur_pic.f->buf[0]) {
398  ret = ff_h264_ref_picture(h, &h->cur_pic, &h1->cur_pic);
399  if (ret < 0)
400  return ret;
401  }
402 
403  h->enable_er = h1->enable_er;
404  h->workaround_bugs = h1->workaround_bugs;
405  h->droppable = h1->droppable;
406 
407  // extradata/NAL handling
408  h->is_avc = h1->is_avc;
409  h->nal_length_size = h1->nal_length_size;
410 
411  memcpy(&h->poc, &h1->poc, sizeof(h->poc));
412 
413  memcpy(h->short_ref, h1->short_ref, sizeof(h->short_ref));
414  memcpy(h->long_ref, h1->long_ref, sizeof(h->long_ref));
415  memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
416  memcpy(h->last_pocs, h1->last_pocs, sizeof(h->last_pocs));
417 
418  h->next_output_pic = h1->next_output_pic;
419  h->next_outputed_poc = h1->next_outputed_poc;
420 
421  memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
422  h->nb_mmco = h1->nb_mmco;
423  h->mmco_reset = h1->mmco_reset;
424  h->explicit_ref_marking = h1->explicit_ref_marking;
425  h->long_ref_count = h1->long_ref_count;
426  h->short_ref_count = h1->short_ref_count;
427 
428  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
429  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
430  copy_picture_range(h->delayed_pic, h1->delayed_pic,
431  MAX_DELAYED_PIC_COUNT + 2, h, h1);
432 
433  h->frame_recovered = h1->frame_recovered;
434 
436  if (h1->sei.a53_caption.buf_ref) {
437  h->sei.a53_caption.buf_ref = av_buffer_ref(h1->sei.a53_caption.buf_ref);
438  if (!h->sei.a53_caption.buf_ref)
439  return AVERROR(ENOMEM);
440  }
441 
442  for (i = 0; i < h->sei.unregistered.nb_buf_ref; i++)
444  h->sei.unregistered.nb_buf_ref = 0;
445 
446  if (h1->sei.unregistered.nb_buf_ref) {
448  h1->sei.unregistered.nb_buf_ref,
449  sizeof(*h->sei.unregistered.buf_ref));
450  if (ret < 0)
451  return ret;
452 
453  for (i = 0; i < h1->sei.unregistered.nb_buf_ref; i++) {
454  h->sei.unregistered.buf_ref[i] = av_buffer_ref(h1->sei.unregistered.buf_ref[i]);
455  if (!h->sei.unregistered.buf_ref[i])
456  return AVERROR(ENOMEM);
458  }
459  }
460  h->sei.unregistered.x264_build = h1->sei.unregistered.x264_build;
461 
462  if (!h->cur_pic_ptr)
463  return 0;
464 
465  if (!h->droppable) {
467  h->poc.prev_poc_msb = h->poc.poc_msb;
468  h->poc.prev_poc_lsb = h->poc.poc_lsb;
469  }
472 
473  h->recovery_frame = h1->recovery_frame;
474 
475  return err;
476 }
477 
479 {
480  H264Picture *pic;
481  int i, ret;
482  const int pixel_shift = h->pixel_shift;
483 
484  if (!ff_thread_can_start_frame(h->avctx)) {
485  av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
486  return -1;
487  }
488 
490  h->cur_pic_ptr = NULL;
491 
492  i = find_unused_picture(h);
493  if (i < 0) {
494  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
495  return i;
496  }
497  pic = &h->DPB[i];
498 
499  pic->reference = h->droppable ? 0 : h->picture_structure;
502  pic->frame_num = h->poc.frame_num;
503  /*
504  * Zero key_frame here; IDR markings per slice in frame or fields are ORed
505  * in later.
506  * See decode_nal_units().
507  */
508  pic->f->key_frame = 0;
509  pic->mmco_reset = 0;
510  pic->recovered = 0;
511  pic->invalid_gap = 0;
513 
514  pic->f->pict_type = h->slice_ctx[0].slice_type;
515 
516  pic->f->crop_left = h->crop_left;
517  pic->f->crop_right = h->crop_right;
518  pic->f->crop_top = h->crop_top;
519  pic->f->crop_bottom = h->crop_bottom;
520 
521  if ((ret = alloc_picture(h, pic)) < 0)
522  return ret;
523 
524  h->cur_pic_ptr = pic;
526  if (CONFIG_ERROR_RESILIENCE) {
528  }
529 
530  if ((ret = ff_h264_ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
531  return ret;
532 
533  for (i = 0; i < h->nb_slice_ctx; i++) {
534  h->slice_ctx[i].linesize = h->cur_pic_ptr->f->linesize[0];
535  h->slice_ctx[i].uvlinesize = h->cur_pic_ptr->f->linesize[1];
536  }
537 
538  if (CONFIG_ERROR_RESILIENCE && h->enable_er) {
542  }
543 
544  for (i = 0; i < 16; i++) {
545  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
546  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
547  }
548  for (i = 0; i < 16; i++) {
549  h->block_offset[16 + i] =
550  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
551  h->block_offset[48 + 16 + i] =
552  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
553  }
554 
555  /* We mark the current picture as non-reference after allocating it, so
556  * that if we break out due to an error it can be released automatically
557  * in the next ff_mpv_frame_start().
558  */
559  h->cur_pic_ptr->reference = 0;
560 
561  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
562 
563  h->next_output_pic = NULL;
564 
565  h->postpone_filter = 0;
566 
568 
569  if (h->sei.unregistered.x264_build >= 0)
571 
572  assert(h->cur_pic_ptr->long_ref == 0);
573 
574  return 0;
575 }
576 
578  uint8_t *src_y,
579  uint8_t *src_cb, uint8_t *src_cr,
580  int linesize, int uvlinesize,
581  int simple)
582 {
583  uint8_t *top_border;
584  int top_idx = 1;
585  const int pixel_shift = h->pixel_shift;
586  int chroma444 = CHROMA444(h);
587  int chroma422 = CHROMA422(h);
588 
589  src_y -= linesize;
590  src_cb -= uvlinesize;
591  src_cr -= uvlinesize;
592 
593  if (!simple && FRAME_MBAFF(h)) {
594  if (sl->mb_y & 1) {
595  if (!MB_MBAFF(sl)) {
596  top_border = sl->top_borders[0][sl->mb_x];
597  AV_COPY128(top_border, src_y + 15 * linesize);
598  if (pixel_shift)
599  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
600  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
601  if (chroma444) {
602  if (pixel_shift) {
603  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
604  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
605  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
606  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
607  } else {
608  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
609  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
610  }
611  } else if (chroma422) {
612  if (pixel_shift) {
613  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
614  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
615  } else {
616  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
617  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
618  }
619  } else {
620  if (pixel_shift) {
621  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
622  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
623  } else {
624  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
625  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
626  }
627  }
628  }
629  }
630  } else if (MB_MBAFF(sl)) {
631  top_idx = 0;
632  } else
633  return;
634  }
635 
636  top_border = sl->top_borders[top_idx][sl->mb_x];
637  /* There are two lines saved, the line above the top macroblock
638  * of a pair, and the line above the bottom macroblock. */
639  AV_COPY128(top_border, src_y + 16 * linesize);
640  if (pixel_shift)
641  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
642 
643  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
644  if (chroma444) {
645  if (pixel_shift) {
646  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
647  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
648  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
649  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
650  } else {
651  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
652  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
653  }
654  } else if (chroma422) {
655  if (pixel_shift) {
656  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
657  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
658  } else {
659  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
660  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
661  }
662  } else {
663  if (pixel_shift) {
664  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
665  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
666  } else {
667  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
668  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
669  }
670  }
671  }
672 }
673 
674 /**
675  * Initialize implicit_weight table.
676  * @param field 0/1 initialize the weight for interlaced MBAFF
677  * -1 initializes the rest
678  */
680 {
681  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
682 
683  for (i = 0; i < 2; i++) {
684  sl->pwt.luma_weight_flag[i] = 0;
685  sl->pwt.chroma_weight_flag[i] = 0;
686  }
687 
688  if (field < 0) {
689  if (h->picture_structure == PICT_FRAME) {
690  cur_poc = h->cur_pic_ptr->poc;
691  } else {
692  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
693  }
694  if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
695  sl->ref_list[0][0].poc + (int64_t)sl->ref_list[1][0].poc == 2LL * cur_poc) {
696  sl->pwt.use_weight = 0;
697  sl->pwt.use_weight_chroma = 0;
698  return;
699  }
700  ref_start = 0;
701  ref_count0 = sl->ref_count[0];
702  ref_count1 = sl->ref_count[1];
703  } else {
704  cur_poc = h->cur_pic_ptr->field_poc[field];
705  ref_start = 16;
706  ref_count0 = 16 + 2 * sl->ref_count[0];
707  ref_count1 = 16 + 2 * sl->ref_count[1];
708  }
709 
710  sl->pwt.use_weight = 2;
711  sl->pwt.use_weight_chroma = 2;
712  sl->pwt.luma_log2_weight_denom = 5;
714 
715  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
716  int64_t poc0 = sl->ref_list[0][ref0].poc;
717  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
718  int w = 32;
719  if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
720  int poc1 = sl->ref_list[1][ref1].poc;
721  int td = av_clip_int8(poc1 - poc0);
722  if (td) {
723  int tb = av_clip_int8(cur_poc - poc0);
724  int tx = (16384 + (FFABS(td) >> 1)) / td;
725  int dist_scale_factor = (tb * tx + 32) >> 8;
726  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
727  w = 64 - dist_scale_factor;
728  }
729  }
730  if (field < 0) {
731  sl->pwt.implicit_weight[ref0][ref1][0] =
732  sl->pwt.implicit_weight[ref0][ref1][1] = w;
733  } else {
734  sl->pwt.implicit_weight[ref0][ref1][field] = w;
735  }
736  }
737  }
738 }
739 
740 /**
741  * initialize scan tables
742  */
744 {
745  int i;
746  for (i = 0; i < 16; i++) {
747 #define TRANSPOSE(x) ((x) >> 2) | (((x) << 2) & 0xF)
749  h->field_scan[i] = TRANSPOSE(field_scan[i]);
750 #undef TRANSPOSE
751  }
752  for (i = 0; i < 64; i++) {
753 #define TRANSPOSE(x) ((x) >> 3) | (((x) & 7) << 3)
758 #undef TRANSPOSE
759  }
760  if (h->ps.sps->transform_bypass) { // FIXME same ugly
761  memcpy(h->zigzag_scan_q0 , ff_zigzag_scan , sizeof(h->zigzag_scan_q0 ));
762  memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 ));
764  memcpy(h->field_scan_q0 , field_scan , sizeof(h->field_scan_q0 ));
765  memcpy(h->field_scan8x8_q0 , field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
767  } else {
768  memcpy(h->zigzag_scan_q0 , h->zigzag_scan , sizeof(h->zigzag_scan_q0 ));
769  memcpy(h->zigzag_scan8x8_q0 , h->zigzag_scan8x8 , sizeof(h->zigzag_scan8x8_q0 ));
771  memcpy(h->field_scan_q0 , h->field_scan , sizeof(h->field_scan_q0 ));
772  memcpy(h->field_scan8x8_q0 , h->field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
774  }
775 }
776 
777 static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
778 {
779 #define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
780  (CONFIG_H264_D3D11VA_HWACCEL * 2) + \
781  CONFIG_H264_NVDEC_HWACCEL + \
782  CONFIG_H264_VAAPI_HWACCEL + \
783  CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
784  CONFIG_H264_VDPAU_HWACCEL)
785  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
786  const enum AVPixelFormat *choices = pix_fmts;
787  int i;
788 
789  switch (h->ps.sps->bit_depth_luma) {
790  case 9:
791  if (CHROMA444(h)) {
792  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
793  *fmt++ = AV_PIX_FMT_GBRP9;
794  } else
795  *fmt++ = AV_PIX_FMT_YUV444P9;
796  } else if (CHROMA422(h))
797  *fmt++ = AV_PIX_FMT_YUV422P9;
798  else
799  *fmt++ = AV_PIX_FMT_YUV420P9;
800  break;
801  case 10:
802  if (CHROMA444(h)) {
803  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
804  *fmt++ = AV_PIX_FMT_GBRP10;
805  } else
806  *fmt++ = AV_PIX_FMT_YUV444P10;
807  } else if (CHROMA422(h))
808  *fmt++ = AV_PIX_FMT_YUV422P10;
809  else
810  *fmt++ = AV_PIX_FMT_YUV420P10;
811  break;
812  case 12:
813  if (CHROMA444(h)) {
814  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
815  *fmt++ = AV_PIX_FMT_GBRP12;
816  } else
817  *fmt++ = AV_PIX_FMT_YUV444P12;
818  } else if (CHROMA422(h))
819  *fmt++ = AV_PIX_FMT_YUV422P12;
820  else
821  *fmt++ = AV_PIX_FMT_YUV420P12;
822  break;
823  case 14:
824  if (CHROMA444(h)) {
825  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
826  *fmt++ = AV_PIX_FMT_GBRP14;
827  } else
828  *fmt++ = AV_PIX_FMT_YUV444P14;
829  } else if (CHROMA422(h))
830  *fmt++ = AV_PIX_FMT_YUV422P14;
831  else
832  *fmt++ = AV_PIX_FMT_YUV420P14;
833  break;
834  case 8:
835 #if CONFIG_H264_VDPAU_HWACCEL
836  *fmt++ = AV_PIX_FMT_VDPAU;
837 #endif
838 #if CONFIG_H264_NVDEC_HWACCEL
839  *fmt++ = AV_PIX_FMT_CUDA;
840 #endif
841  if (CHROMA444(h)) {
842  if (h->avctx->colorspace == AVCOL_SPC_RGB)
843  *fmt++ = AV_PIX_FMT_GBRP;
844  else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
845  *fmt++ = AV_PIX_FMT_YUVJ444P;
846  else
847  *fmt++ = AV_PIX_FMT_YUV444P;
848  } else if (CHROMA422(h)) {
850  *fmt++ = AV_PIX_FMT_YUVJ422P;
851  else
852  *fmt++ = AV_PIX_FMT_YUV422P;
853  } else {
854 #if CONFIG_H264_DXVA2_HWACCEL
855  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
856 #endif
857 #if CONFIG_H264_D3D11VA_HWACCEL
858  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
859  *fmt++ = AV_PIX_FMT_D3D11;
860 #endif
861 #if CONFIG_H264_VAAPI_HWACCEL
862  *fmt++ = AV_PIX_FMT_VAAPI;
863 #endif
864 #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
865  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
866 #endif
867  if (h->avctx->codec->pix_fmts)
868  choices = h->avctx->codec->pix_fmts;
869  else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
870  *fmt++ = AV_PIX_FMT_YUVJ420P;
871  else
872  *fmt++ = AV_PIX_FMT_YUV420P;
873  }
874  break;
875  default:
877  "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
878  return AVERROR_INVALIDDATA;
879  }
880 
881  *fmt = AV_PIX_FMT_NONE;
882 
883  for (i=0; choices[i] != AV_PIX_FMT_NONE; i++)
884  if (choices[i] == h->avctx->pix_fmt && !force_callback)
885  return choices[i];
886  return ff_thread_get_format(h->avctx, choices);
887 }
888 
889 /* export coded and cropped frame dimensions to AVCodecContext */
891 {
892  const SPS *sps = (const SPS*)h->ps.sps;
893  int cr = sps->crop_right;
894  int cl = sps->crop_left;
895  int ct = sps->crop_top;
896  int cb = sps->crop_bottom;
897  int width = h->width - (cr + cl);
898  int height = h->height - (ct + cb);
899  av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
900  av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);
901 
902  /* handle container cropping */
903  if (h->width_from_caller > 0 && h->height_from_caller > 0 &&
904  !sps->crop_top && !sps->crop_left &&
905  FFALIGN(h->width_from_caller, 16) == FFALIGN(width, 16) &&
906  FFALIGN(h->height_from_caller, 16) == FFALIGN(height, 16) &&
907  h->width_from_caller <= width &&
908  h->height_from_caller <= height) {
910  height = h->height_from_caller;
911  cl = 0;
912  ct = 0;
913  cr = h->width - width;
914  cb = h->height - height;
915  } else {
916  h->width_from_caller = 0;
917  h->height_from_caller = 0;
918  }
919 
920  h->avctx->coded_width = h->width;
921  h->avctx->coded_height = h->height;
922  h->avctx->width = width;
923  h->avctx->height = height;
924  h->crop_right = cr;
925  h->crop_left = cl;
926  h->crop_top = ct;
927  h->crop_bottom = cb;
928 }
929 
931 {
932  const SPS *sps = h->ps.sps;
933  int i, ret;
934 
935  ff_set_sar(h->avctx, sps->sar);
937  &h->chroma_x_shift, &h->chroma_y_shift);
938 
939  if (sps->timing_info_present_flag) {
940  int64_t den = sps->time_scale;
941  if (h->x264_build < 44U)
942  den *= 2;
944  sps->num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30);
945  }
946 
948 
949  h->first_field = 0;
950  h->prev_interlaced_frame = 1;
951 
952  init_scan_tables(h);
953  ret = ff_h264_alloc_tables(h);
954  if (ret < 0) {
955  av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
956  goto fail;
957  }
958 
959  if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
960  sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
961  ) {
962  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
963  sps->bit_depth_luma);
964  ret = AVERROR_INVALIDDATA;
965  goto fail;
966  }
967 
968  h->cur_bit_depth_luma =
971  h->pixel_shift = sps->bit_depth_luma > 8;
973  h->bit_depth_luma = sps->bit_depth_luma;
974 
976  sps->chroma_format_idc);
980  sps->chroma_format_idc);
982 
983  if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
984  ret = ff_h264_slice_context_init(h, &h->slice_ctx[0]);
985  if (ret < 0) {
986  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
987  goto fail;
988  }
989  } else {
990  for (i = 0; i < h->nb_slice_ctx; i++) {
991  H264SliceContext *sl = &h->slice_ctx[i];
992 
993  sl->h264 = h;
994  sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
995  sl->mvd_table[0] = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
996  sl->mvd_table[1] = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;
997 
998  if ((ret = ff_h264_slice_context_init(h, sl)) < 0) {
999  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
1000  goto fail;
1001  }
1002  }
1003  }
1004 
1005  h->context_initialized = 1;
1006 
1007  return 0;
1008 fail:
1010  h->context_initialized = 0;
1011  return ret;
1012 }
1013 
1015 {
1016  switch (a) {
1020  default:
1021  return a;
1022  }
1023 }
1024 
/*
 * Activate the parameter sets referenced by the current slice.
 *
 * On the first slice of a picture, re-references the PPS named by
 * sl->pps_id; whenever the active SPS changes, mirrors its dimensions,
 * bit depth and colorimetry into the decoder/AVCodecContext state and
 * decides whether the whole decoding context must be rebuilt
 * (must_reinit / needs_reinit), in which case the pixel format is
 * re-negotiated and h264_slice_header_init() is re-run.
 *
 * Returns 0 on success, a negative AVERROR on failure.
 *
 * NOTE(review): gaps in the embedded doxygen line numbers (e.g. 1044-1045,
 * 1050, 1059, 1064, 1091-1092, 1102-1104, 1113, 1125, 1135) mark lines that
 * were dropped by the extraction — several conditions and av_log() openers
 * below are visibly truncated. Restore from upstream before editing logic.
 */
1025 static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
1026 {
1027  const SPS *sps;
1028  int needs_reinit = 0, must_reinit, ret;
1029 
 /* Re-reference the PPS for this picture; drop the old reference first. */
1030  if (first_slice) {
1031  av_buffer_unref(&h->ps.pps_ref);
1032  h->ps.pps = NULL;
1033  h->ps.pps_ref = av_buffer_ref(h->ps.pps_list[sl->pps_id]);
1034  if (!h->ps.pps_ref)
1035  return AVERROR(ENOMEM);
1036  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
1037  }
1038 
 /* SPS switched: detect geometry / bit-depth changes requiring reinit. */
1039  if (h->ps.sps != h->ps.pps->sps) {
1040  h->ps.sps = (const SPS*)h->ps.pps->sps;
1041 
1042  if (h->mb_width != h->ps.sps->mb_width ||
1043  h->mb_height != h->ps.sps->mb_height ||
 /* NOTE(review): remaining condition lines (1044-1045) lost by extraction. */
1046  )
1047  needs_reinit = 1;
1048 
1049  if (h->bit_depth_luma != h->ps.sps->bit_depth_luma ||
 /* NOTE(review): second half of this condition (line 1050) was dropped. */
1051  needs_reinit = 1;
1052  }
1053  sps = h->ps.sps;
1054 
 /* Full reinit when the already-initialized context disagrees with the SPS. */
1055  must_reinit = (h->context_initialized &&
1056  ( 16*sps->mb_width != h->avctx->coded_width
1057  || 16*sps->mb_height != h->avctx->coded_height
1058  || h->cur_bit_depth_luma != sps->bit_depth_luma
1060  || h->mb_width != sps->mb_width
1061  || h->mb_height != sps->mb_height
1062  ));
1063  if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
1065  must_reinit = 1;
1066 
1067  if (first_slice && av_cmp_q(sps->sar, h->avctx->sample_aspect_ratio))
1068  must_reinit = 1;
1069 
 /* Mirror SPS-derived values into decoder/avctx state (skipped once the
  * caller has frozen setup, e.g. when used by the parser). */
1070  if (!h->setup_finished) {
1071  h->avctx->profile = ff_h264_get_profile(sps);
1072  h->avctx->level = sps->level_idc;
1073  h->avctx->refs = sps->ref_frame_count;
1074 
1075  h->mb_width = sps->mb_width;
1076  h->mb_height = sps->mb_height;
1077  h->mb_num = h->mb_width * h->mb_height;
1078  h->mb_stride = h->mb_width + 1;
1079 
1080  h->b_stride = h->mb_width * 4;
1081 
1082  h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p
1083 
1084  h->width = 16 * h->mb_width;
1085  h->height = 16 * h->mb_height;
1086 
1087  init_dimensions(h);
1088 
 /* Propagate VUI colorimetry; a colorspace change also forces reinit. */
1089  if (sps->video_signal_type_present_flag) {
1090  h->avctx->color_range = sps->full_range > 0 ? AVCOL_RANGE_JPEG
1091  : AVCOL_RANGE_MPEG;
1093  if (h->avctx->colorspace != sps->colorspace)
1094  needs_reinit = 1;
1096  h->avctx->color_trc = sps->color_trc;
1097  h->avctx->colorspace = sps->colorspace;
1098  }
1099  }
1100 
 /* NOTE(review): the body of this block (lines 1102-1104) was dropped;
  * presumably it overrides color_trc from the alternative-transfer SEI. */
1101  if (h->sei.alternative_transfer.present &&
1105  }
1106  }
1108 
 /* Rebuild the decoding context if anything relevant changed. Only legal
  * on the first slice (and first slice context) of a picture. */
1109  if (!h->context_initialized || must_reinit || needs_reinit) {
1110  int flush_changes = h->context_initialized;
1111  h->context_initialized = 0;
1112  if (sl != h->slice_ctx) {
1114  "changing width %d -> %d / height %d -> %d on "
1115  "slice %d\n",
1116  h->width, h->avctx->coded_width,
1117  h->height, h->avctx->coded_height,
1118  h->current_slice + 1);
1119  return AVERROR_INVALIDDATA;
1120  }
1121 
1122  av_assert1(first_slice);
1123 
1124  if (flush_changes)
 /* NOTE(review): the flush call on line 1125 was dropped by the extraction. */
1126 
1127  if ((ret = get_pixel_format(h, 1)) < 0)
1128  return ret;
1129  h->avctx->pix_fmt = ret;
1130 
1131  av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
1132  "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt));
1133 
1134  if ((ret = h264_slice_header_init(h)) < 0) {
1136  "h264_slice_header_init() failed\n");
1137  return ret;
1138  }
1139  }
1140 
1141  return 0;
1142 }
1143 
/*
 * NOTE(review): the function signature (doxygen line 1144) was dropped by
 * the extraction; in upstream FFmpeg this is h264_export_frame_props(h).
 * It derives per-frame output properties — interlacing, repeat_pict,
 * top_field_first — and attaches SEI-derived side data (stereo 3D, display
 * orientation, AFD, A53 captions, unregistered user data, timecodes) to the
 * current picture's AVFrame. Returns 0 or a negative AVERROR.
 * Gaps in the embedded numbering mark dropped lines (case labels,
 * av_frame_new_side_data() openers, condition fragments).
 */
1145 {
1146  const SPS *sps = h->ps.sps;
1147  H264Picture *cur = h->cur_pic_ptr;
1148  AVFrame *out = cur->f;
1149 
1150  out->interlaced_frame = 0;
1151  out->repeat_pict = 0;
1152 
1153  /* Signal interlacing information externally. */
1154  /* Prioritize picture timing SEI information over the used
1155  * decoding process, if it exists. */
1156  if (h->sei.picture_timing.present) {
1158  h->avctx);
1159  if (ret < 0) {
1160  av_log(h->avctx, AV_LOG_ERROR, "Error processing a picture timing SEI\n");
1162  return ret;
1163  h->sei.picture_timing.present = 0;
1164  }
1165  }
1166 
 /* NOTE(review): the pic_struct case labels (SEI pic_struct values) were
  * dropped here; only the actions remain. */
1169  switch (pt->pic_struct) {
1171  break;
1174  out->interlaced_frame = 1;
1175  break;
1178  if (FIELD_OR_MBAFF_PICTURE(h))
1179  out->interlaced_frame = 1;
1180  else
1181  // try to flag soft telecine progressive
1183  break;
1186  /* Signal the possibility of telecined film externally
1187  * (pic_struct 5,6). From these hints, let the applications
1188  * decide if they apply deinterlacing. */
1189  out->repeat_pict = 1;
1190  break;
1192  out->repeat_pict = 2;
1193  break;
1195  out->repeat_pict = 4;
1196  break;
1197  }
1198 
 /* ct_type bit 1 indicates interlaced source when the clock-timestamp
  * type is usable; otherwise fall back to the decoding process. */
1199  if ((pt->ct_type & 3) &&
1201  out->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
1202  } else {
1203  /* Derive interlacing flag from used decoding process. */
1205  }
1207 
1208  if (cur->field_poc[0] != cur->field_poc[1]) {
1209  /* Derive top_field_first from field pocs. */
1210  out->top_field_first = cur->field_poc[0] < cur->field_poc[1];
1211  } else {
1213  /* Use picture timing SEI information. Even if it is
1214  * information of a past frame, better than nothing. */
1217  out->top_field_first = 1;
1218  else
1219  out->top_field_first = 0;
1220  } else if (out->interlaced_frame) {
1221  /* Default to top field first when pic_struct_present_flag
1222  * is not set but interlaced frame detected */
1223  out->top_field_first = 1;
1224  } else {
1225  /* Most likely progressive */
1226  out->top_field_first = 0;
1227  }
1228  }
1229 
 /* Frame-packing SEI -> AVStereo3D side data. */
1230  if (h->sei.frame_packing.present &&
1236  if (stereo) {
1237  switch (fp->arrangement_type) {
1239  stereo->type = AV_STEREO3D_CHECKERBOARD;
1240  break;
1242  stereo->type = AV_STEREO3D_COLUMNS;
1243  break;
1245  stereo->type = AV_STEREO3D_LINES;
1246  break;
1248  if (fp->quincunx_sampling_flag)
1250  else
1251  stereo->type = AV_STEREO3D_SIDEBYSIDE;
1252  break;
1254  stereo->type = AV_STEREO3D_TOPBOTTOM;
1255  break;
1257  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
1258  break;
1259  case H264_SEI_FPA_TYPE_2D:
1260  stereo->type = AV_STEREO3D_2D;
1261  break;
1262  }
1263 
1264  if (fp->content_interpretation_type == 2)
1265  stereo->flags = AV_STEREO3D_FLAG_INVERT;
1266 
1269  stereo->view = AV_STEREO3D_VIEW_LEFT;
1270  else
1271  stereo->view = AV_STEREO3D_VIEW_RIGHT;
1272  }
1273  }
1274  }
1275 
 /* Display-orientation SEI -> 3x3 rotation matrix side data.
  * anticlockwise_rotation is a 16.16 fixed-point fraction of 360 degrees. */
1276  if (h->sei.display_orientation.present &&
1281  double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
1282  AVFrameSideData *rotation = av_frame_new_side_data(out,
1284  sizeof(int32_t) * 9);
1285  if (rotation) {
1286  av_display_rotation_set((int32_t *)rotation->data, angle);
1287  av_display_matrix_flip((int32_t *)rotation->data,
1288  o->hflip, o->vflip);
1289  }
1290  }
1291 
 /* Active-format-description SEI -> one-byte AFD side data. */
1292  if (h->sei.afd.present) {
1294  sizeof(uint8_t));
1295 
1296  if (sd) {
1298  h->sei.afd.present = 0;
1299  }
1300  }
1301 
 /* ATSC A53 closed captions: hand the buffer over as side data; on
  * side-data allocation failure the buffer is unreferenced instead. */
1302  if (h->sei.a53_caption.buf_ref) {
1303  H264SEIA53Caption *a53 = &h->sei.a53_caption;
1304 
1306  if (!sd)
1307  av_buffer_unref(&a53->buf_ref);
1308  a53->buf_ref = NULL;
1309 
1311  }
1312 
 /* Unregistered user-data SEI buffers, one side-data entry each. */
1313  for (int i = 0; i < h->sei.unregistered.nb_buf_ref; i++) {
1314  H264SEIUnregistered *unreg = &h->sei.unregistered;
1315 
1316  if (unreg->buf_ref[i]) {
1319  unreg->buf_ref[i]);
1320  if (!sd)
1321  av_buffer_unref(&unreg->buf_ref[i]);
1322  unreg->buf_ref[i] = NULL;
1323  }
1324  }
1325  h->sei.unregistered.nb_buf_ref = 0;
1326 
 /* Picture-timing timecodes -> S12M timecode side data (count + up to 3
  * packed SMPTE timecodes) and a "timecode" metadata string. */
1327  if (h->sei.picture_timing.timecode_cnt > 0) {
1328  uint32_t *tc_sd;
1329  char tcbuf[AV_TIMECODE_STR_SIZE];
1330 
1333  sizeof(uint32_t)*4);
1334  if (!tcside)
1335  return AVERROR(ENOMEM);
1336 
1337  tc_sd = (uint32_t*)tcside->data;
1338  tc_sd[0] = h->sei.picture_timing.timecode_cnt;
1339 
1340  for (int i = 0; i < tc_sd[0]; i++) {
1341  int drop = h->sei.picture_timing.timecode[i].dropframe;
1342  int hh = h->sei.picture_timing.timecode[i].hours;
1343  int mm = h->sei.picture_timing.timecode[i].minutes;
1344  int ss = h->sei.picture_timing.timecode[i].seconds;
1345  int ff = h->sei.picture_timing.timecode[i].frame;
1346 
1347  tc_sd[i + 1] = av_timecode_get_smpte(h->avctx->framerate, drop, hh, mm, ss, ff);
1348  av_timecode_make_smpte_tc_string2(tcbuf, h->avctx->framerate, tc_sd[i + 1], 0, 0);
1349  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
1350  }
1352  }
1353 
1354  return 0;
1355 }
1356 
/*
 * NOTE(review): the signature (doxygen line 1357) was dropped; in upstream
 * FFmpeg this is h264_select_output_frame(h). It inserts the current
 * picture into the delayed-picture reorder buffer, estimates the required
 * reordering depth from recent POC history, and picks the lowest-POC
 * delayed picture as h->next_output_pic once enough pictures are buffered.
 * Returns 0 (errors are handled by flagging/logging, not by failing).
 */
1358 {
1359  const SPS *sps = h->ps.sps;
1360  H264Picture *out = h->cur_pic_ptr;
1361  H264Picture *cur = h->cur_pic_ptr;
1362  int i, pics, out_of_order, out_idx;
1363 
1364  cur->mmco_reset = h->mmco_reset;
1365  h->mmco_reset = 0;
1366 
 /* NOTE(review): the body of this block (lines 1368-1369) was dropped;
  * presumably it raises has_b_frames from the bitstream restriction info. */
1367  if (sps->bitstream_restriction_flag ||
1370  }
1371 
 /* Insert cur->poc into the sorted last_pocs history (ascending). */
1372  for (i = 0; 1; i++) {
1373  if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
1374  if(i)
1375  h->last_pocs[i-1] = cur->poc;
1376  break;
1377  } else if(i) {
1378  h->last_pocs[i-1]= h->last_pocs[i];
1379  }
1380  }
1381  out_of_order = MAX_DELAYED_PIC_COUNT - i;
1382  if( cur->f->pict_type == AV_PICTURE_TYPE_B
1383  || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - (int64_t)h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
1384  out_of_order = FFMAX(out_of_order, 1);
1385  if (out_of_order == MAX_DELAYED_PIC_COUNT) {
1386  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
1387  for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
1388  h->last_pocs[i] = INT_MIN;
1389  h->last_pocs[0] = cur->poc;
1390  cur->mmco_reset = 1;
1391  } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
 /* Only warn after the first frame: early reorder growth is normal. */
1392  int loglevel = h->avctx->frame_number > 1 ? AV_LOG_WARNING : AV_LOG_VERBOSE;
1393  av_log(h->avctx, loglevel, "Increasing reorder buffer to %d\n", out_of_order);
1394  h->avctx->has_b_frames = out_of_order;
1395  }
1396 
1397  pics = 0;
1398  while (h->delayed_pic[pics])
1399  pics++;
1400 
1402 
 /* Queue the current picture; keep it referenced while delayed. */
1403  h->delayed_pic[pics++] = cur;
1404  if (cur->reference == 0)
1405  cur->reference = DELAYED_PIC_REF;
1406 
 /* Find the lowest-POC delayed picture, stopping at key frames / MMCO
  * resets which act as reordering barriers. */
1407  out = h->delayed_pic[0];
1408  out_idx = 0;
1409  for (i = 1; h->delayed_pic[i] &&
1410  !h->delayed_pic[i]->f->key_frame &&
1411  !h->delayed_pic[i]->mmco_reset;
1412  i++)
1413  if (h->delayed_pic[i]->poc < out->poc) {
1414  out = h->delayed_pic[i];
1415  out_idx = i;
1416  }
1417  if (h->avctx->has_b_frames == 0 &&
1418  (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
1419  h->next_outputed_poc = INT_MIN;
1420  out_of_order = out->poc < h->next_outputed_poc;
1421 
 /* Remove the chosen picture from the delayed queue when it will be
  * output (or discarded as out-of-order). */
1422  if (out_of_order || pics > h->avctx->has_b_frames) {
1423  out->reference &= ~DELAYED_PIC_REF;
1424  for (i = out_idx; h->delayed_pic[i]; i++)
1425  h->delayed_pic[i] = h->delayed_pic[i + 1];
1426  }
1427  if (!out_of_order && pics > h->avctx->has_b_frames) {
1428  h->next_output_pic = out;
1429  if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
1430  h->next_outputed_poc = INT_MIN;
1431  } else
1432  h->next_outputed_poc = out->poc;
1433 
1434  if (out->recovered) {
1435  // We have reached a recovery point and all frames after it in
1436  // display order are "recovered".
1438  }
1440 
 /* Suppress (or flag) output of frames decoded before a recovery point. */
1441  if (!out->recovered) {
1442  if (!(h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) &&
1444  h->next_output_pic = NULL;
1445  } else {
1446  out->f->flags |= AV_FRAME_FLAG_CORRUPT;
1447  }
1448  }
1449  } else {
1450  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
1451  }
1452 
1453  return 0;
1454 }
1455 
1456 /* This function is called right after decoding the slice header for a first
1457  * slice in a field (or a frame). It decides whether we are decoding a new frame
1458  * or a second field in a pair and does the necessary setup.
1459  */
/*
 * NOTE(review): the first signature line (doxygen line 1460) was dropped;
 * in upstream FFmpeg this is
 *   static int h264_field_start(H264Context *h, const H264SliceContext *sl,
 * Called after parsing the first slice header of a field/frame: activates
 * parameter sets, resolves field pairing with any pending first field,
 * conceals frame_num gaps, starts a new frame when needed, initializes POC,
 * tracks recovery points, and finally exports frame props and selects the
 * next output frame. Returns 0 or a negative AVERROR.
 */
1461  const H2645NAL *nal, int first_slice)
1462 {
1463  int i;
1464  const SPS *sps;
1465 
1466  int last_pic_structure, last_pic_droppable, ret;
1467 
1468  ret = h264_init_ps(h, sl, first_slice);
1469  if (ret < 0)
1470  return ret;
1471 
1472  sps = h->ps.sps;
1473 
 /* NOTE(review): body (line 1476) dropped; presumably raises has_b_frames
  * to the SPS num_reorder_frames. */
1474  if (sps && sps->bitstream_restriction_flag &&
1475  h->avctx->has_b_frames < sps->num_reorder_frames) {
1477  }
1478 
1479  last_pic_droppable = h->droppable;
1480  last_pic_structure = h->picture_structure;
1481  h->droppable = (nal->ref_idc == 0);
1483 
 /* Copy slice-header POC inputs into the shared POC context. */
1484  h->poc.frame_num = sl->frame_num;
1485  h->poc.poc_lsb = sl->poc_lsb;
1487  h->poc.delta_poc[0] = sl->delta_poc[0];
1488  h->poc.delta_poc[1] = sl->delta_poc[1];
1489 
1490  /* Shorten frame num gaps so we don't have to allocate reference
1491  * frames just to throw them away */
1492  if (h->poc.frame_num != h->poc.prev_frame_num) {
1493  int unwrap_prev_frame_num = h->poc.prev_frame_num;
1494  int max_frame_num = 1 << sps->log2_max_frame_num;
1495 
1496  if (unwrap_prev_frame_num > h->poc.frame_num)
1497  unwrap_prev_frame_num -= max_frame_num;
1498 
1499  if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
1500  unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
1501  if (unwrap_prev_frame_num < 0)
1502  unwrap_prev_frame_num += max_frame_num;
1503 
1504  h->poc.prev_frame_num = unwrap_prev_frame_num;
1505  }
1506  }
1507 
1508  /* See if we have a decoded first field looking for a pair...
1509  * Here, we're using that to see if we should mark previously
1510  * decoded frames as "finished".
1511  * We have to do that before the "dummy" in-between frame allocation,
1512  * since that can modify h->cur_pic_ptr. */
1513  if (h->first_field) {
1514  int last_field = last_pic_structure == PICT_BOTTOM_FIELD;
1515  av_assert0(h->cur_pic_ptr);
1516  av_assert0(h->cur_pic_ptr->f->buf[0]);
1517  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1518 
1519  /* Mark old field/frame as completed */
1520  if (h->cur_pic_ptr->tf.owner[last_field] == h->avctx) {
1521  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_field);
1522  }
1523 
1524  /* figure out if we have a complementary field pair */
1525  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1526  /* Previous field is unmatched. Don't display it, but let it
1527  * remain for reference if marked as such. */
1528  if (last_pic_structure != PICT_FRAME) {
1530  last_pic_structure == PICT_TOP_FIELD);
1531  }
1532  } else {
1533  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1534  /* This and previous field were reference, but had
1535  * different frame_nums. Consider this field first in
1536  * pair. Throw away previous field except for reference
1537  * purposes. */
1538  if (last_pic_structure != PICT_FRAME) {
1540  last_pic_structure == PICT_TOP_FIELD);
1541  }
1542  } else {
1543  /* Second field in complementary pair */
1544  if (!((last_pic_structure == PICT_TOP_FIELD &&
 /* NOTE(review): condition fragments (lines 1545, 1547-1548) dropped. */
1546  (last_pic_structure == PICT_BOTTOM_FIELD &&
1549  "Invalid field mode combination %d/%d\n",
1550  last_pic_structure, h->picture_structure);
1551  h->picture_structure = last_pic_structure;
1552  h->droppable = last_pic_droppable;
1553  return AVERROR_INVALIDDATA;
1554  } else if (last_pic_droppable != h->droppable) {
1556  "Found reference and non-reference fields in the same frame, which");
1557  h->picture_structure = last_pic_structure;
1558  h->droppable = last_pic_droppable;
1559  return AVERROR_PATCHWELCOME;
1560  }
1561  }
1562  }
1563  }
1564 
 /* Conceal frame_num gaps: decode "dummy" frames so reference lists and
  * POC state stay consistent. */
1565  while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
1566  h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
1567  H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
1568  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
1569  h->poc.frame_num, h->poc.prev_frame_num);
1571  for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
1572  h->last_pocs[i] = INT_MIN;
1573  ret = h264_frame_start(h);
1574  if (ret < 0) {
1575  h->first_field = 0;
1576  return ret;
1577  }
1578 
1579  h->poc.prev_frame_num++;
1580  h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num;
1583  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
1584  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
1585 
1586  h->explicit_ref_marking = 0;
 /* NOTE(review): the ref-marking call whose result is checked below
  * (line 1587) was dropped by the extraction. */
1588  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1589  return ret;
1590  /* Error concealment: If a ref is missing, copy the previous ref
1591  * in its place.
1592  * FIXME: Avoiding a memcpy would be nice, but ref handling makes
1593  * many assumptions about there being no actual duplicates.
1594  * FIXME: This does not copy padding for out-of-frame motion
1595  * vectors. Given we are concealing a lost frame, this probably
1596  * is not noticeable by comparison, but it should be fixed. */
1597  if (h->short_ref_count) {
 /* Mid-gray fill color per plane at the stream's bit depth. */
1598  int c[4] = {
1599  1<<(h->ps.sps->bit_depth_luma-1),
1600  1<<(h->ps.sps->bit_depth_chroma-1),
1601  1<<(h->ps.sps->bit_depth_chroma-1),
1602  -1
1603  };
1604 
1605  if (prev &&
1606  h->short_ref[0]->f->width == prev->f->width &&
1607  h->short_ref[0]->f->height == prev->f->height &&
1608  h->short_ref[0]->f->format == prev->f->format) {
1609  ff_thread_await_progress(&prev->tf, INT_MAX, 0);
1610  if (prev->field_picture)
1611  ff_thread_await_progress(&prev->tf, INT_MAX, 1);
1612  av_image_copy(h->short_ref[0]->f->data,
1613  h->short_ref[0]->f->linesize,
1614  (const uint8_t **)prev->f->data,
1615  prev->f->linesize,
1616  prev->f->format,
1617  prev->f->width,
1618  prev->f->height);
1619  h->short_ref[0]->poc = prev->poc + 2;
1620  } else if (!h->frame_recovered && !h->avctx->hwaccel)
1621  ff_color_frame(h->short_ref[0]->f, c);
1622  h->short_ref[0]->frame_num = h->poc.prev_frame_num;
1623  }
1624  }
1625 
1626  /* See if we have a decoded first field looking for a pair...
1627  * We're using that to see whether to continue decoding in that
1628  * frame, or to allocate a new one. */
1629  if (h->first_field) {
1630  av_assert0(h->cur_pic_ptr);
1631  av_assert0(h->cur_pic_ptr->f->buf[0]);
1632  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1633 
1634  /* figure out if we have a complementary field pair */
1635  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1636  /* Previous field is unmatched. Don't display it, but let it
1637  * remain for reference if marked as such. */
1638  h->missing_fields ++;
1639  h->cur_pic_ptr = NULL;
1640  h->first_field = FIELD_PICTURE(h);
1641  } else {
1642  h->missing_fields = 0;
1643  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
 /* NOTE(review): progress-report lines (1644-1645) dropped here. */
1646  /* This and the previous field had different frame_nums.
1647  * Consider this field first in pair. Throw away previous
1648  * one except for reference purposes. */
1649  h->first_field = 1;
1650  h->cur_pic_ptr = NULL;
1651  } else if (h->cur_pic_ptr->reference & DELAYED_PIC_REF) {
1652  /* This frame was already output, we cannot draw into it
1653  * anymore.
1654  */
1655  h->first_field = 1;
1656  h->cur_pic_ptr = NULL;
1657  } else {
1658  /* Second field in complementary pair */
1659  h->first_field = 0;
1660  }
1661  }
1662  } else {
1663  /* Frame or first field in a potentially complementary pair */
1664  h->first_field = FIELD_PICTURE(h);
1665  }
1666 
1667  if (!FIELD_PICTURE(h) || h->first_field) {
1668  if (h264_frame_start(h) < 0) {
1669  h->first_field = 0;
1670  return AVERROR_INVALIDDATA;
1671  }
1672  } else {
 /* Second field: reuse the existing picture; take ownership of its
  * field progress slot. NOTE(review): the 'field' declaration lines
  * (1673-1674) were dropped by the extraction. */
1675  h->cur_pic_ptr->tf.owner[field] = h->avctx;
1676  }
1677  /* Some macroblocks can be accessed before they're available in case
1678  * of lost slices, MBAFF or threading. */
1679  if (FIELD_PICTURE(h)) {
1680  for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
1681  memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
1682  } else {
1683  memset(h->slice_table, -1,
1684  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
1685  }
1686 
 /* NOTE(review): the POC-init call opener (line 1687) was dropped;
  * presumably ret = ff_h264_init_poc(...). */
1688  h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);
1689  if (ret < 0)
1690  return ret;
1691 
 /* Latch the slice's MMCO commands for the whole picture. */
1692  memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
1693  h->nb_mmco = sl->nb_mmco;
1695 
1696  h->picture_idr = nal->type == H264_NAL_IDR_SLICE;
1697 
 /* Recovery-point SEI: schedule the frame_num at which output becomes
  * trustworthy again. */
1698  if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
1699  const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
1700 
1701  if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
1702  h->valid_recovery_point = 1;
1703 
1704  if ( h->recovery_frame < 0
1705  || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
1706  h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
1707 
1708  if (!h->valid_recovery_point)
1709  h->recovery_frame = h->poc.frame_num;
1710  }
1711  }
1712 
1713  h->cur_pic_ptr->f->key_frame |= (nal->type == H264_NAL_IDR_SLICE);
1714 
1715  if (nal->type == H264_NAL_IDR_SLICE ||
1716  (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
1717  h->recovery_frame = -1;
1718  h->cur_pic_ptr->recovered = 1;
1719  }
1720  // If we have an IDR, all frames after it in decoded order are
1721  // "recovered".
1722  if (nal->type == H264_NAL_IDR_SLICE)
 /* NOTE(review): both branches of this #if (lines 1723, 1725, 1727)
  * were dropped by the extraction. */
1724 #if 1
1726 #else
1728 #endif
1729 
1730  /* Set the frame properties/side data. Only done for the second field in
1731  * field coded frames, since some SEI information is present for each field
1732  * and is merged by the SEI parsing code. */
1733  if (!FIELD_PICTURE(h) || !h->first_field || h->missing_fields > 1) {
1734  ret = h264_export_frame_props(h);
1735  if (ret < 0)
1736  return ret;
1737 
1738  ret = h264_select_output_frame(h);
1739  if (ret < 0)
1740  return ret;
1741  }
1742 
1743  return 0;
1744 }
1745 
/*
 * NOTE(review): the first signature line (doxygen line 1746) was dropped;
 * in upstream FFmpeg this is
 *   static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl,
 * Parses one slice header from sl->gb into the slice context only — no
 * decoder state is modified here. Returns 0 or a negative AVERROR on
 * invalid syntax. Gaps in the embedded numbering mark dropped lines.
 */
1747  const H2645NAL *nal)
1748 {
1749  const SPS *sps;
1750  const PPS *pps;
1751  int ret;
1752  unsigned int slice_type, tmp, i;
1753  int field_pic_flag, bottom_field_flag;
1754  int first_slice = sl == h->slice_ctx && !h->current_slice;
1755  int picture_structure;
1756 
1757  if (first_slice)
 /* NOTE(review): the statement body (line 1758) was dropped here. */
1759 
1760  sl->first_mb_addr = get_ue_golomb_long(&sl->gb);
1761 
 /* slice_type: 0..4, or 5..9 meaning "same type for all slices". */
1762  slice_type = get_ue_golomb_31(&sl->gb);
1763  if (slice_type > 9) {
1765  "slice type %d too large at %d\n",
1766  slice_type, sl->first_mb_addr);
1767  return AVERROR_INVALIDDATA;
1768  }
1769  if (slice_type > 4) {
1770  slice_type -= 5;
1771  sl->slice_type_fixed = 1;
1772  } else
1773  sl->slice_type_fixed = 0;
1774 
1775  slice_type = ff_h264_golomb_to_pict_type[slice_type];
1776  sl->slice_type = slice_type;
1777  sl->slice_type_nos = slice_type & 3;
1778 
1779  if (nal->type == H264_NAL_IDR_SLICE &&
1781  av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
1782  return AVERROR_INVALIDDATA;
1783  }
1784 
1785  sl->pps_id = get_ue_golomb(&sl->gb);
1786  if (sl->pps_id >= MAX_PPS_COUNT) {
1787  av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
1788  return AVERROR_INVALIDDATA;
1789  }
1790  if (!h->ps.pps_list[sl->pps_id]) {
1792  "non-existing PPS %u referenced\n",
1793  sl->pps_id);
1794  return AVERROR_INVALIDDATA;
1795  }
1796  pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
1797  sps = pps->sps;
1798 
1799  sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
 /* All slices of one picture must carry the same frame_num. */
1800  if (!first_slice) {
1801  if (h->poc.frame_num != sl->frame_num) {
1802  av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
1803  h->poc.frame_num, sl->frame_num);
1804  return AVERROR_INVALIDDATA;
1805  }
1806  }
1807 
1808  sl->mb_mbaff = 0;
1809 
 /* field_pic_flag / bottom_field_flag only exist for non-frame-only SPS. */
1810  if (sps->frame_mbs_only_flag) {
1811  picture_structure = PICT_FRAME;
1812  } else {
1813  if (!sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
1814  av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
1815  return -1;
1816  }
1817  field_pic_flag = get_bits1(&sl->gb);
1818  if (field_pic_flag) {
1819  bottom_field_flag = get_bits1(&sl->gb);
1820  picture_structure = PICT_TOP_FIELD + bottom_field_flag;
1821  } else {
1822  picture_structure = PICT_FRAME;
1823  }
1824  }
1825  sl->picture_structure = picture_structure;
1826  sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;
1827 
1828  if (picture_structure == PICT_FRAME) {
1829  sl->curr_pic_num = sl->frame_num;
1830  sl->max_pic_num = 1 << sps->log2_max_frame_num;
1831  } else {
1832  sl->curr_pic_num = 2 * sl->frame_num + 1;
1833  sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
1834  }
1835 
1836  if (nal->type == H264_NAL_IDR_SLICE)
1837  get_ue_golomb_long(&sl->gb); /* idr_pic_id */
1838 
 /* POC syntax depends on the SPS pic_order_cnt_type. */
1839  if (sps->poc_type == 0) {
1840  sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
1841 
1842  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1843  sl->delta_poc_bottom = get_se_golomb(&sl->gb);
1844  }
1845 
1846  if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
1847  sl->delta_poc[0] = get_se_golomb(&sl->gb);
1848 
1849  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1850  sl->delta_poc[1] = get_se_golomb(&sl->gb);
1851  }
1852 
1853  sl->redundant_pic_count = 0;
1854  if (pps->redundant_pic_cnt_present)
1855  sl->redundant_pic_count = get_ue_golomb(&sl->gb);
1856 
1857  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1858  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
1859 
 /* NOTE(review): the ref-count parse opener (line 1860) was dropped;
  * presumably ret = ff_h264_parse_ref_count(...). */
1861  &sl->gb, pps, sl->slice_type_nos,
1862  picture_structure, h->avctx);
1863  if (ret < 0)
1864  return ret;
1865 
1866  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
 /* NOTE(review): the ref-list-modification parse call (line 1867) was
  * dropped by the extraction. */
1868  if (ret < 0) {
1869  sl->ref_count[1] = sl->ref_count[0] = 0;
1870  return ret;
1871  }
1872  }
1873 
1874  sl->pwt.use_weight = 0;
1875  for (i = 0; i < 2; i++) {
1876  sl->pwt.luma_weight_flag[i] = 0;
1877  sl->pwt.chroma_weight_flag[i] = 0;
1878  }
 /* Explicit weighted prediction tables (P with weighted_pred, or B with
  * weighted_bipred_idc == 1). */
1879  if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
1880  (pps->weighted_bipred_idc == 1 &&
1882  ret = ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count,
1883  sl->slice_type_nos, &sl->pwt,
1884  picture_structure, h->avctx);
1885  if (ret < 0)
1886  return ret;
1887  }
1888 
1889  sl->explicit_ref_marking = 0;
1890  if (nal->ref_idc) {
1891  ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx);
1892  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1893  return AVERROR_INVALIDDATA;
1894  }
1895 
1896  if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
1897  tmp = get_ue_golomb_31(&sl->gb);
1898  if (tmp > 2) {
1899  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
1900  return AVERROR_INVALIDDATA;
1901  }
1902  sl->cabac_init_idc = tmp;
1903  }
1904 
1905  sl->last_qscale_diff = 0;
 /* (unsigned) cast avoids signed-overflow UB on hostile slice_qp_delta. */
1906  tmp = pps->init_qp + (unsigned)get_se_golomb(&sl->gb);
1907  if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
1908  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
1909  return AVERROR_INVALIDDATA;
1910  }
1911  sl->qscale = tmp;
1912  sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
1913  sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
1914  // FIXME qscale / qp ... stuff
1915  if (sl->slice_type == AV_PICTURE_TYPE_SP)
1916  get_bits1(&sl->gb); /* sp_for_switch_flag */
1917  if (sl->slice_type == AV_PICTURE_TYPE_SP ||
1919  get_se_golomb(&sl->gb); /* slice_qs_delta */
1920 
1921  sl->deblocking_filter = 1;
1922  sl->slice_alpha_c0_offset = 0;
1923  sl->slice_beta_offset = 0;
 /* NOTE(review): the enclosing deblocking_filter_control_present_flag
  * condition (line 1924) was dropped by the extraction. */
1925  tmp = get_ue_golomb_31(&sl->gb);
1926  if (tmp > 2) {
1928  "deblocking_filter_idc %u out of range\n", tmp);
1929  return AVERROR_INVALIDDATA;
1930  }
1931  sl->deblocking_filter = tmp;
 /* Bitstream: 0 = on, 1 = off; internal convention is the inverse. */
1932  if (sl->deblocking_filter < 2)
1933  sl->deblocking_filter ^= 1; // 1<->0
1934 
1935  if (sl->deblocking_filter) {
1936  int slice_alpha_c0_offset_div2 = get_se_golomb(&sl->gb);
1937  int slice_beta_offset_div2 = get_se_golomb(&sl->gb);
1938  if (slice_alpha_c0_offset_div2 > 6 ||
1939  slice_alpha_c0_offset_div2 < -6 ||
1940  slice_beta_offset_div2 > 6 ||
1941  slice_beta_offset_div2 < -6) {
1943  "deblocking filter parameters %d %d out of range\n",
1944  slice_alpha_c0_offset_div2, slice_beta_offset_div2);
1945  return AVERROR_INVALIDDATA;
1946  }
1947  sl->slice_alpha_c0_offset = slice_alpha_c0_offset_div2 * 2;
1948  sl->slice_beta_offset = slice_beta_offset_div2 * 2;
1949  }
1950  }
1951 
1952  return 0;
1953 }
1954 
1955 /* do all the per-slice initialization needed before we can start decoding the
1956  * actual MBs */
/*
 * NOTE(review): the first signature line (doxygen line 1957) was dropped;
 * in upstream FFmpeg this is
 *   static int h264_slice_init(H264Context *h, H264SliceContext *sl,
 * Per-slice initialization after the header is parsed and the field/frame
 * is started: validates first_mb_addr, builds reference lists, sets up
 * implicit weights, deblocking mode, qp threshold, slice numbering and the
 * ref2frm mapping used by the loop filter. Returns 0 or negative AVERROR.
 */
1958  const H2645NAL *nal)
1959 {
1960  int i, j, ret = 0;
1961 
1962  if (h->picture_idr && nal->type != H264_NAL_IDR_SLICE) {
1963  av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of IDR and non-IDR slices\n");
1964  return AVERROR_INVALIDDATA;
1965  }
1966 
1967  av_assert1(h->mb_num == h->mb_width * h->mb_height);
1968  if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
1969  sl->first_mb_addr >= h->mb_num) {
1970  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
1971  return AVERROR_INVALIDDATA;
1972  }
1973  sl->resync_mb_x = sl->mb_x = sl->first_mb_addr % h->mb_width;
1974  sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
 /* NOTE(review): shift operand and condition (lines 1975-1976) dropped;
  * upstream adjusts mb_y for MBAFF / bottom field here. */
1977  sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
1978  av_assert1(sl->mb_y < h->mb_height);
1979 
1980  ret = ff_h264_build_ref_list(h, sl);
1981  if (ret < 0)
1982  return ret;
1983 
 /* Implicit weighted prediction for B slices (weighted_bipred_idc == 2). */
1984  if (h->ps.pps->weighted_bipred_idc == 2 &&
1986  implicit_weight_table(h, sl, -1);
1987  if (FRAME_MBAFF(h)) {
1988  implicit_weight_table(h, sl, 0);
1989  implicit_weight_table(h, sl, 1);
1990  }
1991  }
1992 
 /* NOTE(review): lines 1993-1996 partly dropped (direct-mode init and the
  * statement guarded by !h->setup_finished). */
1995  if (!h->setup_finished)
1997 
 /* Decide whether to skip the loop filter entirely for this slice. */
1998  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
2006  nal->ref_idc == 0))
2007  sl->deblocking_filter = 0;
2008 
2009  if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
2010  if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
2011  /* Cheat slightly for speed:
2012  * Do not bother to deblock across slices. */
2013  sl->deblocking_filter = 2;
2014  } else {
2015  h->postpone_filter = 1;
2016  }
2017  }
 /* QP below this threshold cannot produce any deblocking activity. */
2018  sl->qp_thresh = 15 -
2020  FFMAX3(0,
2021  h->ps.pps->chroma_qp_index_offset[0],
2022  h->ps.pps->chroma_qp_index_offset[1]) +
2023  6 * (h->ps.sps->bit_depth_luma - 8);
2024 
2025  sl->slice_num = ++h->current_slice;
2026 
2027  if (sl->slice_num)
2028  h->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= sl->resync_mb_y;
2029  if ( h->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= sl->resync_mb_y
2030  && h->slice_row[sl->slice_num&(MAX_SLICES-1)] <= sl->resync_mb_y
2031  && sl->slice_num >= MAX_SLICES) {
2032  //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
2033  av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
2034  }
2035 
 /* Build ref2frm: maps each list/ref index to a stable frame id (short
  * refs first, then long refs; 60 = unknown) for loop-filter decisions. */
2036  for (j = 0; j < 2; j++) {
2037  int id_list[16];
2038  int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
2039  for (i = 0; i < 16; i++) {
2040  id_list[i] = 60;
2041  if (j < sl->list_count && i < sl->ref_count[j] &&
2042  sl->ref_list[j][i].parent->f->buf[0]) {
2043  int k;
2044  AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
2045  for (k = 0; k < h->short_ref_count; k++)
2046  if (h->short_ref[k]->f->buf[0]->buffer == buf) {
2047  id_list[i] = k;
2048  break;
2049  }
2050  for (k = 0; k < h->long_ref_count; k++)
2051  if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
2052  id_list[i] = h->short_ref_count + k;
2053  break;
2054  }
2055  }
2056  }
2057 
2058  ref2frm[0] =
2059  ref2frm[1] = -1;
2060  for (i = 0; i < 16; i++)
2061  ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
2062  ref2frm[18 + 0] =
2063  ref2frm[18 + 1] = -1;
 /* Second half: field-interleaved entries for MBAFF/field decoding. */
2064  for (i = 16; i < 48; i++)
2065  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
2066  (sl->ref_list[j][i].reference & 3);
2067  }
2068 
2069  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
2071  "slice:%d %s mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
2072  sl->slice_num,
2073  (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
2074  sl->mb_y * h->mb_width + sl->mb_x,
2076  sl->slice_type_fixed ? " fix" : "",
2077  nal->type == H264_NAL_IDR_SLICE ? " IDR" : "",
2078  h->poc.frame_num,
2079  h->cur_pic_ptr->field_poc[0],
2080  h->cur_pic_ptr->field_poc[1],
2081  sl->ref_count[0], sl->ref_count[1],
2082  sl->qscale,
2083  sl->deblocking_filter,
2085  sl->pwt.use_weight,
2086  sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
2087  sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
2088  }
2089 
2090  return 0;
2091 }
2092 
/*
 * NOTE(review): the signature (doxygen line 2093) was dropped; in upstream
 * FFmpeg this is ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal).
 * Entry point per slice NAL: parses the slice header, handles field/frame
 * boundaries (flushing queued slices and ending fields as needed), starts
 * a new field/frame on the first slice, then queues the slice context for
 * decoding. Returns 0 (possibly after discarding the slice) or a negative
 * AVERROR.
 */
2094 {
 /* NOTE(review): the 'sl' declaration (line 2095) was dropped here. */
2096  int first_slice = sl == h->slice_ctx && !h->current_slice;
2097  int ret;
2098 
2099  sl->gb = nal->gb;
2100 
2101  ret = h264_slice_header_parse(h, sl, nal);
2102  if (ret < 0)
2103  return ret;
2104 
2105  // discard redundant pictures
2106  if (sl->redundant_pic_count > 0) {
2107  sl->ref_count[0] = sl->ref_count[1] = 0;
2108  return 0;
2109  }
2110 
2111  if (sl->first_mb_addr == 0 || !h->current_slice) {
2112  if (h->setup_finished) {
2113  av_log(h->avctx, AV_LOG_ERROR, "Too many fields\n");
2114  return AVERROR_INVALIDDATA;
2115  }
2116  }
2117 
2118  if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
2119  if (h->current_slice) {
2120  // this slice starts a new field
2121  // first decode any pending queued slices
2122  if (h->nb_slice_ctx_queued) {
2123  H264SliceContext tmp_ctx;
2124 
 /* NOTE(review): the execute call whose result is checked below
  * (line 2125) was dropped by the extraction. */
2126  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
2127  return ret;
2128 
 /* Swap this context with slice_ctx[0] so the new field starts
  * from the first slice context. */
2129  memcpy(&tmp_ctx, h->slice_ctx, sizeof(tmp_ctx));
2130  memcpy(h->slice_ctx, sl, sizeof(tmp_ctx));
2131  memcpy(sl, &tmp_ctx, sizeof(tmp_ctx));
2132  sl = h->slice_ctx;
2133  }
2134 
2135  if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
2136  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2137  if (ret < 0)
2138  return ret;
2139  } else if (h->cur_pic_ptr && !FIELD_PICTURE(h) && !h->first_field && h->nal_unit_type == H264_NAL_IDR_SLICE) {
2140  av_log(h, AV_LOG_WARNING, "Broken frame packetizing\n");
2141  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2142  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
2143  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
2144  h->cur_pic_ptr = NULL;
2145  if (ret < 0)
2146  return ret;
2147  } else
2148  return AVERROR_INVALIDDATA;
2149  }
2150 
2151  if (!h->first_field) {
2152  if (h->cur_pic_ptr && !h->droppable) {
 /* NOTE(review): progress-report lines (2153-2154) dropped here. */
2155  }
2156  h->cur_pic_ptr = NULL;
2157  }
2158  }
2159 
2160  if (!h->current_slice)
2161  av_assert0(sl == h->slice_ctx);
2162 
 /* Honor skip_frame before committing to a new picture. NOTE(review):
  * condition lines 2166-2168 were dropped by the extraction. */
2163  if (h->current_slice == 0 && !h->first_field) {
2164  if (
2165  (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
2169  h->avctx->skip_frame >= AVDISCARD_ALL) {
2170  return 0;
2171  }
2172  }
2173 
 /* Parameter sets must stay consistent across slices of one picture. */
2174  if (!first_slice) {
2175  const PPS *pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
2176 
2177  if (h->ps.pps->sps_id != pps->sps_id ||
2178  h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
2179  (h->setup_finished && h->ps.pps != pps)*/) {
2180  av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
2181  return AVERROR_INVALIDDATA;
2182  }
2183  if (h->ps.sps != pps->sps) {
2185  "SPS changed in the middle of the frame\n");
2186  return AVERROR_INVALIDDATA;
2187  }
2188  }
2189 
2190  if (h->current_slice == 0) {
2191  ret = h264_field_start(h, sl, nal, first_slice);
2192  if (ret < 0)
2193  return ret;
2194  } else {
2195  if (h->picture_structure != sl->picture_structure ||
2196  h->droppable != (nal->ref_idc == 0)) {
2198  "Changing field mode (%d -> %d) between slices is not allowed\n",
2200  return AVERROR_INVALIDDATA;
2201  } else if (!h->cur_pic_ptr) {
2203  "unset cur_pic_ptr on slice %d\n",
2204  h->current_slice + 1);
2205  return AVERROR_INVALIDDATA;
2206  }
2207  }
2208 
2209  ret = h264_slice_init(h, sl, nal);
2210  if (ret < 0)
2211  return ret;
2212 
2213  h->nb_slice_ctx_queued++;
2214 
2215  return 0;
2216 }
2217 
2219 {
     /* Map an AVPictureType slice type to the H.264 bitstream slice_type
      * code (P=0, B=1, I=2, SP=3, SI=4); any other type is invalid. */
2220  switch (sl->slice_type) {
2221  case AV_PICTURE_TYPE_P:
2222  return 0;
2223  case AV_PICTURE_TYPE_B:
2224  return 1;
2225  case AV_PICTURE_TYPE_I:
2226  return 2;
2227  case AV_PICTURE_TYPE_SP:
2228  return 3;
2229  case AV_PICTURE_TYPE_SI:
2230  return 4;
2231  default:
2232  return AVERROR_INVALIDDATA;
2233  }
2234 }
2235 
2237  H264SliceContext *sl,
2238  int mb_type, int top_xy,
2239  int left_xy[LEFT_MBS],
2240  int top_type,
2241  int left_type[LEFT_MBS],
2242  int mb_xy, int list)
2243 {
     /* Fill the motion-vector and reference-index caches for one reference
      * list of the current macroblock, as consumed by the loop filter:
      * first the top and left neighbour edges, then the MB itself. */
2244  int b_stride = h->b_stride;
2245  int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
2246  int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
2247  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
      /* Top neighbour: copy its bottom row of MVs and refs into the row
       * above the cache, or mark the whole row as unused. */
2248  if (USES_LIST(top_type, list)) {
2249  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
2250  const int b8_xy = 4 * top_xy + 2;
2251  const int *ref2frm = &h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2252  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
2253  ref_cache[0 - 1 * 8] =
2254  ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
2255  ref_cache[2 - 1 * 8] =
2256  ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
2257  } else {
2258  AV_ZERO128(mv_dst - 1 * 8);
2259  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2260  }
2261 
      /* Left neighbour: only used when current and left MBs have the same
       * interlacing; copy its right column of MVs and refs. */
2262  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
2263  if (USES_LIST(left_type[LTOP], list)) {
2264  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
2265  const int b8_xy = 4 * left_xy[LTOP] + 1;
2266  const int *ref2frm = &h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2267  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
2268  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
2269  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
2270  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
2271  ref_cache[-1 + 0] =
2272  ref_cache[-1 + 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
2273  ref_cache[-1 + 16] =
2274  ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
2275  } else {
2276  AV_ZERO32(mv_dst - 1 + 0);
2277  AV_ZERO32(mv_dst - 1 + 8);
2278  AV_ZERO32(mv_dst - 1 + 16);
2279  AV_ZERO32(mv_dst - 1 + 24);
2280  ref_cache[-1 + 0] =
2281  ref_cache[-1 + 8] =
2282  ref_cache[-1 + 16] =
2283  ref_cache[-1 + 24] = LIST_NOT_USED;
2284  }
2285  }
2286  }
2287 
      /* Current MB does not use this list: zero the MVs, mark all refs
       * unused, and we are done. */
2288  if (!USES_LIST(mb_type, list)) {
2289  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
2290  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2291  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2292  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2293  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2294  return;
2295  }
2296 
      /* Broadcast the four per-8x8 reference indices (remapped through
       * ref2frm) into the 4x4 cache grid, two rows per 8x8 block. */
2297  {
2298  int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
2299  const int *ref2frm = &h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2300  uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
2301  uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
2302  AV_WN32A(&ref_cache[0 * 8], ref01);
2303  AV_WN32A(&ref_cache[1 * 8], ref01);
2304  AV_WN32A(&ref_cache[2 * 8], ref23);
2305  AV_WN32A(&ref_cache[3 * 8], ref23);
2306  }
2307 
      /* Copy the MB's four rows of motion vectors from the picture-wide
       * motion_val array into the cache. */
2308  {
2309  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
2310  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
2311  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
2312  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
2313  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
2314  }
2315 }
2316 
2317 /**
2318  * Fill the caches (neighbour MB types, mv/ref caches and non-zero-count
2318  * cache) that the loop filter needs for the macroblock at sl->mb_xy.
2318  * @return non zero if the loop filter can be skipped
2319  */
2320 static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
2321 {
2322  const int mb_xy = sl->mb_xy;
2323  int top_xy, left_xy[LEFT_MBS];
2324  int top_type, left_type[LEFT_MBS];
2325  uint8_t *nnz;
2326  uint8_t *nnz_cache;
2327 
2328  top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));
2329 
      /* In MBAFF frames the top/left neighbours depend on the field/frame
       * flags of the current and left MB pairs; adjust them here. */
2330  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
2331  if (FRAME_MBAFF(h)) {
2332  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
2333  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
2334  if (sl->mb_y & 1) {
2335  if (left_mb_field_flag != curr_mb_field_flag)
2336  left_xy[LTOP] -= h->mb_stride;
2337  } else {
2338  if (curr_mb_field_flag)
2339  top_xy += h->mb_stride &
2340  (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
2341  if (left_mb_field_flag != curr_mb_field_flag)
2342  left_xy[LBOT] += h->mb_stride;
2343  }
2344  }
2345 
2346  sl->top_mb_xy = top_xy;
2347  sl->left_mb_xy[LTOP] = left_xy[LTOP];
2348  sl->left_mb_xy[LBOT] = left_xy[LBOT];
2349  {
2350  /* For sufficiently low qp, filtering wouldn't do anything.
2351  * This is a conservative estimate: could also check beta_offset
2352  * and more accurate chroma_qp. */
2353  int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
2354  int qp = h->cur_pic.qscale_table[mb_xy];
2355  if (qp <= qp_thresh &&
2356  (left_xy[LTOP] < 0 ||
2357  ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
2358  (top_xy < 0 ||
2359  ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
2360  if (!FRAME_MBAFF(h))
2361  return 1;
      /* MBAFF: also check the alternate left/top field neighbours
       * before declaring the filter skippable. */
2362  if ((left_xy[LTOP] < 0 ||
2363  ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
2364  (top_xy < h->mb_stride ||
2365  ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
2366  return 1;
2367  }
2368  }
2369 
      /* Mask out neighbours that belong to another slice (deblocking mode
       * 2: filter does not cross slice boundaries) or to no slice at all
       * (slice_table == 0xFFFF). */
2370  top_type = h->cur_pic.mb_type[top_xy];
2371  left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
2372  left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
2373  if (sl->deblocking_filter == 2) {
2374  if (h->slice_table[top_xy] != sl->slice_num)
2375  top_type = 0;
2376  if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
2377  left_type[LTOP] = left_type[LBOT] = 0;
2378  } else {
2379  if (h->slice_table[top_xy] == 0xFFFF)
2380  top_type = 0;
2381  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
2382  left_type[LTOP] = left_type[LBOT] = 0;
2383  }
2384  sl->top_type = top_type;
2385  sl->left_type[LTOP] = left_type[LTOP];
2386  sl->left_type[LBOT] = left_type[LBOT];
2387 
      /* Intra MBs need no mv/ref caches; the filter still runs. */
2388  if (IS_INTRA(mb_type))
2389  return 0;
2390 
2391  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2392  top_type, left_type, mb_xy, 0);
2393  if (sl->list_count == 2)
2394  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2395  top_type, left_type, mb_xy, 1);
2396 
      /* Load the non-zero-count cache: current MB rows, then the bottom
       * row of the top neighbour and the right column of the left one. */
2397  nnz = h->non_zero_count[mb_xy];
2398  nnz_cache = sl->non_zero_count_cache;
2399  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
2400  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
2401  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
2402  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
2403  sl->cbp = h->cbp_table[mb_xy];
2404 
2405  if (top_type) {
2406  nnz = h->non_zero_count[top_xy];
2407  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
2408  }
2409 
2410  if (left_type[LTOP]) {
2411  nnz = h->non_zero_count[left_xy[LTOP]];
2412  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
2413  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
2414  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
2415  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
2416  }
2417 
2418  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
2419  * from what the loop filter needs */
2420  if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
2421  if (IS_8x8DCT(top_type)) {
2422  nnz_cache[4 + 8 * 0] =
2423  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
2424  nnz_cache[6 + 8 * 0] =
2425  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
2426  }
2427  if (IS_8x8DCT(left_type[LTOP])) {
2428  nnz_cache[3 + 8 * 1] =
2429  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
2430  }
2431  if (IS_8x8DCT(left_type[LBOT])) {
2432  nnz_cache[3 + 8 * 3] =
2433  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
2434  }
2435 
      /* For an 8x8-DCT MB, derive all sixteen 4x4 NNZ entries from the
       * four coded-block-pattern bits (one per 8x8 block). */
2436  if (IS_8x8DCT(mb_type)) {
2437  nnz_cache[scan8[0]] =
2438  nnz_cache[scan8[1]] =
2439  nnz_cache[scan8[2]] =
2440  nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;
2441 
2442  nnz_cache[scan8[0 + 4]] =
2443  nnz_cache[scan8[1 + 4]] =
2444  nnz_cache[scan8[2 + 4]] =
2445  nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;
2446 
2447  nnz_cache[scan8[0 + 8]] =
2448  nnz_cache[scan8[1 + 8]] =
2449  nnz_cache[scan8[2 + 8]] =
2450  nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;
2451 
2452  nnz_cache[scan8[0 + 12]] =
2453  nnz_cache[scan8[1 + 12]] =
2454  nnz_cache[scan8[2 + 12]] =
2455  nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
2456  }
2457  }
2458 
2459  return 0;
2460 }
2461 
     /* Run the deblocking filter over macroblock columns [start_x, end_x)
      * of the current row (both rows of a pair under FRAME_MBAFF).
      * Temporarily repurposes sl->mb_x/mb_y/mb_xy; restores mb_x/mb_y and
      * the chroma QPs on exit. No-op while filtering is postponed. */
2462 static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
2463 {
2464  uint8_t *dest_y, *dest_cb, *dest_cr;
2465  int linesize, uvlinesize, mb_x, mb_y;
2466  const int end_mb_y = sl->mb_y + FRAME_MBAFF(h);
2467  const int old_slice_type = sl->slice_type;
2468  const int pixel_shift = h->pixel_shift;
2469  const int block_h = 16 >> h->chroma_y_shift;
2470 
2471  if (h->postpone_filter)
2472  return;
2473 
2474  if (sl->deblocking_filter) {
2475  for (mb_x = start_x; mb_x < end_x; mb_x++)
2476  for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
2477  int mb_xy, mb_type;
2478  mb_xy = sl->mb_xy = mb_x + mb_y * h->mb_stride;
2479  mb_type = h->cur_pic.mb_type[mb_xy];
2480 
2481  if (FRAME_MBAFF(h))
2482  sl->mb_mbaff =
2483  sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
2484 
      /* Compute the luma/chroma destination pointers for this MB. */
2485  sl->mb_x = mb_x;
2486  sl->mb_y = mb_y;
2487  dest_y = h->cur_pic.f->data[0] +
2488  ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
2489  dest_cb = h->cur_pic.f->data[1] +
2490  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2491  mb_y * sl->uvlinesize * block_h;
2492  dest_cr = h->cur_pic.f->data[2] +
2493  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2494  mb_y * sl->uvlinesize * block_h;
2495  // FIXME simplify above
2496 
      /* Field MBs use doubled strides; the bottom field of a pair also
       * needs its base pointers rewound to the field's first line. */
2497  if (MB_FIELD(sl)) {
2498  linesize = sl->mb_linesize = sl->linesize * 2;
2499  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
2500  if (mb_y & 1) { // FIXME move out of this function?
2501  dest_y -= sl->linesize * 15;
2502  dest_cb -= sl->uvlinesize * (block_h - 1);
2503  dest_cr -= sl->uvlinesize * (block_h - 1);
2504  }
2505  } else {
2506  linesize = sl->mb_linesize = sl->linesize;
2507  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
2508  }
2509  backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
2510  uvlinesize, 0);
      /* fill_filter_caches() returns non-zero when filtering this MB
       * would be a no-op (low enough qp everywhere). */
2511  if (fill_filter_caches(h, sl, mb_type))
2512  continue;
2513  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
2514  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);
2515 
2516  if (FRAME_MBAFF(h)) {
2517  ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
2518  linesize, uvlinesize);
2519  } else {
2520  ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
2521  dest_cr, linesize, uvlinesize);
2522  }
2523  }
2524  }
      /* Restore slice state clobbered by the loop above. */
2525  sl->slice_type = old_slice_type;
2526  sl->mb_x = end_x;
2527  sl->mb_y = end_mb_y - FRAME_MBAFF(h);
2528  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
2529  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
2530 }
2531 
2533 {
     /* Predict the MBAFF field-decoding flag for the current MB from the
      * left neighbour if it belongs to the same slice, otherwise from the
      * top neighbour, otherwise default to frame coding (0). */
2534  const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
2535  int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
2536  h->cur_pic.mb_type[mb_xy - 1] :
2537  (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
2538  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
2539  sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
2540 }
2541 
2542 /**
2543  * Draw edges and report progress for the last MB row.
2544  */
2546 {
2547  int top = 16 * (sl->mb_y >> FIELD_PICTURE(h));
2548  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
2549  int height = 16 << FRAME_MBAFF(h);
2550  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
2551 
      /* With deblocking enabled the just-finished row can still change the
       * previous rows; widen/shift the reported band accordingly. */
2552  if (sl->deblocking_filter) {
2553  if ((top + height) >= pic_height)
2554  height += deblock_border;
2555  top -= deblock_border;
2556  }
2557 
2558  if (top >= pic_height || (top + height) < 0)
2559  return;
2560 
      /* Clamp the band to the picture. */
2561  height = FFMIN(height, pic_height - top);
2562  if (top < 0) {
2563  height = top + height;
2564  top = 0;
2565  }
2566 
2567  ff_h264_draw_horiz_band(h, sl, top, height);
2568 
      /* Do not report progress for droppable pictures or after an ER
       * error; other threads must not consume questionable rows. */
2569  if (h->droppable || sl->h264->slice_ctx[0].er.error_occurred)
2570  return;
2571 
      /* NOTE(review): the second argument line of this call is elided in
       * this extract (presumably the field index) -- confirm upstream. */
2572  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
2574 }
2575 
2577  int startx, int starty,
2578  int endx, int endy, int status)
2579 {
     /* Forward an error-status rectangle to the shared (slice_ctx[0])
      * error-resilience context; no-op when ER is disabled. */
2580  if (!sl->h264->enable_er)
2581  return;
2582 
2583  if (CONFIG_ERROR_RESILIENCE) {
2584  ERContext *er = &sl->h264->slice_ctx[0].er;
2585 
2586  ff_er_add_slice(er, startx, starty, endx, endy, status);
2587  }
2588 }
2589 
     /* Worker entry point (run via avctx->execute): decode every MB of one
      * slice, CABAC or CAVLC, running the loop filter row by row and
      * reporting decoded regions to the error-resilience layer.
      * NOTE(review): a few hyperlinked lines are elided in this extract;
      * hedged comments mark those spots. */
2590 static int decode_slice(struct AVCodecContext *avctx, void *arg)
2591 {
2592  H264SliceContext *sl = arg;
2593  const H264Context *h = sl->h264;
2594  int lf_x_start = sl->mb_x;
2595  int orig_deblock = sl->deblocking_filter;
2596  int ret;
2597 
2598  sl->linesize = h->cur_pic_ptr->f->linesize[0];
2599  sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];
2600 
2601  ret = alloc_scratch_buffers(sl, sl->linesize);
2602  if (ret < 0)
2603  return ret;
2604 
      /* -1 = no skip-run value read yet (CAVLC P/B skip handling). */
2605  sl->mb_skip_run = -1;
2606 
2607  av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * sl->linesize * ((scan8[15] - scan8[0]) >> 3));
2608 
      /* When filtering is postponed to the master thread, disable it for
       * this pass; orig_deblock is restored at "finish". */
2609  if (h->postpone_filter)
2610  sl->deblocking_filter = 0;
2611 
2612  sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
2613  (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
2614 
      /* NOTE(review): the opening condition of this block is elided in
       * this extract. Body: if the MB before the resync point did not end
       * cleanly, flag an ER error for the shared context. */
2616  const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
2617  if (start_i) {
2618  int prev_status = h->slice_ctx[0].er.error_status_table[h->slice_ctx[0].er.mb_index2xy[start_i - 1]];
2619  prev_status &= ~ VP_START;
2620  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
2621  h->slice_ctx[0].er.error_occurred = 1;
2622  }
2623  }
2624 
2625  if (h->ps.pps->cabac) {
2626  /* realign */
2627  align_get_bits(&sl->gb);
2628 
2629  /* init cabac */
2630  ret = ff_init_cabac_decoder(&sl->cabac,
2631  sl->gb.buffer + get_bits_count(&sl->gb) / 8,
2632  (get_bits_left(&sl->gb) + 7) / 8);
2633  if (ret < 0)
2634  return ret;
2635 
      /* NOTE(review): a CABAC state-initialization call is elided here in
       * this extract -- confirm upstream. */
2637 
2638  for (;;) {
2639  int ret, eos;
      /* Guard against slices whose MB range runs into the next queued
       * slice (corrupt first_mb_addr). */
2640  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2641  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2642  sl->next_slice_idx);
2643  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2644  sl->mb_y, ER_MB_ERROR);
2645  return AVERROR_INVALIDDATA;
2646  }
2647 
2648  ret = ff_h264_decode_mb_cabac(h, sl);
2649 
2650  if (ret >= 0)
2651  ff_h264_hl_decode_mb(h, sl);
2652 
2653  // FIXME optimal? or let mb_decode decode 16x32 ?
2654  if (ret >= 0 && FRAME_MBAFF(h)) {
2655  sl->mb_y++;
2656 
2657  ret = ff_h264_decode_mb_cabac(h, sl);
2658 
2659  if (ret >= 0)
2660  ff_h264_hl_decode_mb(h, sl);
2661  sl->mb_y--;
2662  }
2663  eos = get_cabac_terminate(&sl->cabac);
2664 
      /* Some encoders truncate the last bytes; accept a small overread
       * and end the slice cleanly when that workaround is enabled. */
2665  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
2666  sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
2667  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2668  sl->mb_y, ER_MB_END);
2669  if (sl->mb_x >= lf_x_start)
2670  loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
2671  goto finish;
2672  }
2673  if (sl->cabac.bytestream > sl->cabac.bytestream_end + 2 )
2674  av_log(h->avctx, AV_LOG_DEBUG, "bytestream overread %"PTRDIFF_SPECIFIER"\n", sl->cabac.bytestream_end - sl->cabac.bytestream);
2675  if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 4) {
2677  "error while decoding MB %d %d, bytestream %"PTRDIFF_SPECIFIER"\n",
2678  sl->mb_x, sl->mb_y,
2679  sl->cabac.bytestream_end - sl->cabac.bytestream;
2680  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2681  sl->mb_y, ER_MB_ERROR);
2682  return AVERROR_INVALIDDATA;
2683  }
2684 
      /* End of row: filter the finished row, report progress, advance. */
2685  if (++sl->mb_x >= h->mb_width) {
2686  loop_filter(h, sl, lf_x_start, sl->mb_x);
2687  sl->mb_x = lf_x_start = 0;
2688  decode_finish_row(h, sl);
2689  ++sl->mb_y;
2690  if (FIELD_OR_MBAFF_PICTURE(h)) {
2691  ++sl->mb_y;
      /* NOTE(review): the statement inside this if is elided in this
       * extract (presumably predict_field_decoding_flag) -- confirm. */
2692  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2694  }
2695  }
2696 
2697  if (eos || sl->mb_y >= h->mb_height) {
2698  ff_tlog(h->avctx, "slice end %d %d\n",
2699  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2700  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2701  sl->mb_y, ER_MB_END);
2702  if (sl->mb_x > lf_x_start)
2703  loop_filter(h, sl, lf_x_start, sl->mb_x);
2704  goto finish;
2705  }
2706  }
2707  } else {
      /* CAVLC path: same structure as above, but slice end is detected by
       * bit-exhaustion instead of a terminate symbol. */
2708  for (;;) {
2709  int ret;
2710 
2711  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2712  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2713  sl->next_slice_idx);
2714  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2715  sl->mb_y, ER_MB_ERROR);
2716  return AVERROR_INVALIDDATA;
2717  }
2718 
2719  ret = ff_h264_decode_mb_cavlc(h, sl);
2720 
2721  if (ret >= 0)
2722  ff_h264_hl_decode_mb(h, sl);
2723 
2724  // FIXME optimal? or let mb_decode decode 16x32 ?
2725  if (ret >= 0 && FRAME_MBAFF(h)) {
2726  sl->mb_y++;
2727  ret = ff_h264_decode_mb_cavlc(h, sl);
2728 
2729  if (ret >= 0)
2730  ff_h264_hl_decode_mb(h, sl);
2731  sl->mb_y--;
2732  }
2733 
2734  if (ret < 0) {
2736  "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
2737  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2738  sl->mb_y, ER_MB_ERROR);
2739  return ret;
2740  }
2741 
2742  if (++sl->mb_x >= h->mb_width) {
2743  loop_filter(h, sl, lf_x_start, sl->mb_x);
2744  sl->mb_x = lf_x_start = 0;
2745  decode_finish_row(h, sl);
2746  ++sl->mb_y;
2747  if (FIELD_OR_MBAFF_PICTURE(h)) {
2748  ++sl->mb_y;
      /* NOTE(review): statement elided in this extract (presumably
       * predict_field_decoding_flag) -- confirm upstream. */
2749  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2751  }
      /* Last row done: slice must end exactly here unless aggressive
       * error detection is off. */
2752  if (sl->mb_y >= h->mb_height) {
2753  ff_tlog(h->avctx, "slice end %d %d\n",
2754  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2755 
2756  if ( get_bits_left(&sl->gb) == 0
2757  || get_bits_left(&sl->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
2758  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2759  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2760 
2761  goto finish;
2762  } else {
2763  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2764  sl->mb_x, sl->mb_y, ER_MB_END);
2765 
2766  return AVERROR_INVALIDDATA;
2767  }
2768  }
2769  }
2770 
      /* Bits exhausted mid-picture: clean end only if no skip-run is
       * pending and we landed exactly on the end of the bitstream. */
2771  if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
2772  ff_tlog(h->avctx, "slice end %d %d\n",
2773  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2774 
2775  if (get_bits_left(&sl->gb) == 0) {
2776  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2777  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2778  if (sl->mb_x > lf_x_start)
2779  loop_filter(h, sl, lf_x_start, sl->mb_x);
2780 
2781  goto finish;
2782  } else {
2783  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2784  sl->mb_y, ER_MB_ERROR);
2785 
2786  return AVERROR_INVALIDDATA;
2787  }
2788  }
2789  }
2790  }
2791 
2792 finish:
2793  sl->deblocking_filter = orig_deblock;
2794  return 0;
2795 }
2796 
2797 /**
2798  * Call decode_slice() for each context.
2799  *
2800  * @param h h264 master context
2801  */
2803 {
2804  AVCodecContext *const avctx = h->avctx;
2805  H264SliceContext *sl;
2806  int context_count = h->nb_slice_ctx_queued;
2807  int ret = 0;
2808  int i, j;
2809 
2810  h->slice_ctx[0].next_slice_idx = INT_MAX;
2811 
      /* Hardware acceleration decodes elsewhere; nothing queued is fine. */
2812  if (h->avctx->hwaccel || context_count < 1)
2813  return 0;
2814 
2815  av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
2816 
      /* Single slice: decode directly, no overlap bookkeeping needed. */
2817  if (context_count == 1) {
2818 
2819  h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
2820  h->postpone_filter = 0;
2821 
2822  ret = decode_slice(avctx, &h->slice_ctx[0]);
2823  h->mb_y = h->slice_ctx[0].mb_y;
2824  if (ret < 0)
2825  goto finish;
2826  } else {
2827  av_assert0(context_count > 0);
2828  for (i = 0; i < context_count; i++) {
2829  int next_slice_idx = h->mb_width * h->mb_height;
2830  int slice_idx;
2831 
2832  sl = &h->slice_ctx[i];
2833  if (CONFIG_ERROR_RESILIENCE) {
2834  sl->er.error_count = 0;
2835  }
2836 
2837  /* make sure none of those slices overlap */
2838  slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
2839  for (j = 0; j < context_count; j++) {
2840  H264SliceContext *sl2 = &h->slice_ctx[j];
2841  int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;
2842 
2843  if (i == j || slice_idx2 < slice_idx)
2844  continue;
2845  next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
2846  }
      /* Each slice may decode only up to the start of the nearest
       * slice that begins after it. */
2847  sl->next_slice_idx = next_slice_idx;
2848  }
2849 
2850  avctx->execute(avctx, decode_slice, h->slice_ctx,
2851  NULL, context_count, sizeof(h->slice_ctx[0]));
2852 
2853  /* pull back stuff from slices to master context */
2854  sl = &h->slice_ctx[context_count - 1];
2855  h->mb_y = sl->mb_y;
      /* NOTE(review): the loop body accumulating the per-slice ER error
       * counts into slice_ctx[0] is elided in this extract -- confirm. */
2856  if (CONFIG_ERROR_RESILIENCE) {
2857  for (i = 1; i < context_count; i++)
2859  }
2860 
      /* Filtering was postponed during parallel decode (slices may touch
       * neighbouring rows); run it now, sequentially, per slice. */
2861  if (h->postpone_filter) {
2862  h->postpone_filter = 0;
2863 
2864  for (i = 0; i < context_count; i++) {
2865  int y_end, x_end;
2866 
2867  sl = &h->slice_ctx[i];
2868  y_end = FFMIN(sl->mb_y + 1, h->mb_height);
2869  x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;
2870 
2871  for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
2872  sl->mb_y = j;
2873  loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
2874  j == y_end - 1 ? x_end : h->mb_width);
2875  }
2876  }
2877  }
2878  }
2879 
2880 finish:
2881  h->nb_slice_ctx_queued = 0;
2882  return ret;
2883 }
int chroma_format_idc
Definition: h264_ps.h:48
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
int video_signal_type_present_flag
Definition: h264_ps.h:74
struct H264Context * h264
Definition: h264dec.h:184
#define AV_EF_AGGRESSIVE
consider things that a sane encoder should not do as an error
Definition: avcodec.h:1671
#define ff_tlog(ctx,...)
Definition: internal.h:86
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
int ff_thread_can_start_frame(AVCodecContext *avctx)
const struct AVCodec * codec
Definition: avcodec.h:535
AVRational framerate
Definition: avcodec.h:2069
discard all frames except keyframes
Definition: avcodec.h:235
static void init_dimensions(H264Context *h)
Definition: h264_slice.c:890
int nb_mmco
Definition: h264dec.h:480
int workaround_bugs
Definition: h264dec.h:373
int long_ref
1->long term reference 0->short term reference
Definition: h264dec.h:154
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int sei_recovery_frame_cnt
Definition: h264dec.h:163
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2093
H264POCContext poc
Definition: h264dec.h:466
int mb_num
Definition: h264dec.h:443
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
int mb_aff_frame
Definition: h264dec.h:412
int recovery_frame_cnt
recovery_frame_cnt
Definition: h264_sei.h:141
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:305
enum AVStereo3DView view
Determines which views are packed.
Definition: stereo3d.h:190
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:409
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:241
int edge_emu_buffer_allocated
Definition: h264dec.h:293
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:714
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
Draw edges and report progress for the last MB row.
Definition: h264_slice.c:2545
int first_field
Definition: h264dec.h:414
uint8_t field_scan8x8_q0[64]
Definition: h264dec.h:437
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
#define ER_MB_END
AVFrame * f
Definition: thread.h:35
int weighted_bipred_idc
Definition: h264_ps.h:119
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl)
Definition: h264_refs.c:299
int left_mb_xy[LEFT_MBS]
Definition: h264dec.h:217
int chroma_qp_index_offset[2]
Definition: h264_ps.h:122
AVBufferRef * sps_list[MAX_SPS_COUNT]
Definition: h264_ps.h:144
const uint8_t * bytestream_end
Definition: cabac.h:49
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
Get the chroma qp.
Definition: h264dec.h:687
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:499
hardware decoding through Videotoolbox
Definition: pixfmt.h:282
H264ChromaContext h264chroma
Definition: h264dec.h:348
uint16_t * cbp_table
Definition: h264dec.h:419
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264_parse.h:35
MMCO mmco[MAX_MMCO_COUNT]
memory management control operations buffer.
Definition: h264dec.h:479
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
Initialize implicit_weight table.
Definition: h264_slice.c:679
#define avpriv_request_sample(...)
Sequence parameter set.
Definition: h264_ps.h:44
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1161
int mb_y
Definition: h264dec.h:440
int coded_picture_number
Definition: h264dec.h:369
int bitstream_restriction_flag
Definition: h264_ps.h:87
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
H264SEIAlternativeTransfer alternative_transfer
Definition: h264_sei.h:194
int num
Numerator.
Definition: rational.h:59
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:168
AVBufferRef * mb_type_buf
Definition: h264dec.h:138
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:450
int bipred_scratchpad_allocated
Definition: h264dec.h:292
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:117
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
Definition: diracdec.c:67
Frame contains only the right view.
Definition: stereo3d.h:161
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:415
#define VP_START
< current MB is the first after a resync marker
AVBufferPool * mb_type_pool
Definition: h264dec.h:556
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc, const SPS *sps, H264POCContext *pc, int picture_structure, int nal_ref_idc)
Definition: h264_parse.c:277
int chroma_x_shift
Definition: h264dec.h:366
const uint8_t * buffer
Definition: get_bits.h:62
Picture parameter set.
Definition: h264_ps.h:111
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:905
int16_t(*[2] motion_val)[2]
Definition: h264dec.h:136
int flags
Definition: h264dec.h:372
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:439
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
int frame_mbs_only_flag
Definition: h264_ps.h:62
int mb_height
Definition: h264dec.h:441
H264Picture * delayed_pic[MAX_DELAYED_PIC_COUNT+2]
Definition: h264dec.h:471
int is_avc
Used to parse AVC variant of H.264.
Definition: h264dec.h:456
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
AVBufferPool * ref_index_pool
Definition: h264dec.h:558
int height_from_caller
Definition: h264dec.h:549
uint8_t zigzag_scan8x8_cavlc[64]
Definition: h264dec.h:429
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:403
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
ERPicture last_pic
H264SEIDisplayOrientation display_orientation
Definition: h264_sei.h:192
mpegvideo header.
int current_frame_is_frame0_flag
Definition: h264_sei.h:157
int next_slice_idx
Definition: h264dec.h:242
static const uint8_t zigzag_scan8x8_cavlc[64+1]
Definition: h264_slice.c:96
H264Context.
Definition: h264dec.h:343
discard all non intra frames
Definition: avcodec.h:234
discard all
Definition: avcodec.h:236
AVFrame * f
Definition: h264dec.h:129
const PPS * pps
Definition: h264dec.h:166
Views are next to each other.
Definition: stereo3d.h:67
size_t crop_bottom
Definition: frame.h:669
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
uint32_t num_units_in_tick
Definition: h264_ps.h:83
static const uint8_t field_scan[16+1]
Definition: h264_slice.c:50
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1690
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them.reget_buffer() and buffer age optimizations no longer work.*The contents of buffers must not be written to after ff_thread_report_progress() has been called on them.This includes draw_edges().Porting codecs to frame threading
H264Picture * long_ref[32]
Definition: h264dec.h:470
int profile
profile
Definition: avcodec.h:1859
int picture_structure
Definition: h264dec.h:413
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
Definition: pixfmt.h:513
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:273
#define IN_RANGE(a, b, size)
Definition: h264_slice.c:273
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264_slice.c:275
MMCO mmco[MAX_MMCO_COUNT]
Definition: h264dec.h:328
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
Frame contains only the left view.
Definition: stereo3d.h:156
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:296
Switching Intra.
Definition: avutil.h:278
int setup_finished
Definition: h264dec.h:540
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2004
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2802
H264SEIContext sei
Definition: h264dec.h:553
AVBufferRef * buf_ref
Definition: h264_sei.h:124
int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps, void *logctx)
Parse the contents of a picture timing message given an active SPS.
Definition: h264_sei.c:62
unsigned int crop_top
frame_cropping_rect_top_offset
Definition: h264_ps.h:70
#define USES_LIST(a, list)
Definition: mpegutils.h:99
void ff_color_frame(AVFrame *frame, const int color[4])
Definition: utils.c:414
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
const uint8_t * bytestream
Definition: cabac.h:48
int ref2frm[MAX_SLICES][2][64]
reference to frame number lists, used in the loop filter, the first 2 are for -2,-1 ...
Definition: h264dec.h:559
int deblocking_filter_parameters_present
deblocking_filter_parameters_present_flag
Definition: h264_ps.h:123
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:215
const PPS * pps
Definition: h264_ps.h:149
4: bottom field, top field, in that order
Definition: h264_sei.h:51
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
Definition: h264_slice.c:1014
uint8_t
int full_range
Definition: h264_ps.h:75
unsigned int crop_left
frame_cropping_rect_left_offset
Definition: h264_ps.h:68
int gaps_in_frame_num_allowed_flag
Definition: h264_ps.h:58
#define MB_MBAFF(h)
Definition: h264dec.h:71
int slice_alpha_c0_offset
Definition: h264dec.h:200
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:176
int poc
Definition: h264dec.h:177
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
Definition: h264_picture.c:136
int field_picture
whether or not picture was encoded in separate fields
Definition: h264dec.h:158
int bit_depth_chroma
bit_depth_chroma_minus8 + 8
Definition: h264_ps.h:101
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:799
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1612
size_t crop_left
Definition: frame.h:670
enum AVColorPrimaries color_primaries
Definition: h264_ps.h:77
int poc
frame POC
Definition: h264dec.h:148
int frame_num_offset
for POC type 2
Definition: h264_parse.h:51
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264_parse.h:36
Multithreading support functions.
#define ER_MB_ERROR
int cabac
entropy_coding_mode_flag
Definition: h264_ps.h:113
#define MB_FIELD(sl)
Definition: h264dec.h:72
const char * from
Definition: jacosubdec.c:65
unsigned int crop_right
frame_cropping_rect_right_offset
Definition: h264_ps.h:69
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:414
uint8_t(*[2] top_borders)[(16 *3)*2]
Definition: h264dec.h:291
int invalid_gap
Definition: h264dec.h:162
ERPicture cur_pic
int frame_recovered
Initial frame has been completely recovered.
Definition: h264dec.h:530
Structure to hold side data for an AVFrame.
Definition: frame.h:214
int height
Definition: h264dec.h:365
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
#define height
#define MAX_PPS_COUNT
Definition: h264_ps.h:38
int pt
Definition: rtp.c:35
int transform_bypass
qpprime_y_zero_transform_bypass_flag
Definition: h264_ps.h:49
static void finish(void)
Definition: movenc.c:345
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
#define ER_MV_END
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:121
int redundant_pic_cnt_present
redundant_pic_cnt_present_flag
Definition: h264_ps.h:125
int picture_structure
Definition: h264dec.h:246
int chroma_y_shift
Definition: h264dec.h:366
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
AVDictionary * metadata
metadata.
Definition: frame.h:594
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:455
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
AVBufferRef * qscale_table_buf
Definition: h264dec.h:132
static int h264_export_frame_props(H264Context *h)
Definition: h264_slice.c:1144
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:404
H264Picture * parent
Definition: h264dec.h:180
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264dec.h:528
H264SEIAFD afd
Definition: h264_sei.h:186
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
int recovered
picture at IDR or recovery point + recovery count
Definition: h264dec.h:161
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:89
#define AV_COPY64(d, s)
Definition: intreadwrite.h:605
int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx)
Definition: h264_refs.c:423
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1168
#define FFALIGN(x, a)
Definition: macros.h:48
int chroma_qp[2]
Definition: h264dec.h:194
#define av_log(a,...)
int last_pocs[MAX_DELAYED_PIC_COUNT]
Definition: h264dec.h:472
const char * to
Definition: webvttdec.c:34
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:121
int width
Definition: h264dec.h:365
static int h264_frame_start(H264Context *h)
Definition: h264_slice.c:478
H.264 common definitions.
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:103
#define U(x)
Definition: vp56_arith.h:37
#define src
Definition: vp8dsp.c:254
int timecode_cnt
Number of timecode in use.
Definition: h264_sei.h:115
#define HWACCEL_MAX
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
H.264 parameter set handling.
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264dec.h:351
enum AVColorTransferCharacteristic color_trc
Definition: h264_ps.h:78
int mb_aff
mb_adaptive_frame_field_flag
Definition: h264_ps.h:63
H264PredContext hpc
Definition: h264dec.h:392
int chroma_log2_weight_denom
Definition: h264_parse.h:34
int width
Definition: frame.h:366
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
#define td
Definition: regdef.h:70
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to 8190.
Definition: golomb.h:55
static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
Definition: h264_slice.c:128
int poc_type
pic_order_cnt_type
Definition: h264_ps.h:51
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
int context_initialized
Definition: h264dec.h:371
#define PTRDIFF_SPECIFIER
Definition: internal.h:228
ERContext er
Definition: h264dec.h:186
int nal_unit_type
Definition: h264dec.h:449
int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb, const H2645NAL *nal, void *logctx)
Definition: h264_refs.c:834
int ff_h264_get_profile(const SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264_parse.c:529
int num_reorder_frames
Definition: h264_ps.h:88
discard all bidirectional frames
Definition: avcodec.h:233
H264_SEI_FpaType arrangement_type
Definition: h264_sei.h:153
void * hwaccel_picture_private
hardware accelerator private data
Definition: h264dec.h:142
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
Display matrix.
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1804
static const uint8_t field_scan8x8[64+1]
Definition: h264_slice.c:57
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
#define FIELD_PICTURE(h)
Definition: h264dec.h:74
int picture_idr
Definition: h264dec.h:384
const char * arg
Definition: jacosubdec.c:66
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264dec.h:199
uint8_t(*[2] mvd_table)[2]
Definition: h264dec.h:423
int prev_interlaced_frame
Complement sei_pic_struct SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced...
Definition: h264dec.h:504
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
Definition: h264_slice.c:2320
ThreadFrame tf
Definition: h264dec.h:130
simple assert() macros that are a bit more flexible than ISO C assert().
int weighted_pred
weighted_pred_flag
Definition: h264_ps.h:118
#define PICT_TOP_FIELD
Definition: mpegutils.h:37
H264QpelContext h264qpel
Definition: h264dec.h:349
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:402
int direct_spatial_mv_pred
Definition: h264dec.h:257
H264SEIUnregistered unregistered
Definition: h264_sei.h:188
int frame_num
frame_num (raw frame_num from slice header)
Definition: h264dec.h:149
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
#define MAX_SLICES
Definition: dxva2_hevc.c:29
int valid_recovery_point
Are the SEI recovery points looking valid.
Definition: h264dec.h:509
GLsizei count
Definition: opengl_enc.c:108
int ff_h264_get_slice_type(const H264SliceContext *sl)
Reconstruct bitstream slice_type.
Definition: h264_slice.c:2218
#define FFMAX(a, b)
Definition: common.h:94
#define fail()
Definition: checkasm.h:123
uint8_t active_format_description
Definition: h264_sei.h:120
int delta_pic_order_always_zero_flag
Definition: h264_ps.h:53
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:422
int * mb_index2xy
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264dec.h:190
uint8_t zigzag_scan8x8[64]
Definition: h264dec.h:428
AVBufferRef * hwaccel_priv_buf
Definition: h264dec.h:141
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array through a pointer to a pointer.
Definition: mem.c:206
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
int crop_bottom
Definition: h264dec.h:389
uint8_t * error_status_table
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames.The frames must then be freed with ff_thread_release_buffer().Otherwise decode directly into the user-supplied frames.Call ff_thread_report_progress() after some part of the current picture has decoded.A good place to put this is where draw_horiz_band() is called-add this if it isn't called anywhere
size_t crop_top
Definition: frame.h:668
Views are alternated temporally.
Definition: stereo3d.h:92
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2], GetBitContext *gb, const PPS *pps, int slice_type_nos, int picture_structure, void *logctx)
Definition: h264_parse.c:219
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264dec.h:457
useful rectangle filling function
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:397
int refs
number of reference frames
Definition: avcodec.h:1114
int prev_poc_msb
poc_msb of the last reference pic for POC type 0
Definition: h264_parse.h:49
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
AVBufferRef * motion_val_buf[2]
Definition: h264dec.h:135
int ref_frame_count
num_ref_frames
Definition: h264_ps.h:57
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: codec.h:211
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:391
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:545
H264_SEI_PicStructType pic_struct
Definition: h264_sei.h:88
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1655
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
int x264_build
Definition: h264dec.h:374
int ct_type
Bit set of clock types for fields/frames in picture timing SEI message.
Definition: h264_sei.h:95
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
Definition: mem.c:507
#define FFMIN(a, b)
Definition: common.h:96
uint16_t * slice_table
slice_table_base + 2*mb_stride + 1
Definition: h264dec.h:409
static void copy_picture_range(H264Picture **to, H264Picture **from, int count, H264Context *new_base, H264Context *old_base)
Definition: h264_slice.c:280
static int h264_field_start(H264Context *h, const H264SliceContext *sl, const H2645NAL *nal, int first_slice)
Definition: h264_slice.c:1460
uint8_t field_scan8x8_cavlc[64]
Definition: h264dec.h:432
#define IS_DIRECT(a)
Definition: mpegutils.h:84
CABACContext cabac
Cabac.
Definition: h264dec.h:324
int colour_description_present_flag
Definition: h264_ps.h:76
unsigned int first_mb_addr
Definition: h264dec.h:240
int reference
Definition: h264dec.h:160
static void er_add_slice(H264SliceContext *sl, int startx, int starty, int endx, int endy, int status)
Definition: h264_slice.c:2576
#define LEFT_MBS
Definition: h264dec.h:75
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
AVRational sar
Definition: h264_ps.h:73
#define width
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:707
int width
picture width / height.
Definition: avcodec.h:699
int redundant_pic_count
Definition: h264dec.h:250
int nb_slice_ctx
Definition: h264dec.h:357
uint8_t w
Definition: llviddspenc.c:38
H264PredWeightTable pwt
Definition: h264dec.h:203
int long_ref_count
number of actual long term references
Definition: h264dec.h:484
#define ER_DC_END
uint32_t * mb_type
Definition: h264dec.h:139
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:533
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
Definition: h264_slice.c:1025
int size_in_bits
Definition: get_bits.h:68
int32_t
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:176
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1140
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
Definition: timecode.c:139
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1797
Context Adaptive Binary Arithmetic Coder inline functions.
int level
level
Definition: avcodec.h:1982
int init_qp
pic_init_qp_minus26 + 26
Definition: h264_ps.h:120
H.264 / AVC / MPEG-4 part10 codec.
enum AVChromaLocation chroma_location
Definition: h264_ps.h:80
int mmco_reset
Definition: h264dec.h:481
H264SliceContext * slice_ctx
Definition: h264dec.h:356
int direct_8x8_inference_flag
Definition: h264_ps.h:64
static int h264_select_output_frame(H264Context *h)
Definition: h264_slice.c:1357
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1666
int reference
Definition: h264dec.h:176
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:658
int top_borders_allocated[2]
Definition: h264dec.h:294
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:831
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264dec.h:91
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:70
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264_slice.c:743
static int av_unused get_cabac_terminate(CABACContext *c)
int quincunx_sampling_flag
Definition: h264_sei.h:156
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:398
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:417
HW acceleration through CUDA.
Definition: pixfmt.h:235
int type
NAL unit type.
Definition: h2645_parse.h:52
#define FF_ARRAY_ELEMS(a)
Full range content.
Definition: pixfmt.h:586
static int init_table_pools(H264Context *h)
Definition: h264_slice.c:160
uint8_t * edge_emu_buffer
Definition: h264dec.h:290
if(ret)
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:106
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int pic_order_present
pic_order_present_flag
Definition: h264_ps.h:114
uint8_t zigzag_scan_q0[16]
Definition: h264dec.h:433
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:381
int bit_depth_luma
luma bit depth from sps to detect changes
Definition: h264dec.h:459
int chroma_format_idc
chroma format from sps to detect changes
Definition: h264dec.h:460
VideoDSPContext vdsp
Definition: h264dec.h:346
int timing_info_present_flag
Definition: h264_ps.h:82
int coded_picture_number
picture number in bitstream order
Definition: frame.h:422
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
int mb_stride
Definition: h264dec.h:442
Views are packed in a checkerboard-like structure per pixel.
Definition: stereo3d.h:104
int postpone_filter
Definition: h264dec.h:379
#define IS_INTERLACED(a)
Definition: mpegutils.h:83
AVCodecContext * avctx
Definition: h264dec.h:345
uint8_t zigzag_scan8x8_q0[64]
Definition: h264dec.h:434
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:407
5: top field, bottom field, top field repeated, in that order
Definition: h264_sei.h:52
Libavcodec external API header.
#define MAX_DELAYED_PIC_COUNT
Definition: h264dec.h:56
Views are on top of each other.
Definition: stereo3d.h:79
int last_qscale_diff
Definition: h264dec.h:196
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
AVBufferRef * pps_list[MAX_PPS_COUNT]
Definition: h264_ps.h:145
enum AVCodecID codec_id
Definition: avcodec.h:536
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
Definition: golomb.h:122
int crop_left
Definition: h264dec.h:386
int delta_poc_bottom
Definition: h264_parse.h:46
ERPicture next_pic
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
H264Picture * short_ref[32]
Definition: h264dec.h:469
int next_outputed_poc
Definition: h264dec.h:474
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cabac.c:1911
int explicit_ref_marking
Definition: h264dec.h:482
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:348
int field_poc[2]
top/bottom POC
Definition: h264dec.h:147
int debug
debug
Definition: avcodec.h:1611
int recovery_frame
recovery_frame is the frame_num at which the next frame should be fully constructed.
Definition: h264dec.h:517
main external API structure.
Definition: avcodec.h:526
User data unregistered metadata associated with a video frame.
Definition: frame.h:194
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264dec.h:195
int explicit_ref_marking
Definition: h264dec.h:330
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
uint8_t * data
The data buffer.
Definition: buffer.h:89
H264SEITimeCode timecode[3]
Maximum three timecodes in a pic_timing SEI.
Definition: h264_sei.h:110
#define fp
Definition: regdef.h:44
uint8_t * data
Definition: frame.h:216
int mb_height
Definition: h264dec.h:168
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1746
H264SEIA53Caption a53_caption
Definition: h264_sei.h:187
int implicit_weight[48][48][2]
Definition: h264_parse.h:40
size_t crop_right
Definition: frame.h:671
int8_t * qscale_table
Definition: h264dec.h:133
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:650
#define CABAC(h)
Definition: h264_cabac.c:28
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
AVBuffer * buffer
Definition: buffer.h:82
static const uint8_t field_scan8x8_cavlc[64+1]
Definition: h264_slice.c:76
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:399
AVCodecContext * owner[2]
Definition: thread.h:36
int coded_height
Definition: avcodec.h:714
Switching Predicted.
Definition: avutil.h:279
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264_parse.h:53
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:2193
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:739
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:300
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:277
#define FRAME_MBAFF(h)
Definition: h264dec.h:73
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1154
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1147
#define LBOT
Definition: h264dec.h:77
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:197
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:666
int8_t * ref_index[2]
Definition: h264dec.h:145
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
Definition: h264_picture.c:66
A reference counted buffer type.
int pixel_shift
0 for 8-bit H.264, 1 for high-bit-depth H.264
Definition: h264dec.h:362
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
int mmco_reset
MMCO_RESET set this 1.
Definition: h264dec.h:150
int content_interpretation_type
Definition: h264_sei.h:155
H264Picture * cur_pic_ptr
Definition: h264dec.h:352
#define LIST_NOT_USED
Definition: h264dec.h:396
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ptrdiff_t mb_uvlinesize
Definition: h264dec.h:234
static int h264_slice_header_init(H264Context *h)
Definition: h264_slice.c:930
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:248
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
int enable_er
Definition: h264dec.h:551
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:396
#define IS_INTER(a)
Definition: mpegutils.h:79
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: avcodec.h:1591
const SPS * sps
Definition: h264_ps.h:150
unsigned int sps_id
Definition: h264_ps.h:112
#define TRANSPOSE(x)
H264SEIPictureTiming picture_timing
Definition: h264_sei.h:185
int width_from_caller
Definition: h264dec.h:548
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
Definition: h264_ps.h:52
H264SEIRecoveryPoint recovery_point
Definition: h264_sei.h:189
ptrdiff_t mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264dec.h:233
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer. ...
Definition: pixfmt.h:137
int16_t slice_row[MAX_SLICES]
to detect when MAX_SLICES is too low
Definition: h264dec.h:544
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:300
3: top field, bottom field, in that order
Definition: h264_sei.h:50
static int alloc_picture(H264Context *h, H264Picture *pic)
Definition: h264_slice.c:187
ptrdiff_t linesize
Definition: h264dec.h:232
int block_offset[2 *(16 *3)]
block_offset[ 0..23] for frame macroblocks block_offset[24..47] for field macroblocks ...
Definition: h264dec.h:403
uint32_t time_scale
Definition: h264_ps.h:84
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:408
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:416
int transform_8x8_mode
transform_8x8_mode_flag
Definition: h264_ps.h:126
ptrdiff_t uvlinesize
Definition: h264dec.h:232
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
static int h264_slice_init(H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1957
int pic_struct_present_flag
Definition: h264_ps.h:94
#define CHROMA444(h)
Definition: h264dec.h:99
unsigned int list_count
Definition: h264dec.h:274
uint8_t zigzag_scan[16]
Definition: h264dec.h:427
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:406
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
AVBufferRef * pps_buf
Definition: h264dec.h:165
int prev_poc_lsb
poc_lsb of the last reference pic for POC type 0
Definition: h264_parse.h:50
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264_slice.c:115
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:181
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:313
Narrow or limited range content.
Definition: pixfmt.h:569
int mb_stride
Definition: h264dec.h:169
int left_type[LEFT_MBS]
Definition: h264dec.h:222
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
int nb_slice_ctx_queued
Definition: h264dec.h:358
discard all non reference
Definition: avcodec.h:232
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:159
AVBufferPool * qscale_table_pool
Definition: h264dec.h:555
H264Picture * next_output_pic
Definition: h264dec.h:473
int mb_height
Definition: h264_ps.h:61
AVBufferPool * motion_val_pool
Definition: h264dec.h:557
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
int delta_poc_bottom
Definition: h264dec.h:334
#define IS_8x8DCT(a)
Definition: h264dec.h:104
common internal api header.
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:240
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define AV_COPY128(d, s)
Definition: intreadwrite.h:609
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
Definition: h264_slice.c:777
AVBufferRef * pps_ref
Definition: h264_ps.h:147
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
Definition: h264_ps.h:50
int missing_fields
Definition: h264dec.h:534
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:2942
H264ParamSets ps
Definition: h264dec.h:462
H264SEIFramePacking frame_packing
Definition: h264_sei.h:191
H.264 / AVC / MPEG-4 part10 motion vector prediction.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
Bi-dir predicted.
Definition: avutil.h:276
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
Stereoscopic video.
Views are packed per column.
Definition: stereo3d.h:141
int cur_chroma_format_idc
Definition: h264dec.h:542
int8_t * intra4x4_pred_mode
Definition: h264dec.h:212
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:2191
enum AVDiscard skip_loop_filter
Skip loop filtering for selected frames.
Definition: avcodec.h:1990
int den
Denominator.
Definition: rational.h:60
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
Definition: h264_slice.c:2532
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cavlc.c:702
GetBitContext gb
Definition: h2645_parse.h:47
int bit_depth_luma
bit_depth_luma_minus8 + 8
Definition: h264_ps.h:100
AVBufferRef ** buf_ref
Definition: h264_sei.h:129
#define IS_INTRA(x, y)
int present
Definition: h264_sei.h:119
int delta_poc[2]
Definition: h264_parse.h:47
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert sei info to SMPTE 12M binary representation.
Definition: timecode.c:71
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:138
void * priv_data
Definition: avcodec.h:553
#define LTOP
Definition: h264dec.h:76
#define PICT_FRAME
Definition: mpegutils.h:39
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264_slice.c:577
uint8_t zigzag_scan8x8_cavlc_q0[64]
Definition: h264dec.h:435
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:306
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:283
unsigned int pps_id
Definition: h264dec.h:284
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:460
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2520
#define CHROMA422(h)
Definition: h264dec.h:98
#define FF_BUG_TRUNCATED
Definition: avcodec.h:1574
H264Picture cur_pic
Definition: h264dec.h:353
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:386
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
int mb_width
Definition: h264dec.h:441
static int find_unused_picture(H264Context *h)
Definition: h264_slice.c:261
int current_slice
current slice number, used to initialize slice_num of each thread/context
Definition: h264dec.h:494
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:610
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps, const int *ref_count, int slice_type_nos, H264PredWeightTable *pwt, int picture_structure, void *logctx)
Definition: h264_parse.c:27
int mb_width
pic_width_in_mbs_minus1 + 1
Definition: h264_ps.h:59
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:613
uint32_t * mb2b_xy
Definition: h264dec.h:405
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:275
uint8_t field_scan8x8_cavlc_q0[64]
Definition: h264dec.h:438
int cur_bit_depth_luma
Definition: h264dec.h:543
int crop_top
Definition: h264dec.h:388
atomic_int error_count
AVBufferRef * ref_index_buf[2]
Definition: h264dec.h:144
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:67
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1217
H264DSPContext h264dsp
Definition: h264dec.h:347
void ff_er_frame_start(ERContext *s)
int height
Definition: frame.h:366
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:376
FILE * out
Definition: movenc.c:54
uint8_t(*[2] mvd_table)[2]
Definition: h264dec.h:319
uint8_t field_scan8x8[64]
Definition: h264dec.h:431
int slice_type_fixed
Definition: h264dec.h:191
static av_always_inline void fill_filter_caches_inter(const H264Context *h, H264SliceContext *sl, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264_slice.c:2236
int mb_width
Definition: h264dec.h:168
#define av_freep(p)
const SPS * sps
Definition: h264_ps.h:140
int prev_frame_num_offset
for POC type 2
Definition: h264_parse.h:52
#define av_always_inline
Definition: attributes.h:45
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:229
int slice_beta_offset
Definition: h264dec.h:201
int8_t * intra4x4_pred_mode
Definition: h264dec.h:391
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:338
#define ER_AC_END
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264_slice.c:2590
int delta_poc[2]
Definition: h264dec.h:335
void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:62
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init context Allocate buffers which are not shared amongst multiple threads.
Definition: h264dec.c:222
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:216
uint8_t field_scan_q0[16]
Definition: h264dec.h:436
int mb_field_decoding_flag
Definition: h264dec.h:247
uint8_t(* non_zero_count)[48]
Definition: h264dec.h:394
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
Definition: h264_slice.c:2462
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1825
unsigned int crop_bottom
frame_cropping_rect_bottom_offset
Definition: h264_ps.h:71
exp golomb vlc stuff
uint8_t * bipred_scratchpad
Definition: h264dec.h:289
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:44
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:49
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
int droppable
Definition: h264dec.h:368
int level_idc
Definition: h264_ps.h:47
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1589
int crop_right
Definition: h264dec.h:387
void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
int nal_ref_idc
Definition: h264dec.h:448
GetBitContext gb
Definition: h264dec.h:185
uint8_t field_scan[16]
Definition: h264dec.h:430
int cabac_init_idc
Definition: h264dec.h:326
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264dec.h:523
for(j=16;j >0;--j)
6: bottom field, top field, bottom field repeated, in that order
Definition: h264_sei.h:53
int i
Definition: input.c:407
#define FFMAX3(a, b, c)
Definition: common.h:95
int b_stride
Definition: h264dec.h:407
Predicted.
Definition: avutil.h:275
#define tb
Definition: regdef.h:68
Context Adaptive Binary Arithmetic Coder.
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:52
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
Definition: h264_cabac.c:1262
int short_ref_count
number of actual short term references
Definition: h264dec.h:485
static uint8_t tmp[11]
Definition: aes_ctr.c:26
enum AVColorSpace colorspace
Definition: h264_ps.h:79