/*
 * FFmpeg — h264_slice.c (source listing)
 */
/*
 * H.26L/H.264/AVC/JVT/14496-10/... decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 codec.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */
27 
#include "libavutil/avassert.h"
#include "libavutil/display.h"
#include "libavutil/imgutils.h"
#include "libavutil/stereo3d.h"
#include "libavutil/timer.h"
#include "internal.h"
#include "cabac.h"
#include "cabac_functions.h"
#include "error_resilience.h"
#include "avcodec.h"
#include "h264.h"
#include "h264dec.h"
#include "h264data.h"
#include "h264chroma.h"
#include "h264_mvpred.h"
#include "h264_ps.h"
#include "golomb.h"
#include "mathops.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "rectangle.h"
#include "thread.h"
50 
/* 4x4 coefficient scan order for field (interlaced) macroblocks.
 * Entry i gives the raster position (x + 4*y) of the i-th scanned
 * coefficient. The extra (+1) element stays zero-initialized so the
 * table can be over-read by one without invoking UB. */
static const uint8_t field_scan[16+1] = {
    0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
    0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
    2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
    3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
};

/* 8x8 coefficient scan order for field (interlaced) macroblocks.
 * Entry i gives the raster position (x + 8*y) of the i-th scanned
 * coefficient; the +1 padding element stays zero. */
static const uint8_t field_scan8x8[64+1] = {
    0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
    1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
    2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
    0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
    2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
    2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
    2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
    3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
    3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
    4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
    4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
    5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
    5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
    7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
    6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
    7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
};

/* Interleaved variant of field_scan8x8 used by the CAVLC decoder:
 * field_scan8x8_cavlc[i] = field_scan8x8[(i/4) + 16*(i%4)].
 * The +1 padding element stays zero. */
static const uint8_t field_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
    2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
    3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
    5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
    0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
    1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
    3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
    5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
    0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
    1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
    3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
    5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
    1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
    1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
    3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
    6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
};

// zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)]
// Interleaved 8x8 zigzag order for the CAVLC decoder (frame MBs).
// The +1 padding element stays zero.
static const uint8_t zigzag_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
    4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
    3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
    2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
    1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
    3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
    2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
    3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
    0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
    2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
    1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
    4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
    0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
    1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
    0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
    5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
};

116 static void release_unused_pictures(H264Context *h, int remove_current)
117 {
118  int i;
119 
120  /* release non reference frames */
121  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
122  if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
123  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
124  ff_h264_unref_picture(h, &h->DPB[i]);
125  }
126  }
127 }
128 
129 static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
130 {
131  const H264Context *h = sl->h264;
132  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
133 
134  av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated, 16 * 6 * alloc_size);
135  // edge emu needs blocksize + filter length - 1
136  // (= 21x21 for H.264)
137  av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated, alloc_size * 2 * 21);
138 
140  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
142  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
143 
144  if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
145  !sl->top_borders[0] || !sl->top_borders[1]) {
148  av_freep(&sl->top_borders[0]);
149  av_freep(&sl->top_borders[1]);
150 
153  sl->top_borders_allocated[0] = 0;
154  sl->top_borders_allocated[1] = 0;
155  return AVERROR(ENOMEM);
156  }
157 
158  return 0;
159 }
160 
162 {
163  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
164  const int mb_array_size = h->mb_stride * h->mb_height;
165  const int b4_stride = h->mb_width * 4 + 1;
166  const int b4_array_size = b4_stride * h->mb_height * 4;
167 
168  h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
170  h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
171  sizeof(uint32_t), av_buffer_allocz);
172  h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
173  sizeof(int16_t), av_buffer_allocz);
174  h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
175 
176  if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
177  !h->ref_index_pool) {
178  av_buffer_pool_uninit(&h->qscale_table_pool);
179  av_buffer_pool_uninit(&h->mb_type_pool);
180  av_buffer_pool_uninit(&h->motion_val_pool);
181  av_buffer_pool_uninit(&h->ref_index_pool);
182  return AVERROR(ENOMEM);
183  }
184 
185  return 0;
186 }
187 
189 {
190  int i, ret = 0;
191 
192  av_assert0(!pic->f->data[0]);
193 
194  pic->tf.f = pic->f;
195  ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
197  if (ret < 0)
198  goto fail;
199 
200  if (h->avctx->hwaccel) {
201  const AVHWAccel *hwaccel = h->avctx->hwaccel;
203  if (hwaccel->frame_priv_data_size) {
205  if (!pic->hwaccel_priv_buf)
206  return AVERROR(ENOMEM);
208  }
209  }
210  if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
211  int h_chroma_shift, v_chroma_shift;
213  &h_chroma_shift, &v_chroma_shift);
214 
215  for(i=0; i<AV_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
216  memset(pic->f->data[1] + pic->f->linesize[1]*i,
217  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
218  memset(pic->f->data[2] + pic->f->linesize[2]*i,
219  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
220  }
221  }
222 
223  if (!h->qscale_table_pool) {
225  if (ret < 0)
226  goto fail;
227  }
228 
229  pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
230  pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool);
231  if (!pic->qscale_table_buf || !pic->mb_type_buf)
232  goto fail;
233 
234  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
235  pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
236 
237  for (i = 0; i < 2; i++) {
238  pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
239  pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool);
240  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
241  goto fail;
242 
243  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
244  pic->ref_index[i] = pic->ref_index_buf[i]->data;
245  }
246 
247  return 0;
248 fail:
249  ff_h264_unref_picture(h, pic);
250  return (ret < 0) ? ret : AVERROR(ENOMEM);
251 }
252 
254 {
255  int i;
256 
257  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
258  if (!h->DPB[i].f->buf[0])
259  return i;
260  }
261  return AVERROR_INVALIDDATA;
262 }
263 
264 
/* Check whether pointer a lies inside the object starting at b spanning
 * `size` elements of b's type (half-open range). */
#define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))

/* Translate a picture pointer from old_ctx's DPB into the corresponding
 * slot of new_ctx's DPB; pointers outside the DPB map to NULL. */
#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    (((pic) && (pic) >= (old_ctx)->DPB &&                 \
      (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ?  \
     &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)

273  H264Context *new_base,
274  H264Context *old_base)
275 {
276  int i;
277 
278  for (i = 0; i < count; i++) {
279  av_assert1(!from[i] ||
280  IN_RANGE(from[i], old_base, 1) ||
281  IN_RANGE(from[i], old_base->DPB, H264_MAX_PICTURE_COUNT));
282  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
283  }
284 }
285 
287 
289  const AVCodecContext *src)
290 {
291  H264Context *h = dst->priv_data, *h1 = src->priv_data;
292  int inited = h->context_initialized, err = 0;
293  int need_reinit = 0;
294  int i, ret;
295 
296  if (dst == src)
297  return 0;
298 
299  if (inited && !h1->ps.sps)
300  return AVERROR_INVALIDDATA;
301 
302  if (inited &&
303  (h->width != h1->width ||
304  h->height != h1->height ||
305  h->mb_width != h1->mb_width ||
306  h->mb_height != h1->mb_height ||
307  !h->ps.sps ||
308  h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma ||
309  h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
310  h->ps.sps->colorspace != h1->ps.sps->colorspace)) {
311  need_reinit = 1;
312  }
313 
314  /* copy block_offset since frame_start may not be called */
315  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
316 
317  // SPS/PPS
318  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
319  av_buffer_unref(&h->ps.sps_list[i]);
320  if (h1->ps.sps_list[i]) {
321  h->ps.sps_list[i] = av_buffer_ref(h1->ps.sps_list[i]);
322  if (!h->ps.sps_list[i])
323  return AVERROR(ENOMEM);
324  }
325  }
326  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
327  av_buffer_unref(&h->ps.pps_list[i]);
328  if (h1->ps.pps_list[i]) {
329  h->ps.pps_list[i] = av_buffer_ref(h1->ps.pps_list[i]);
330  if (!h->ps.pps_list[i])
331  return AVERROR(ENOMEM);
332  }
333  }
334 
335  av_buffer_unref(&h->ps.pps_ref);
336  av_buffer_unref(&h->ps.sps_ref);
337  h->ps.pps = NULL;
338  h->ps.sps = NULL;
339  if (h1->ps.pps_ref) {
340  h->ps.pps_ref = av_buffer_ref(h1->ps.pps_ref);
341  if (!h->ps.pps_ref)
342  return AVERROR(ENOMEM);
343  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
344  }
345  if (h1->ps.sps_ref) {
346  h->ps.sps_ref = av_buffer_ref(h1->ps.sps_ref);
347  if (!h->ps.sps_ref)
348  return AVERROR(ENOMEM);
349  h->ps.sps = (const SPS*)h->ps.sps_ref->data;
350  }
351 
352  if (need_reinit || !inited) {
353  h->width = h1->width;
354  h->height = h1->height;
355  h->mb_height = h1->mb_height;
356  h->mb_width = h1->mb_width;
357  h->mb_num = h1->mb_num;
358  h->mb_stride = h1->mb_stride;
359  h->b_stride = h1->b_stride;
360  h->x264_build = h1->x264_build;
361 
362  if (h->context_initialized || h1->context_initialized) {
363  if ((err = h264_slice_header_init(h)) < 0) {
364  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
365  return err;
366  }
367  }
368 
369  /* copy block_offset since frame_start may not be called */
370  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
371  }
372 
373  h->avctx->coded_height = h1->avctx->coded_height;
374  h->avctx->coded_width = h1->avctx->coded_width;
375  h->avctx->width = h1->avctx->width;
376  h->avctx->height = h1->avctx->height;
377  h->width_from_caller = h1->width_from_caller;
378  h->height_from_caller = h1->height_from_caller;
379  h->coded_picture_number = h1->coded_picture_number;
380  h->first_field = h1->first_field;
381  h->picture_structure = h1->picture_structure;
382  h->mb_aff_frame = h1->mb_aff_frame;
383  h->droppable = h1->droppable;
384 
385  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
386  ff_h264_unref_picture(h, &h->DPB[i]);
387  if (h1->DPB[i].f->buf[0] &&
388  (ret = ff_h264_ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
389  return ret;
390  }
391 
392  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
393  ff_h264_unref_picture(h, &h->cur_pic);
394  if (h1->cur_pic.f->buf[0]) {
395  ret = ff_h264_ref_picture(h, &h->cur_pic, &h1->cur_pic);
396  if (ret < 0)
397  return ret;
398  }
399 
400  h->enable_er = h1->enable_er;
401  h->workaround_bugs = h1->workaround_bugs;
402  h->droppable = h1->droppable;
403 
404  // extradata/NAL handling
405  h->is_avc = h1->is_avc;
406  h->nal_length_size = h1->nal_length_size;
407 
408  memcpy(&h->poc, &h1->poc, sizeof(h->poc));
409 
410  memcpy(h->short_ref, h1->short_ref, sizeof(h->short_ref));
411  memcpy(h->long_ref, h1->long_ref, sizeof(h->long_ref));
412  memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
413  memcpy(h->last_pocs, h1->last_pocs, sizeof(h->last_pocs));
414 
415  h->next_output_pic = h1->next_output_pic;
416  h->next_outputed_poc = h1->next_outputed_poc;
417 
418  memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
419  h->nb_mmco = h1->nb_mmco;
420  h->mmco_reset = h1->mmco_reset;
421  h->explicit_ref_marking = h1->explicit_ref_marking;
422  h->long_ref_count = h1->long_ref_count;
423  h->short_ref_count = h1->short_ref_count;
424 
425  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
426  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
427  copy_picture_range(h->delayed_pic, h1->delayed_pic,
428  MAX_DELAYED_PIC_COUNT + 2, h, h1);
429 
430  h->frame_recovered = h1->frame_recovered;
431 
432  av_buffer_unref(&h->sei.a53_caption.buf_ref);
433  if (h1->sei.a53_caption.buf_ref) {
434  h->sei.a53_caption.buf_ref = av_buffer_ref(h1->sei.a53_caption.buf_ref);
435  if (!h->sei.a53_caption.buf_ref)
436  return AVERROR(ENOMEM);
437  }
438 
439  if (!h->cur_pic_ptr)
440  return 0;
441 
442  if (!h->droppable) {
444  h->poc.prev_poc_msb = h->poc.poc_msb;
445  h->poc.prev_poc_lsb = h->poc.poc_lsb;
446  }
447  h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
448  h->poc.prev_frame_num = h->poc.frame_num;
449 
450  h->recovery_frame = h1->recovery_frame;
451 
452  return err;
453 }
454 
456 {
457  H264Picture *pic;
458  int i, ret;
459  const int pixel_shift = h->pixel_shift;
460  int c[4] = {
461  1<<(h->ps.sps->bit_depth_luma-1),
462  1<<(h->ps.sps->bit_depth_chroma-1),
463  1<<(h->ps.sps->bit_depth_chroma-1),
464  -1
465  };
466 
467  if (!ff_thread_can_start_frame(h->avctx)) {
468  av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
469  return -1;
470  }
471 
473  h->cur_pic_ptr = NULL;
474 
476  if (i < 0) {
477  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
478  return i;
479  }
480  pic = &h->DPB[i];
481 
482  pic->reference = h->droppable ? 0 : h->picture_structure;
483  pic->f->coded_picture_number = h->coded_picture_number++;
484  pic->field_picture = h->picture_structure != PICT_FRAME;
485  pic->frame_num = h->poc.frame_num;
486  /*
487  * Zero key_frame here; IDR markings per slice in frame or fields are ORed
488  * in later.
489  * See decode_nal_units().
490  */
491  pic->f->key_frame = 0;
492  pic->mmco_reset = 0;
493  pic->recovered = 0;
494  pic->invalid_gap = 0;
495  pic->sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
496 
497  pic->f->pict_type = h->slice_ctx[0].slice_type;
498 
499  pic->f->crop_left = h->crop_left;
500  pic->f->crop_right = h->crop_right;
501  pic->f->crop_top = h->crop_top;
502  pic->f->crop_bottom = h->crop_bottom;
503 
504  if ((ret = alloc_picture(h, pic)) < 0)
505  return ret;
506  if(!h->frame_recovered && !h->avctx->hwaccel)
507  ff_color_frame(pic->f, c);
508 
509  h->cur_pic_ptr = pic;
510  ff_h264_unref_picture(h, &h->cur_pic);
511  if (CONFIG_ERROR_RESILIENCE) {
512  ff_h264_set_erpic(&h->slice_ctx[0].er.cur_pic, NULL);
513  }
514 
515  if ((ret = ff_h264_ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
516  return ret;
517 
518  for (i = 0; i < h->nb_slice_ctx; i++) {
519  h->slice_ctx[i].linesize = h->cur_pic_ptr->f->linesize[0];
520  h->slice_ctx[i].uvlinesize = h->cur_pic_ptr->f->linesize[1];
521  }
522 
523  if (CONFIG_ERROR_RESILIENCE && h->enable_er) {
524  ff_er_frame_start(&h->slice_ctx[0].er);
525  ff_h264_set_erpic(&h->slice_ctx[0].er.last_pic, NULL);
526  ff_h264_set_erpic(&h->slice_ctx[0].er.next_pic, NULL);
527  }
528 
529  for (i = 0; i < 16; i++) {
530  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
531  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
532  }
533  for (i = 0; i < 16; i++) {
534  h->block_offset[16 + i] =
535  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
536  h->block_offset[48 + 16 + i] =
537  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
538  }
539 
540  /* We mark the current picture as non-reference after allocating it, so
541  * that if we break out due to an error it can be released automatically
542  * in the next ff_mpv_frame_start().
543  */
544  h->cur_pic_ptr->reference = 0;
545 
546  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
547 
548  h->next_output_pic = NULL;
549 
550  h->postpone_filter = 0;
551 
552  h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);
553 
554  if (h->sei.unregistered.x264_build >= 0)
555  h->x264_build = h->sei.unregistered.x264_build;
556 
557  assert(h->cur_pic_ptr->long_ref == 0);
558 
559  return 0;
560 }
561 
563  uint8_t *src_y,
564  uint8_t *src_cb, uint8_t *src_cr,
565  int linesize, int uvlinesize,
566  int simple)
567 {
568  uint8_t *top_border;
569  int top_idx = 1;
570  const int pixel_shift = h->pixel_shift;
571  int chroma444 = CHROMA444(h);
572  int chroma422 = CHROMA422(h);
573 
574  src_y -= linesize;
575  src_cb -= uvlinesize;
576  src_cr -= uvlinesize;
577 
578  if (!simple && FRAME_MBAFF(h)) {
579  if (sl->mb_y & 1) {
580  if (!MB_MBAFF(sl)) {
581  top_border = sl->top_borders[0][sl->mb_x];
582  AV_COPY128(top_border, src_y + 15 * linesize);
583  if (pixel_shift)
584  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
585  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
586  if (chroma444) {
587  if (pixel_shift) {
588  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
589  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
590  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
591  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
592  } else {
593  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
594  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
595  }
596  } else if (chroma422) {
597  if (pixel_shift) {
598  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
599  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
600  } else {
601  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
602  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
603  }
604  } else {
605  if (pixel_shift) {
606  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
607  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
608  } else {
609  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
610  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
611  }
612  }
613  }
614  }
615  } else if (MB_MBAFF(sl)) {
616  top_idx = 0;
617  } else
618  return;
619  }
620 
621  top_border = sl->top_borders[top_idx][sl->mb_x];
622  /* There are two lines saved, the line above the top macroblock
623  * of a pair, and the line above the bottom macroblock. */
624  AV_COPY128(top_border, src_y + 16 * linesize);
625  if (pixel_shift)
626  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
627 
628  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
629  if (chroma444) {
630  if (pixel_shift) {
631  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
632  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
633  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
634  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
635  } else {
636  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
637  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
638  }
639  } else if (chroma422) {
640  if (pixel_shift) {
641  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
642  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
643  } else {
644  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
645  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
646  }
647  } else {
648  if (pixel_shift) {
649  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
650  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
651  } else {
652  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
653  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
654  }
655  }
656  }
657 }
658 
659 /**
660  * Initialize implicit_weight table.
661  * @param field 0/1 initialize the weight for interlaced MBAFF
662  * -1 initializes the rest
663  */
665 {
666  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
667 
668  for (i = 0; i < 2; i++) {
669  sl->pwt.luma_weight_flag[i] = 0;
670  sl->pwt.chroma_weight_flag[i] = 0;
671  }
672 
673  if (field < 0) {
674  if (h->picture_structure == PICT_FRAME) {
675  cur_poc = h->cur_pic_ptr->poc;
676  } else {
677  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
678  }
679  if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
680  sl->ref_list[0][0].poc + (int64_t)sl->ref_list[1][0].poc == 2LL * cur_poc) {
681  sl->pwt.use_weight = 0;
682  sl->pwt.use_weight_chroma = 0;
683  return;
684  }
685  ref_start = 0;
686  ref_count0 = sl->ref_count[0];
687  ref_count1 = sl->ref_count[1];
688  } else {
689  cur_poc = h->cur_pic_ptr->field_poc[field];
690  ref_start = 16;
691  ref_count0 = 16 + 2 * sl->ref_count[0];
692  ref_count1 = 16 + 2 * sl->ref_count[1];
693  }
694 
695  sl->pwt.use_weight = 2;
696  sl->pwt.use_weight_chroma = 2;
697  sl->pwt.luma_log2_weight_denom = 5;
699 
700  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
701  int64_t poc0 = sl->ref_list[0][ref0].poc;
702  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
703  int w = 32;
704  if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
705  int poc1 = sl->ref_list[1][ref1].poc;
706  int td = av_clip_int8(poc1 - poc0);
707  if (td) {
708  int tb = av_clip_int8(cur_poc - poc0);
709  int tx = (16384 + (FFABS(td) >> 1)) / td;
710  int dist_scale_factor = (tb * tx + 32) >> 8;
711  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
712  w = 64 - dist_scale_factor;
713  }
714  }
715  if (field < 0) {
716  sl->pwt.implicit_weight[ref0][ref1][0] =
717  sl->pwt.implicit_weight[ref0][ref1][1] = w;
718  } else {
719  sl->pwt.implicit_weight[ref0][ref1][field] = w;
720  }
721  }
722  }
723 }
724 
725 /**
726  * initialize scan tables
727  */
729 {
730  int i;
731  for (i = 0; i < 16; i++) {
732 #define TRANSPOSE(x) ((x) >> 2) | (((x) << 2) & 0xF)
733  h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]);
734  h->field_scan[i] = TRANSPOSE(field_scan[i]);
735 #undef TRANSPOSE
736  }
737  for (i = 0; i < 64; i++) {
738 #define TRANSPOSE(x) ((x) >> 3) | (((x) & 7) << 3)
739  h->zigzag_scan8x8[i] = TRANSPOSE(ff_zigzag_direct[i]);
740  h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]);
741  h->field_scan8x8[i] = TRANSPOSE(field_scan8x8[i]);
742  h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]);
743 #undef TRANSPOSE
744  }
745  if (h->ps.sps->transform_bypass) { // FIXME same ugly
746  memcpy(h->zigzag_scan_q0 , ff_zigzag_scan , sizeof(h->zigzag_scan_q0 ));
747  memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 ));
748  memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
749  memcpy(h->field_scan_q0 , field_scan , sizeof(h->field_scan_q0 ));
750  memcpy(h->field_scan8x8_q0 , field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
751  memcpy(h->field_scan8x8_cavlc_q0 , field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
752  } else {
753  memcpy(h->zigzag_scan_q0 , h->zigzag_scan , sizeof(h->zigzag_scan_q0 ));
754  memcpy(h->zigzag_scan8x8_q0 , h->zigzag_scan8x8 , sizeof(h->zigzag_scan8x8_q0 ));
755  memcpy(h->zigzag_scan8x8_cavlc_q0 , h->zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
756  memcpy(h->field_scan_q0 , h->field_scan , sizeof(h->field_scan_q0 ));
757  memcpy(h->field_scan8x8_q0 , h->field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
758  memcpy(h->field_scan8x8_cavlc_q0 , h->field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
759  }
760 }
761 
762 static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
763 {
764 #define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
765  (CONFIG_H264_D3D11VA_HWACCEL * 2) + \
766  CONFIG_H264_NVDEC_HWACCEL + \
767  CONFIG_H264_VAAPI_HWACCEL + \
768  CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
769  CONFIG_H264_VDPAU_HWACCEL)
771  const enum AVPixelFormat *choices = pix_fmts;
772  int i;
773 
774  switch (h->ps.sps->bit_depth_luma) {
775  case 9:
776  if (CHROMA444(h)) {
777  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
778  *fmt++ = AV_PIX_FMT_GBRP9;
779  } else
781  } else if (CHROMA422(h))
783  else
785  break;
786  case 10:
787  if (CHROMA444(h)) {
788  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
789  *fmt++ = AV_PIX_FMT_GBRP10;
790  } else
792  } else if (CHROMA422(h))
794  else
796  break;
797  case 12:
798  if (CHROMA444(h)) {
799  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
800  *fmt++ = AV_PIX_FMT_GBRP12;
801  } else
803  } else if (CHROMA422(h))
805  else
807  break;
808  case 14:
809  if (CHROMA444(h)) {
810  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
811  *fmt++ = AV_PIX_FMT_GBRP14;
812  } else
814  } else if (CHROMA422(h))
816  else
818  break;
819  case 8:
820 #if CONFIG_H264_VDPAU_HWACCEL
821  *fmt++ = AV_PIX_FMT_VDPAU;
822 #endif
823 #if CONFIG_H264_NVDEC_HWACCEL
824  *fmt++ = AV_PIX_FMT_CUDA;
825 #endif
826  if (CHROMA444(h)) {
827  if (h->avctx->colorspace == AVCOL_SPC_RGB)
828  *fmt++ = AV_PIX_FMT_GBRP;
829  else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
831  else
832  *fmt++ = AV_PIX_FMT_YUV444P;
833  } else if (CHROMA422(h)) {
834  if (h->avctx->color_range == AVCOL_RANGE_JPEG)
836  else
837  *fmt++ = AV_PIX_FMT_YUV422P;
838  } else {
839 #if CONFIG_H264_DXVA2_HWACCEL
841 #endif
842 #if CONFIG_H264_D3D11VA_HWACCEL
844  *fmt++ = AV_PIX_FMT_D3D11;
845 #endif
846 #if CONFIG_H264_VAAPI_HWACCEL
847  *fmt++ = AV_PIX_FMT_VAAPI;
848 #endif
849 #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
851 #endif
852  if (h->avctx->codec->pix_fmts)
853  choices = h->avctx->codec->pix_fmts;
854  else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
856  else
857  *fmt++ = AV_PIX_FMT_YUV420P;
858  }
859  break;
860  default:
861  av_log(h->avctx, AV_LOG_ERROR,
862  "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
863  return AVERROR_INVALIDDATA;
864  }
865 
866  *fmt = AV_PIX_FMT_NONE;
867 
868  for (i=0; choices[i] != AV_PIX_FMT_NONE; i++)
869  if (choices[i] == h->avctx->pix_fmt && !force_callback)
870  return choices[i];
871  return ff_thread_get_format(h->avctx, choices);
872 }
873 
874 /* export coded and cropped frame dimensions to AVCodecContext */
876 {
877  const SPS *sps = (const SPS*)h->ps.sps;
878  int cr = sps->crop_right;
879  int cl = sps->crop_left;
880  int ct = sps->crop_top;
881  int cb = sps->crop_bottom;
882  int width = h->width - (cr + cl);
883  int height = h->height - (ct + cb);
884  av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
885  av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);
886 
887  /* handle container cropping */
888  if (h->width_from_caller > 0 && h->height_from_caller > 0 &&
889  !sps->crop_top && !sps->crop_left &&
890  FFALIGN(h->width_from_caller, 16) == FFALIGN(width, 16) &&
891  FFALIGN(h->height_from_caller, 16) == FFALIGN(height, 16) &&
892  h->width_from_caller <= width &&
893  h->height_from_caller <= height) {
894  width = h->width_from_caller;
895  height = h->height_from_caller;
896  cl = 0;
897  ct = 0;
898  cr = h->width - width;
899  cb = h->height - height;
900  } else {
901  h->width_from_caller = 0;
902  h->height_from_caller = 0;
903  }
904 
905  h->avctx->coded_width = h->width;
906  h->avctx->coded_height = h->height;
907  h->avctx->width = width;
908  h->avctx->height = height;
909  h->crop_right = cr;
910  h->crop_left = cl;
911  h->crop_top = ct;
912  h->crop_bottom = cb;
913 
914  return 0;
915 }
916 
918 {
919  const SPS *sps = h->ps.sps;
920  int i, ret;
921 
922  if (!sps) {
924  goto fail;
925  }
926 
927  ff_set_sar(h->avctx, sps->sar);
928  av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
929  &h->chroma_x_shift, &h->chroma_y_shift);
930 
931  if (sps->timing_info_present_flag) {
932  int64_t den = sps->time_scale;
933  if (h->x264_build < 44U)
934  den *= 2;
935  av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
936  sps->num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30);
937  }
938 
940 
941  h->first_field = 0;
942  h->prev_interlaced_frame = 1;
943 
946  if (ret < 0) {
947  av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
948  goto fail;
949  }
950 
951  if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
952  sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
953  ) {
954  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
955  sps->bit_depth_luma);
957  goto fail;
958  }
959 
960  h->cur_bit_depth_luma =
961  h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
962  h->cur_chroma_format_idc = sps->chroma_format_idc;
963  h->pixel_shift = sps->bit_depth_luma > 8;
964  h->chroma_format_idc = sps->chroma_format_idc;
965  h->bit_depth_luma = sps->bit_depth_luma;
966 
967  ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
968  sps->chroma_format_idc);
969  ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
970  ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
971  ff_h264_pred_init(&h->hpc, h->avctx->codec_id, sps->bit_depth_luma,
972  sps->chroma_format_idc);
973  ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);
974 
975  if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
976  ret = ff_h264_slice_context_init(h, &h->slice_ctx[0]);
977  if (ret < 0) {
978  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
979  goto fail;
980  }
981  } else {
982  for (i = 0; i < h->nb_slice_ctx; i++) {
983  H264SliceContext *sl = &h->slice_ctx[i];
984 
985  sl->h264 = h;
986  sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
987  sl->mvd_table[0] = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
988  sl->mvd_table[1] = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;
989 
990  if ((ret = ff_h264_slice_context_init(h, sl)) < 0) {
991  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
992  goto fail;
993  }
994  }
995  }
996 
997  h->context_initialized = 1;
998 
999  return 0;
1000 fail:
1002  h->context_initialized = 0;
1003  return ret;
1004 }
1005 
/* NOTE(review): extraction artifact — the function signature (original line
 * 1006) and the case labels (original lines 1009-1011) are missing from this
 * copy. Presumably this is non_j_pixfmt(), mapping deprecated YUVJ pixel
 * formats to their non-J equivalents so format comparisons ignore the
 * J/full-range distinction — confirm against upstream FFmpeg h264_slice.c.
 * As visible, only the default path (return the input unchanged) remains. */
1007 {
1008  switch (a) {
1012  default:
 /* Any value without a dedicated case label is passed through untouched. */
1013  return a;
1014  }
1015 }
1016 
/*
 * h264_init_ps(): activate the PPS referenced by this slice and the SPS
 * referenced by that PPS, then decide whether the decoder context must be
 * (re)built before decoding continues.
 *
 * first_slice: nonzero for the first slice of a picture; only then is the
 * active PPS switched and the SAR compared against the current context.
 * Returns 0 on success, AVERROR(ENOMEM) on reference failure, or
 * AVERROR_INVALIDDATA if a reinit is required mid-picture.
 *
 * NOTE(review): extraction artifact — original line 1123 (the statement
 * controlled by "if (flush_changes)", presumably a flush/free of the old
 * context) is missing from this copy; restore from upstream FFmpeg before
 * compiling.
 */
1017 static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
1018 {
1019  const SPS *sps;
1020  int needs_reinit = 0, must_reinit, ret;
1021 
 /* Switch the active PPS only on the first slice of a picture. */
1022  if (first_slice) {
1023  av_buffer_unref(&h->ps.pps_ref);
1024  h->ps.pps = NULL;
1025  h->ps.pps_ref = av_buffer_ref(h->ps.pps_list[sl->pps_id]);
1026  if (!h->ps.pps_ref)
1027  return AVERROR(ENOMEM);
1028  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
1029  }
1030 
 /* If the PPS points at a different SPS than the active one, switch it and
  * check whether geometry/bit-depth/chroma parameters changed. */
1031  if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data) {
1032  av_buffer_unref(&h->ps.sps_ref);
1033  h->ps.sps = NULL;
1034  h->ps.sps_ref = av_buffer_ref(h->ps.sps_list[h->ps.pps->sps_id]);
1035  if (!h->ps.sps_ref)
1036  return AVERROR(ENOMEM);
1037  h->ps.sps = (const SPS*)h->ps.sps_ref->data;
1038 
1039  if (h->mb_width != h->ps.sps->mb_width ||
1040  h->mb_height != h->ps.sps->mb_height ||
1041  h->cur_bit_depth_luma != h->ps.sps->bit_depth_luma ||
1042  h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc
1043  )
1044  needs_reinit = 1;
1045 
1046  if (h->bit_depth_luma != h->ps.sps->bit_depth_luma ||
1047  h->chroma_format_idc != h->ps.sps->chroma_format_idc)
1048  needs_reinit = 1;
1049  }
1050  sps = h->ps.sps;
1051 
 /* must_reinit: the already-initialized context no longer matches the SPS. */
1052  must_reinit = (h->context_initialized &&
1053  ( 16*sps->mb_width != h->avctx->coded_width
1054  || 16*sps->mb_height != h->avctx->coded_height
1055  || h->cur_bit_depth_luma != sps->bit_depth_luma
1056  || h->cur_chroma_format_idc != sps->chroma_format_idc
1057  || h->mb_width != sps->mb_width
1058  || h->mb_height != sps->mb_height
1059  ));
 /* Pixel-format comparison ignores the deprecated J (full-range) variants. */
1060  if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
1061  || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0))))
1062  must_reinit = 1;
1063 
1064  if (first_slice && av_cmp_q(sps->sar, h->avctx->sample_aspect_ratio))
1065  must_reinit = 1;
1066 
 /* Export SPS-derived parameters into the codec context unless a prior
  * setup pass already froze them. */
1067  if (!h->setup_finished) {
1068  h->avctx->profile = ff_h264_get_profile(sps);
1069  h->avctx->level = sps->level_idc;
1070  h->avctx->refs = sps->ref_frame_count;
1071 
1072  h->mb_width = sps->mb_width;
1073  h->mb_height = sps->mb_height;
1074  h->mb_num = h->mb_width * h->mb_height;
1075  h->mb_stride = h->mb_width + 1;
1076 
1077  h->b_stride = h->mb_width * 4;
1078 
1079  h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p
1080 
1081  h->width = 16 * h->mb_width;
1082  h->height = 16 * h->mb_height;
1083 
1084  ret = init_dimensions(h);
1085  if (ret < 0)
1086  return ret;
1087 
 /* Propagate VUI colour information when present in the SPS. */
1088  if (sps->video_signal_type_present_flag) {
1089  h->avctx->color_range = sps->full_range > 0 ? AVCOL_RANGE_JPEG
1090  : AVCOL_RANGE_MPEG;
1091  if (sps->colour_description_present_flag) {
1092  if (h->avctx->colorspace != sps->colorspace)
1093  needs_reinit = 1;
1094  h->avctx->color_primaries = sps->color_primaries;
1095  h->avctx->color_trc = sps->color_trc;
1096  h->avctx->colorspace = sps->colorspace;
1097  }
1098  }
1099 
 /* An alternative-transfer SEI overrides the transfer characteristic. */
1100  if (h->sei.alternative_transfer.present &&
1101  av_color_transfer_name(h->sei.alternative_transfer.preferred_transfer_characteristics) &&
1102  h->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
1103  h->avctx->color_trc = h->sei.alternative_transfer.preferred_transfer_characteristics;
1104  }
1105  }
1106 
1107  if (!h->context_initialized || must_reinit || needs_reinit) {
1108  int flush_changes = h->context_initialized;
1109  h->context_initialized = 0;
 /* Mid-picture reinit is not supported: only the first slice context may
  * trigger it. */
1110  if (sl != h->slice_ctx) {
1111  av_log(h->avctx, AV_LOG_ERROR,
1112  "changing width %d -> %d / height %d -> %d on "
1113  "slice %d\n",
1114  h->width, h->avctx->coded_width,
1115  h->height, h->avctx->coded_height,
1116  h->current_slice + 1);
1117  return AVERROR_INVALIDDATA;
1118  }
1119 
1120  av_assert1(first_slice);
1121 
 /* NOTE(review): original line 1123 (the flush statement guarded by this
  * if) is missing from this extraction — restore from upstream. */
1122  if (flush_changes)
1124 
1125  if ((ret = get_pixel_format(h, 1)) < 0)
1126  return ret;
1127  h->avctx->pix_fmt = ret;
1128 
1129  av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
1130  "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt));
1131 
1132  if ((ret = h264_slice_header_init(h)) < 0) {
1133  av_log(h->avctx, AV_LOG_ERROR,
1134  "h264_slice_header_init() failed\n");
1135  return ret;
1136  }
1137  }
1138 
1139  return 0;
1140 }
1141 
/*
 * Export per-frame properties derived from the SPS and SEI messages onto the
 * current picture's AVFrame: interlacing/repeat flags, field order, stereo 3D
 * side data, display orientation, AFD, A53 captions and SMPTE timecodes.
 * Returns 0 on success or AVERROR(ENOMEM) on side-data allocation failure.
 *
 * NOTE(review): extraction artifacts — the function signature (original line
 * 1142, presumably h264_export_frame_props) is missing, as are several case
 * labels and statements inside the switches (original lines 1157, 1159-1160,
 * 1163-1165, 1171-1172, 1178, 1181, 1191, 1225, 1228, 1231, 1234, 1236,
 * 1240, 1243, 1270, 1280, 1292, 1305). Restore from upstream before use.
 */
1143 {
1144  const SPS *sps = h->ps.sps;
1145  H264Picture *cur = h->cur_pic_ptr;
1146 
1147  cur->f->interlaced_frame = 0;
1148  cur->f->repeat_pict = 0;
1149 
1150  /* Signal interlacing information externally. */
1151  /* Prioritize picture timing SEI information over used
1152  * decoding process if it exists. */
1153 
1154  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1155  H264SEIPictureTiming *pt = &h->sei.picture_timing;
 /* Map SEI pic_struct to interlaced/repeat flags; case labels are
  * missing from this extraction (see header note). */
1156  switch (pt->pic_struct) {
1158  break;
1161  cur->f->interlaced_frame = 1;
1162  break;
1166  cur->f->interlaced_frame = 1;
1167  else
1168  // try to flag soft telecine progressive
1169  cur->f->interlaced_frame = h->prev_interlaced_frame;
1170  break;
1173  /* Signal the possibility of telecined film externally
1174  * (pic_struct 5,6). From these hints, let the applications
1175  * decide if they apply deinterlacing. */
1176  cur->f->repeat_pict = 1;
1177  break;
1179  cur->f->repeat_pict = 2;
1180  break;
1182  cur->f->repeat_pict = 4;
1183  break;
1184  }
1185 
 /* ct_type (clock timestamp type) can override the interlacing flag for
  * frame/field pic_structs. */
1186  if ((pt->ct_type & 3) &&
1187  pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
1188  cur->f->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
1189  } else {
1190  /* Derive interlacing flag from used decoding process. */
 /* NOTE(review): the statement for this branch (original line 1191) is
  * missing from this extraction. */
1192  }
1193  h->prev_interlaced_frame = cur->f->interlaced_frame;
1194 
1195  if (cur->field_poc[0] != cur->field_poc[1]) {
1196  /* Derive top_field_first from field pocs. */
1197  cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
1198  } else {
1199  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1200  /* Use picture timing SEI information. Even if it is
1201  * information from a past frame, better than nothing. */
1202  if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
1203  h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
1204  cur->f->top_field_first = 1;
1205  else
1206  cur->f->top_field_first = 0;
1207  } else if (cur->f->interlaced_frame) {
1208  /* Default to top field first when pic_struct_present_flag
1209  * is not set but interlaced frame detected */
1210  cur->f->top_field_first = 1;
1211  } else {
1212  /* Most likely progressive */
1213  cur->f->top_field_first = 0;
1214  }
1215  }
1216 
 /* Frame-packing SEI -> AVStereo3D side data. */
1217  if (h->sei.frame_packing.present &&
1218  h->sei.frame_packing.arrangement_type <= 6 &&
1219  h->sei.frame_packing.content_interpretation_type > 0 &&
1220  h->sei.frame_packing.content_interpretation_type < 3) {
1221  H264SEIFramePacking *fp = &h->sei.frame_packing;
1222  AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f);
 /* Allocation failure is tolerated silently: side data is optional. */
1223  if (stereo) {
1224  switch (fp->arrangement_type) {
1226  stereo->type = AV_STEREO3D_CHECKERBOARD;
1227  break;
1229  stereo->type = AV_STEREO3D_COLUMNS;
1230  break;
1232  stereo->type = AV_STEREO3D_LINES;
1233  break;
1235  if (fp->quincunx_sampling_flag)
1237  else
1238  stereo->type = AV_STEREO3D_SIDEBYSIDE;
1239  break;
1241  stereo->type = AV_STEREO3D_TOPBOTTOM;
1242  break;
1244  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
1245  break;
1246  case H264_SEI_FPA_TYPE_2D:
1247  stereo->type = AV_STEREO3D_2D;
1248  break;
1249  }
1250 
1251  if (fp->content_interpretation_type == 2)
1252  stereo->flags = AV_STEREO3D_FLAG_INVERT;
1253 
1254  if (fp->arrangement_type == H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL) {
1255  if (fp->current_frame_is_frame0_flag)
1256  stereo->view = AV_STEREO3D_VIEW_LEFT;
1257  else
1258  stereo->view = AV_STEREO3D_VIEW_RIGHT;
1259  }
1260  }
1261  }
1262 
 /* Display-orientation SEI -> 3x3 rotation matrix side data. */
1263  if (h->sei.display_orientation.present &&
1264  (h->sei.display_orientation.anticlockwise_rotation ||
1265  h->sei.display_orientation.hflip ||
1266  h->sei.display_orientation.vflip)) {
1267  H264SEIDisplayOrientation *o = &h->sei.display_orientation;
 /* anticlockwise_rotation is a 16.16 fixed-point fraction of 360 degrees. */
1268  double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
1269  AVFrameSideData *rotation = av_frame_new_side_data(cur->f,
1271  sizeof(int32_t) * 9);
1272  if (rotation) {
1273  av_display_rotation_set((int32_t *)rotation->data, angle);
1274  av_display_matrix_flip((int32_t *)rotation->data,
1275  o->hflip, o->vflip);
1276  }
1277  }
1278 
 /* Active Format Description SEI -> single-byte side data (consumed once). */
1279  if (h->sei.afd.present) {
1281  sizeof(uint8_t));
1282 
1283  if (sd) {
1284  *sd->data = h->sei.afd.active_format_description;
1285  h->sei.afd.present = 0;
1286  }
1287  }
1288 
 /* A53 closed captions: ownership of the buffer moves to the frame on
  * success; on failure the buffer is dropped. */
1289  if (h->sei.a53_caption.buf_ref) {
1290  H264SEIA53Caption *a53 = &h->sei.a53_caption;
1291 
1293  if (!sd)
1294  av_buffer_unref(&a53->buf_ref);
1295  a53->buf_ref = NULL;
1296 
1297  h->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
1298  }
1299 
 /* SMPTE 12-1 timecodes from the picture timing SEI, packed into 32-bit
  * BCD words (count in word 0, up to 3 timecodes after it). */
1300  if (h->sei.picture_timing.timecode_cnt > 0) {
1301  uint32_t tc = 0;
1302  uint32_t *tc_sd;
1303 
1304  AVFrameSideData *tcside = av_frame_new_side_data(cur->f,
1306  sizeof(uint32_t)*4);
1307  if (!tcside)
1308  return AVERROR(ENOMEM);
1309 
1310  tc_sd = (uint32_t*)tcside->data;
1311  tc_sd[0] = h->sei.picture_timing.timecode_cnt;
1312 
1313  for (int i = 0; i < tc_sd[0]; i++) {
1314  uint32_t frames;
1315 
1316  /* For SMPTE 12-M timecodes, frame count is a special case if > 30 FPS.
1317  See SMPTE ST 12-1:2014 Sec 12.1 for more info. */
1318  if (av_cmp_q(h->avctx->framerate, (AVRational) {30, 1}) == 1) {
1319  frames = h->sei.picture_timing.timecode[i].frame / 2;
1320  if (h->sei.picture_timing.timecode[i].frame % 2 == 1) {
1321  if (av_cmp_q(h->avctx->framerate, (AVRational) {50, 1}) == 0)
1322  tc |= (1 << 7);
1323  else
1324  tc |= (1 << 23);
1325  }
1326  } else {
1327  frames = h->sei.picture_timing.timecode[i].frame;
1328  }
1329 
 /* Pack drop-frame flag and BCD hh:mm:ss:ff fields into one word. */
1330  tc |= h->sei.picture_timing.timecode[i].dropframe << 30;
1331  tc |= (frames / 10) << 28;
1332  tc |= (frames % 10) << 24;
1333  tc |= (h->sei.picture_timing.timecode[i].seconds / 10) << 20;
1334  tc |= (h->sei.picture_timing.timecode[i].seconds % 10) << 16;
1335  tc |= (h->sei.picture_timing.timecode[i].minutes / 10) << 12;
1336  tc |= (h->sei.picture_timing.timecode[i].minutes % 10) << 8;
1337  tc |= (h->sei.picture_timing.timecode[i].hours / 10) << 4;
1338  tc |= (h->sei.picture_timing.timecode[i].hours % 10);
1339 
1340  tc_sd[i + 1] = tc;
1341  }
1342  h->sei.picture_timing.timecode_cnt = 0;
1343  }
1344 
1345  return 0;
1346 }
1347 
/*
 * Select which delayed picture (if any) should be output next, maintaining
 * the POC-based reorder buffer (h->delayed_pic / h->last_pocs) and growing
 * h->avctx->has_b_frames when the stream reorders more than advertised.
 * Sets h->next_output_pic (or NULL when no picture is ready / the candidate
 * is unrecovered and corrupt output is disabled). Always returns 0.
 *
 * NOTE(review): extraction artifacts — the function signature (original line
 * 1348, presumably h264_select_output_frame) and original line 1392 (a
 * statement between the pics count loop and the delayed_pic insertion,
 * presumably an assertion on pics) are missing from this copy.
 */
1349 {
1350  const SPS *sps = h->ps.sps;
1351  H264Picture *out = h->cur_pic_ptr;
1352  H264Picture *cur = h->cur_pic_ptr;
1353  int i, pics, out_of_order, out_idx;
1354 
1355  cur->mmco_reset = h->mmco_reset;
1356  h->mmco_reset = 0;
1357 
1358  if (sps->bitstream_restriction_flag ||
1359  h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
1360  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
1361  }
1362 
 /* Insert cur->poc into the sorted last_pocs window; i ends up at the
  * insertion position, from which the reorder depth is derived. */
1363  for (i = 0; 1; i++) {
1364  if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
1365  if(i)
1366  h->last_pocs[i-1] = cur->poc;
1367  break;
1368  } else if(i) {
1369  h->last_pocs[i-1]= h->last_pocs[i];
1370  }
1371  }
1372  out_of_order = MAX_DELAYED_PIC_COUNT - i;
1373  if( cur->f->pict_type == AV_PICTURE_TYPE_B
1374  || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - (int64_t)h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
1375  out_of_order = FFMAX(out_of_order, 1);
 /* A full-window violation means the POCs are inconsistent: reset the
  * tracking window instead of growing the reorder buffer. */
1376  if (out_of_order == MAX_DELAYED_PIC_COUNT) {
1377  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
1378  for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
1379  h->last_pocs[i] = INT_MIN;
1380  h->last_pocs[0] = cur->poc;
1381  cur->mmco_reset = 1;
1382  } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
1383  int loglevel = h->avctx->frame_number > 1 ? AV_LOG_WARNING : AV_LOG_VERBOSE;
1384  av_log(h->avctx, loglevel, "Increasing reorder buffer to %d\n", out_of_order);
1385  h->avctx->has_b_frames = out_of_order;
1386  }
1387 
1388  pics = 0;
1389  while (h->delayed_pic[pics])
1390  pics++;
1391 
 /* NOTE(review): original line 1392 is missing from this extraction. */
1393 
1394  h->delayed_pic[pics++] = cur;
 /* Keep the picture alive while it waits in the output queue. */
1395  if (cur->reference == 0)
1396  cur->reference = DELAYED_PIC_REF;
1397 
 /* Pick the lowest-POC delayed picture, stopping the scan at a key frame
  * or MMCO reset (output order restarts there). */
1398  out = h->delayed_pic[0];
1399  out_idx = 0;
1400  for (i = 1; h->delayed_pic[i] &&
1401  !h->delayed_pic[i]->f->key_frame &&
1402  !h->delayed_pic[i]->mmco_reset;
1403  i++)
1404  if (h->delayed_pic[i]->poc < out->poc) {
1405  out = h->delayed_pic[i];
1406  out_idx = i;
1407  }
1408  if (h->avctx->has_b_frames == 0 &&
1409  (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
1410  h->next_outputed_poc = INT_MIN;
1411  out_of_order = out->poc < h->next_outputed_poc;
1412 
 /* Remove the candidate from the queue when it will be emitted or when it
  * is hopelessly out of order. */
1413  if (out_of_order || pics > h->avctx->has_b_frames) {
1414  out->reference &= ~DELAYED_PIC_REF;
1415  for (i = out_idx; h->delayed_pic[i]; i++)
1416  h->delayed_pic[i] = h->delayed_pic[i + 1];
1417  }
1418  if (!out_of_order && pics > h->avctx->has_b_frames) {
1419  h->next_output_pic = out;
1420  if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
1421  h->next_outputed_poc = INT_MIN;
1422  } else
1423  h->next_outputed_poc = out->poc;
1424 
1425  if (out->recovered) {
1426  // We have reached a recovery point and all frames after it in
1427  // display order are "recovered".
1428  h->frame_recovered |= FRAME_RECOVERED_SEI;
1429  }
1430  out->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
1431 
 /* Suppress (or flag) unrecovered frames depending on the corrupt-output
  * codec flags. */
1432  if (!out->recovered) {
1433  if (!(h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) &&
1434  !(h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)) {
1435  h->next_output_pic = NULL;
1436  } else {
1437  out->f->flags |= AV_FRAME_FLAG_CORRUPT;
1438  }
1439  }
1440  } else {
1441  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
1442  }
1443 
1444  return 0;
1445 }
1446 
1447 /* This function is called right after decoding the slice header for a first
1448  * slice in a field (or a frame). It decides whether we are decoding a new frame
1449  * or a second field in a pair and does the necessary setup.
1450  */
/* NOTE(review): extraction artifacts — the first line of the signature
 * (original line 1451, presumably "static int h264_field_start(H264Context *h,
 * const H264SliceContext *sl,") is missing, as are original lines 1578
 * (the call whose result feeds the following err_recognition check,
 * presumably ff_h264_execute_ref_pic_marking), 1657 and 1717/1721 (the calls
 * whose return values feed the trailing "if (ret < 0)" checks). Restore from
 * upstream before compiling. */
1452  const H2645NAL *nal, int first_slice)
1453 {
1454  int i;
1455  const SPS *sps;
1456 
1457  int last_pic_structure, last_pic_droppable, ret;
1458 
 /* Activate parameter sets and (re)initialize the context if needed. */
1459  ret = h264_init_ps(h, sl, first_slice);
1460  if (ret < 0)
1461  return ret;
1462 
1463  sps = h->ps.sps;
1464 
1465  if (sps && sps->bitstream_restriction_flag &&
1466  h->avctx->has_b_frames < sps->num_reorder_frames) {
1467  h->avctx->has_b_frames = sps->num_reorder_frames;
1468  }
1469 
 /* Remember the previous picture's state so it can be restored on error. */
1470  last_pic_droppable = h->droppable;
1471  last_pic_structure = h->picture_structure;
1472  h->droppable = (nal->ref_idc == 0);
1473  h->picture_structure = sl->picture_structure;
1474 
1475  h->poc.frame_num = sl->frame_num;
1476  h->poc.poc_lsb = sl->poc_lsb;
1477  h->poc.delta_poc_bottom = sl->delta_poc_bottom;
1478  h->poc.delta_poc[0] = sl->delta_poc[0];
1479  h->poc.delta_poc[1] = sl->delta_poc[1];
1480 
1481  /* Shorten frame num gaps so we don't have to allocate reference
1482  * frames just to throw them away */
1483  if (h->poc.frame_num != h->poc.prev_frame_num) {
1484  int unwrap_prev_frame_num = h->poc.prev_frame_num;
1485  int max_frame_num = 1 << sps->log2_max_frame_num;
1486 
1487  if (unwrap_prev_frame_num > h->poc.frame_num)
1488  unwrap_prev_frame_num -= max_frame_num;
1489 
1490  if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
1491  unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
1492  if (unwrap_prev_frame_num < 0)
1493  unwrap_prev_frame_num += max_frame_num;
1494 
1495  h->poc.prev_frame_num = unwrap_prev_frame_num;
1496  }
1497  }
1498 
1499  /* See if we have a decoded first field looking for a pair...
1500  * Here, we're using that to see if we should mark previously
1501  * decode frames as "finished".
1502  * We have to do that before the "dummy" in-between frame allocation,
1503  * since that can modify h->cur_pic_ptr. */
1504  if (h->first_field) {
1505  int last_field = last_pic_structure == PICT_BOTTOM_FIELD;
1506  av_assert0(h->cur_pic_ptr);
1507  av_assert0(h->cur_pic_ptr->f->buf[0]);
1508  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1509 
1510  /* Mark old field/frame as completed */
1511  if (h->cur_pic_ptr->tf.owner[last_field] == h->avctx) {
1512  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_field);
1513  }
1514 
1515  /* figure out if we have a complementary field pair */
1516  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1517  /* Previous field is unmatched. Don't display it, but let it
1518  * remain for reference if marked as such. */
1519  if (last_pic_structure != PICT_FRAME) {
1520  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1521  last_pic_structure == PICT_TOP_FIELD);
1522  }
1523  } else {
1524  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1525  /* This and previous field were reference, but had
1526  * different frame_nums. Consider this field first in
1527  * pair. Throw away previous field except for reference
1528  * purposes. */
1529  if (last_pic_structure != PICT_FRAME) {
1530  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1531  last_pic_structure == PICT_TOP_FIELD);
1532  }
1533  } else {
1534  /* Second field in complementary pair */
1535  if (!((last_pic_structure == PICT_TOP_FIELD &&
1536  h->picture_structure == PICT_BOTTOM_FIELD) ||
1537  (last_pic_structure == PICT_BOTTOM_FIELD &&
1538  h->picture_structure == PICT_TOP_FIELD))) {
1539  av_log(h->avctx, AV_LOG_ERROR,
1540  "Invalid field mode combination %d/%d\n",
1541  last_pic_structure, h->picture_structure);
1542  h->picture_structure = last_pic_structure;
1543  h->droppable = last_pic_droppable;
1544  return AVERROR_INVALIDDATA;
1545  } else if (last_pic_droppable != h->droppable) {
1546  avpriv_request_sample(h->avctx,
1547  "Found reference and non-reference fields in the same frame, which");
1548  h->picture_structure = last_pic_structure;
1549  h->droppable = last_pic_droppable;
1550  return AVERROR_PATCHWELCOME;
1551  }
1552  }
1553  }
1554  }
1555 
 /* Conceal frame_num gaps by generating dummy frames until the expected
  * frame_num is reached. */
1556  while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
1557  h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
1558  H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
1559  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
1560  h->poc.frame_num, h->poc.prev_frame_num);
1561  if (!sps->gaps_in_frame_num_allowed_flag)
1562  for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
1563  h->last_pocs[i] = INT_MIN;
1564  ret = h264_frame_start(h);
1565  if (ret < 0) {
1566  h->first_field = 0;
1567  return ret;
1568  }
1569 
1570  h->poc.prev_frame_num++;
1571  h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num;
1572  h->cur_pic_ptr->frame_num = h->poc.prev_frame_num;
1573  h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
 /* The dummy frame is never actually decoded: mark both fields done. */
1574  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
1575  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
1576 
1577  h->explicit_ref_marking = 0;
 /* NOTE(review): original line 1578 (the call that sets ret here) is
  * missing from this extraction. */
1579  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1580  return ret;
1581  /* Error concealment: If a ref is missing, copy the previous ref
1582  * in its place.
1583  * FIXME: Avoiding a memcpy would be nice, but ref handling makes
1584  * many assumptions about there being no actual duplicates.
1585  * FIXME: This does not copy padding for out-of-frame motion
1586  * vectors. Given we are concealing a lost frame, this probably
1587  * is not noticeable by comparison, but it should be fixed. */
1588  if (h->short_ref_count) {
1589  if (prev &&
1590  h->short_ref[0]->f->width == prev->f->width &&
1591  h->short_ref[0]->f->height == prev->f->height &&
1592  h->short_ref[0]->f->format == prev->f->format) {
1593  ff_thread_await_progress(&prev->tf, INT_MAX, 0);
1594  if (prev->field_picture)
1595  ff_thread_await_progress(&prev->tf, INT_MAX, 1);
1596  av_image_copy(h->short_ref[0]->f->data,
1597  h->short_ref[0]->f->linesize,
1598  (const uint8_t **)prev->f->data,
1599  prev->f->linesize,
1600  prev->f->format,
1601  prev->f->width,
1602  prev->f->height);
1603  h->short_ref[0]->poc = prev->poc + 2;
1604  }
1605  h->short_ref[0]->frame_num = h->poc.prev_frame_num;
1606  }
1607  }
1608 
1609  /* See if we have a decoded first field looking for a pair...
1610  * We're using that to see whether to continue decoding in that
1611  * frame, or to allocate a new one. */
1612  if (h->first_field) {
1613  av_assert0(h->cur_pic_ptr);
1614  av_assert0(h->cur_pic_ptr->f->buf[0]);
1615  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1616 
1617  /* figure out if we have a complementary field pair */
1618  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1619  /* Previous field is unmatched. Don't display it, but let it
1620  * remain for reference if marked as such. */
1621  h->missing_fields ++;
1622  h->cur_pic_ptr = NULL;
1623  h->first_field = FIELD_PICTURE(h);
1624  } else {
1625  h->missing_fields = 0;
1626  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1627  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1628  h->picture_structure==PICT_BOTTOM_FIELD);
1629  /* This and the previous field had different frame_nums.
1630  * Consider this field first in pair. Throw away previous
1631  * one except for reference purposes. */
1632  h->first_field = 1;
1633  h->cur_pic_ptr = NULL;
1634  } else if (h->cur_pic_ptr->reference & DELAYED_PIC_REF) {
1635  /* This frame was already output, we cannot draw into it
1636  * anymore.
1637  */
1638  h->first_field = 1;
1639  h->cur_pic_ptr = NULL;
1640  } else {
1641  /* Second field in complementary pair */
1642  h->first_field = 0;
1643  }
1644  }
1645  } else {
1646  /* Frame or first field in a potentially complementary pair */
1647  h->first_field = FIELD_PICTURE(h);
1648  }
1649 
 /* New frame (or first field): allocate it; second field: keep decoding
  * into the existing picture. */
1650  if (!FIELD_PICTURE(h) || h->first_field) {
1651  if (h264_frame_start(h) < 0) {
1652  h->first_field = 0;
1653  return AVERROR_INVALIDDATA;
1654  }
1655  } else {
1656  int field = h->picture_structure == PICT_BOTTOM_FIELD;
 /* NOTE(review): original line 1657 is missing from this extraction. */
1658  h->cur_pic_ptr->tf.owner[field] = h->avctx;
1659  }
1660  /* Some macroblocks can be accessed before they're available in case
1661  * of lost slices, MBAFF or threading. */
1662  if (FIELD_PICTURE(h)) {
1663  for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
1664  memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
1665  } else {
1666  memset(h->slice_table, -1,
1667  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
1668  }
1669 
1670  ret = ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
1671  h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);
1672  if (ret < 0)
1673  return ret;
1674 
 /* Copy the slice's memory-management control operations to frame level. */
1675  memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
1676  h->nb_mmco = sl->nb_mmco;
1677  h->explicit_ref_marking = sl->explicit_ref_marking;
1678 
1679  h->picture_idr = nal->type == H264_NAL_IDR_SLICE;
1680 
 /* Track recovery-point SEI so output can be gated on a recovered frame. */
1681  if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
1682  const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
1683 
1684  if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
1685  h->valid_recovery_point = 1;
1686 
1687  if ( h->recovery_frame < 0
1688  || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
1689  h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
1690 
1691  if (!h->valid_recovery_point)
1692  h->recovery_frame = h->poc.frame_num;
1693  }
1694  }
1695 
1696  h->cur_pic_ptr->f->key_frame |= (nal->type == H264_NAL_IDR_SLICE);
1697 
1698  if (nal->type == H264_NAL_IDR_SLICE ||
1699  (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
1700  h->recovery_frame = -1;
1701  h->cur_pic_ptr->recovered = 1;
1702  }
1703  // If we have an IDR, all frames after it in decoded order are
1704  // "recovered".
1705  if (nal->type == H264_NAL_IDR_SLICE)
1706  h->frame_recovered |= FRAME_RECOVERED_IDR;
1707 #if 1
1708  h->cur_pic_ptr->recovered |= h->frame_recovered;
1709 #else
1710  h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
1711 #endif
1712 
1713  /* Set the frame properties/side data. Only done for the second field in
1714  * field coded frames, since some SEI information is present for each field
1715  * and is merged by the SEI parsing code. */
1716  if (!FIELD_PICTURE(h) || !h->first_field || h->missing_fields > 1) {
 /* NOTE(review): original lines 1717 and 1721 (the calls that set ret
  * before each check, presumably h264_select_output_frame and
  * h264_export_frame_props) are missing from this extraction. */
1718  if (ret < 0)
1719  return ret;
1720 
1722  if (ret < 0)
1723  return ret;
1724  }
1725 
1726  return 0;
1727 }
1728 
/* Parse one H.264 slice header from sl->gb into the slice context, without
 * touching decoder-global state (that happens later, in the field/frame
 * setup). Returns 0 on success or a negative AVERROR on invalid bitstream
 * data.
 *
 * NOTE(review): extraction artifacts — the first line of the signature
 * (original line 1729, presumably "static int h264_slice_header_parse(
 * const H264Context *h, H264SliceContext *sl,") is missing, as are original
 * lines 1763 (the second half of the IDR slice-type condition), 1849 and
 * 1856 (the calls whose results feed the following checks, presumably
 * ff_h264_parse_ref_count and ff_h264_decode_ref_pic_list_reordering),
 * 1870-1871 (the tail of the weighted-prediction condition and the
 * ff_h264_pred_weight_table call) and 1907 (the SI branch of the SP/SI
 * check). Restore from upstream before compiling. */
1730  const H2645NAL *nal)
1731 {
1732  const SPS *sps;
1733  const PPS *pps;
1734  int ret;
1735  unsigned int slice_type, tmp, i;
1736  int field_pic_flag, bottom_field_flag;
1737  int first_slice = sl == h->slice_ctx && !h->current_slice;
1738  int picture_structure;
1739 
1740  if (first_slice)
1741  av_assert0(!h->setup_finished);
1742 
1743  sl->first_mb_addr = get_ue_golomb_long(&sl->gb);
1744 
 /* slice_type 0-4 = P,B,I,SP,SI; 5-9 are the same types with the
  * "all slices in this picture share the type" property. */
1745  slice_type = get_ue_golomb_31(&sl->gb);
1746  if (slice_type > 9) {
1747  av_log(h->avctx, AV_LOG_ERROR,
1748  "slice type %d too large at %d\n",
1749  slice_type, sl->first_mb_addr);
1750  return AVERROR_INVALIDDATA;
1751  }
1752  if (slice_type > 4) {
1753  slice_type -= 5;
1754  sl->slice_type_fixed = 1;
1755  } else
1756  sl->slice_type_fixed = 0;
1757 
1758  slice_type = ff_h264_golomb_to_pict_type[slice_type];
1759  sl->slice_type = slice_type;
 /* slice_type_nos folds SP->P and SI->I ("no switching" type). */
1760  sl->slice_type_nos = slice_type & 3;
1761 
 /* NOTE(review): original line 1763 (the rest of this condition) is
  * missing from this extraction. */
1762  if (nal->type == H264_NAL_IDR_SLICE &&
1764  av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
1765  return AVERROR_INVALIDDATA;
1766  }
1767 
 /* Resolve the referenced PPS and, through it, the SPS. */
1768  sl->pps_id = get_ue_golomb(&sl->gb);
1769  if (sl->pps_id >= MAX_PPS_COUNT) {
1770  av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
1771  return AVERROR_INVALIDDATA;
1772  }
1773  if (!h->ps.pps_list[sl->pps_id]) {
1774  av_log(h->avctx, AV_LOG_ERROR,
1775  "non-existing PPS %u referenced\n",
1776  sl->pps_id);
1777  return AVERROR_INVALIDDATA;
1778  }
1779  pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
1780 
1781  if (!h->ps.sps_list[pps->sps_id]) {
1782  av_log(h->avctx, AV_LOG_ERROR,
1783  "non-existing SPS %u referenced\n", pps->sps_id);
1784  return AVERROR_INVALIDDATA;
1785  }
1786  sps = (const SPS*)h->ps.sps_list[pps->sps_id]->data;
1787 
1788  sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
 /* All slices of one picture must carry the same frame_num. */
1789  if (!first_slice) {
1790  if (h->poc.frame_num != sl->frame_num) {
1791  av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
1792  h->poc.frame_num, sl->frame_num);
1793  return AVERROR_INVALIDDATA;
1794  }
1795  }
1796 
1797  sl->mb_mbaff = 0;
1798 
1799  if (sps->frame_mbs_only_flag) {
1800  picture_structure = PICT_FRAME;
1801  } else {
1802  if (!sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
1803  av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
1804  return -1;
1805  }
1806  field_pic_flag = get_bits1(&sl->gb);
1807  if (field_pic_flag) {
1808  bottom_field_flag = get_bits1(&sl->gb);
1809  picture_structure = PICT_TOP_FIELD + bottom_field_flag;
1810  } else {
1811  picture_structure = PICT_FRAME;
1812  }
1813  }
1814  sl->picture_structure = picture_structure;
1815  sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;
1816 
 /* Field pictures use doubled picture numbers (spec 8.2.4.1). */
1817  if (picture_structure == PICT_FRAME) {
1818  sl->curr_pic_num = sl->frame_num;
1819  sl->max_pic_num = 1 << sps->log2_max_frame_num;
1820  } else {
1821  sl->curr_pic_num = 2 * sl->frame_num + 1;
1822  sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
1823  }
1824 
1825  if (nal->type == H264_NAL_IDR_SLICE)
1826  get_ue_golomb_long(&sl->gb); /* idr_pic_id */
1827 
 /* POC-related syntax elements, depending on the SPS poc_type. */
1828  if (sps->poc_type == 0) {
1829  sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
1830 
1831  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1832  sl->delta_poc_bottom = get_se_golomb(&sl->gb);
1833  }
1834 
1835  if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
1836  sl->delta_poc[0] = get_se_golomb(&sl->gb);
1837 
1838  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1839  sl->delta_poc[1] = get_se_golomb(&sl->gb);
1840  }
1841 
1842  sl->redundant_pic_count = 0;
1843  if (pps->redundant_pic_cnt_present)
1844  sl->redundant_pic_count = get_ue_golomb(&sl->gb);
1845 
1846  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1847  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
1848 
 /* NOTE(review): original line 1849 (the call these arguments belong to,
  * presumably ff_h264_parse_ref_count) is missing from this extraction. */
1850  &sl->gb, pps, sl->slice_type_nos,
1851  picture_structure, h->avctx);
1852  if (ret < 0)
1853  return ret;
1854 
1855  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
 /* NOTE(review): original line 1856 (the reordering call that sets ret)
  * is missing from this extraction. */
1857  if (ret < 0) {
1858  sl->ref_count[1] = sl->ref_count[0] = 0;
1859  return ret;
1860  }
1861  }
1862 
 /* Reset explicit weighted prediction state before (re)parsing it. */
1863  sl->pwt.use_weight = 0;
1864  for (i = 0; i < 2; i++) {
1865  sl->pwt.luma_weight_flag[i] = 0;
1866  sl->pwt.chroma_weight_flag[i] = 0;
1867  }
 /* NOTE(review): original lines 1870-1871 (the remainder of this
  * condition and the weight-table parsing call) are missing from this
  * extraction. */
1868  if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
1869  (pps->weighted_bipred_idc == 1 &&
1872  sl->slice_type_nos, &sl->pwt,
1873  picture_structure, h->avctx);
1874  if (ret < 0)
1875  return ret;
1876  }
1877 
1878  sl->explicit_ref_marking = 0;
1879  if (nal->ref_idc) {
1880  ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx);
1881  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1882  return AVERROR_INVALIDDATA;
1883  }
1884 
1885  if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
1886  tmp = get_ue_golomb_31(&sl->gb);
1887  if (tmp > 2) {
1888  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
1889  return AVERROR_INVALIDDATA;
1890  }
1891  sl->cabac_init_idc = tmp;
1892  }
1893 
1894  sl->last_qscale_diff = 0;
 /* (unsigned) cast avoids signed overflow UB on a hostile slice_qp_delta. */
1895  tmp = pps->init_qp + (unsigned)get_se_golomb(&sl->gb);
1896  if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
1897  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
1898  return AVERROR_INVALIDDATA;
1899  }
1900  sl->qscale = tmp;
1901  sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
1902  sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
1903  // FIXME qscale / qp ... stuff
1904  if (sl->slice_type == AV_PICTURE_TYPE_SP)
1905  get_bits1(&sl->gb); /* sp_for_switch_flag */
 /* NOTE(review): original line 1907 (the SI half of this condition) is
  * missing from this extraction. */
1906  if (sl->slice_type == AV_PICTURE_TYPE_SP ||
1908  get_se_golomb(&sl->gb); /* slice_qs_delta */
1909 
1910  sl->deblocking_filter = 1;
1911  sl->slice_alpha_c0_offset = 0;
1912  sl->slice_beta_offset = 0;
1913  if (pps->deblocking_filter_parameters_present) {
1914  tmp = get_ue_golomb_31(&sl->gb);
1915  if (tmp > 2) {
1916  av_log(h->avctx, AV_LOG_ERROR,
1917  "deblocking_filter_idc %u out of range\n", tmp);
1918  return AVERROR_INVALIDDATA;
1919  }
 /* Bitstream disable_deblocking_filter_idc: 0=on, 1=off, 2=on but not
  * across slice boundaries; internally 0 and 1 are swapped. */
1920  sl->deblocking_filter = tmp;
1921  if (sl->deblocking_filter < 2)
1922  sl->deblocking_filter ^= 1; // 1<->0
1923 
1924  if (sl->deblocking_filter) {
1925  int slice_alpha_c0_offset_div2 = get_se_golomb(&sl->gb);
1926  int slice_beta_offset_div2 = get_se_golomb(&sl->gb);
1927  if (slice_alpha_c0_offset_div2 > 6 ||
1928  slice_alpha_c0_offset_div2 < -6 ||
1929  slice_beta_offset_div2 > 6 ||
1930  slice_beta_offset_div2 < -6) {
1931  av_log(h->avctx, AV_LOG_ERROR,
1932  "deblocking filter parameters %d %d out of range\n",
1933  slice_alpha_c0_offset_div2, slice_beta_offset_div2);
1934  return AVERROR_INVALIDDATA;
1935  }
1936  sl->slice_alpha_c0_offset = slice_alpha_c0_offset_div2 * 2;
1937  sl->slice_beta_offset = slice_beta_offset_div2 * 2;
1938  }
1939  }
1940 
1941  return 0;
1942 }
1943 
1944 /* do all the per-slice initialization needed before we can start decoding the
1945  * actual MBs */
1947  const H2645NAL *nal)
1948 {
 /* NOTE(review): the declaration line for this function was lost in
  * extraction; it is h264_slice_init(H264Context*, H264SliceContext*,
  * const H2645NAL*) — confirm against the upstream file. */
1949  int i, j, ret = 0;
1950 
 /* An IDR slice may not be mixed with non-IDR slices in one picture. */
1951  if (h->picture_idr && nal->type != H264_NAL_IDR_SLICE) {
1952  av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of IDR and non-IDR slices\n");
1953  return AVERROR_INVALIDDATA;
1954  }
1955 
1956  av_assert1(h->mb_num == h->mb_width * h->mb_height);
 /* Reject a first_mb_in_slice that addresses a MB outside the picture
  * (checked both before and after the field/MBAFF doubling shift). */
1957  if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
1958  sl->first_mb_addr >= h->mb_num) {
1959  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n")
1960  return AVERROR_INVALIDDATA;
1961  }
 /* Derive the resync (slice start) MB coordinates from first_mb_addr. */
1962  sl->resync_mb_x = sl->mb_x = sl->first_mb_addr % h->mb_width;
1963  sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
1965  if (h->picture_structure == PICT_BOTTOM_FIELD)
1966  sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
1967  av_assert1(sl->mb_y < h->mb_height);
1968 
1969  ret = ff_h264_build_ref_list(h, sl);
1970  if (ret < 0)
1971  return ret;
1972 
 /* weighted_bipred_idc == 2: implicit weighted prediction; build the
  * weight tables (plus per-field tables when MBAFF is in use). */
1973  if (h->ps.pps->weighted_bipred_idc == 2 &&
1975  implicit_weight_table(h, sl, -1);
1976  if (FRAME_MBAFF(h)) {
1977  implicit_weight_table(h, sl, 0);
1978  implicit_weight_table(h, sl, 1);
1979  }
1980  }
1981 
1984  if (!h->setup_finished)
1986 
 /* Honour the user's skip_loop_filter setting by disabling deblocking
  * for slice categories at or below the requested discard level. */
1987  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
1988  (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
1989  h->nal_unit_type != H264_NAL_IDR_SLICE) ||
1990  (h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
1992  (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
1994  (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
1995  nal->ref_idc == 0))
1996  sl->deblocking_filter = 0;
1997 
 /* With multiple slice contexts, cross-slice deblocking either gets
  * restricted to slice boundaries (FAST) or postponed until all slices
  * of the picture are decoded. */
1998  if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
1999  if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
2000  /* Cheat slightly for speed:
2001  * Do not bother to deblock across slices. */
2002  sl->deblocking_filter = 2;
2003  } else {
2004  h->postpone_filter = 1;
2005  }
2006  }
 /* Conservative QP threshold below which the loop filter is a no-op;
  * used by fill_filter_caches() to skip whole macroblocks. */
2007  sl->qp_thresh = 15 -
2009  FFMAX3(0,
2010  h->ps.pps->chroma_qp_index_offset[0],
2011  h->ps.pps->chroma_qp_index_offset[1]) +
2012  6 * (h->ps.sps->bit_depth_luma - 8);
2013 
2014  sl->slice_num = ++h->current_slice;
2015 
2016  if (sl->slice_num)
2017  h->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= sl->resync_mb_y;
 /* Heuristic detection of slice_num wrap-around (> MAX_SLICES slices):
  * only warns, since decoding can continue with possible artifacts. */
2018  if ( h->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= sl->resync_mb_y
2019  && h->slice_row[sl->slice_num&(MAX_SLICES-1)] <= sl->resync_mb_y
2020  && sl->slice_num >= MAX_SLICES) {
2021  //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
2022  av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
2023  }
2024 
 /* Build the per-slice ref2frm tables for both reference lists: map a
  * reference index to 4*frame_id + reference flags, by matching the
  * underlying frame buffer against the short/long-term reference sets.
  * 60 acts as the "not found" frame id. */
2025  for (j = 0; j < 2; j++) {
2026  int id_list[16];
2027  int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
2028  for (i = 0; i < 16; i++) {
2029  id_list[i] = 60;
2030  if (j < sl->list_count && i < sl->ref_count[j] &&
2031  sl->ref_list[j][i].parent->f->buf[0]) {
2032  int k;
2033  AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
2034  for (k = 0; k < h->short_ref_count; k++)
2035  if (h->short_ref[k]->f->buf[0]->buffer == buf) {
2036  id_list[i] = k;
2037  break;
2038  }
2039  for (k = 0; k < h->long_ref_count; k++)
2040  if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
2041  id_list[i] = h->short_ref_count + k;
2042  break;
2043  }
2044  }
2045  }
2046 
 /* Entries [0],[1] and [18],[19] are -1 sentinels; the frame-coded
  * mapping starts at +2, the MBAFF (field) mapping at +20. */
2047  ref2frm[0] =
2048  ref2frm[1] = -1;
2049  for (i = 0; i < 16; i++)
2050  ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
2051  ref2frm[18 + 0] =
2052  ref2frm[18 + 1] = -1;
2053  for (i = 16; i < 48; i++)
2054  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
2055  (sl->ref_list[j][i].reference & 3);
2056  }
2057 
 /* Optional one-line slice summary for -debug pict. */
2058  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
2059  av_log(h->avctx, AV_LOG_DEBUG,
2060  "slice:%d %s mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
2061  sl->slice_num,
2062  (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
2063  sl->mb_y * h->mb_width + sl->mb_x,
2065  sl->slice_type_fixed ? " fix" : "",
2066  nal->type == H264_NAL_IDR_SLICE ? " IDR" : "",
2067  h->poc.frame_num,
2068  h->cur_pic_ptr->field_poc[0],
2069  h->cur_pic_ptr->field_poc[1],
2070  sl->ref_count[0], sl->ref_count[1],
2071  sl->qscale,
2072  sl->deblocking_filter,
2074  sl->pwt.use_weight,
2075  sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
2076  sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
2077  }
2078 
2079  return 0;
2080 }
2081 
2083 {
 /* Parse one slice header, perform field/frame boundary handling, and
  * queue the slice for later decoding by ff_h264_execute_decode_slices().
  * NOTE(review): the signature line was lost in extraction — presumably
  * ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal);
  * confirm against the upstream file. */
2084  H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
2085  int first_slice = sl == h->slice_ctx && !h->current_slice;
2086  int ret;
2087 
2088  sl->gb = nal->gb;
2089 
2090  ret = h264_slice_header_parse(h, sl, nal);
2091  if (ret < 0)
2092  return ret;
2093 
2094  // discard redundant pictures
2095  if (sl->redundant_pic_count > 0) {
2096  sl->ref_count[0] = sl->ref_count[1] = 0;
2097  return 0;
2098  }
2099 
2100  if (sl->first_mb_addr == 0 || !h->current_slice) {
2101  if (h->setup_finished) {
2102  av_log(h->avctx, AV_LOG_ERROR, "Too many fields\n");
2103  return AVERROR_INVALIDDATA;
2104  }
2105  }
2106 
2107  if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
2108  if (h->current_slice) {
2109  // this slice starts a new field
2110  // first decode any pending queued slices
2111  if (h->nb_slice_ctx_queued) {
2112  H264SliceContext tmp_ctx;
2113 
2115  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
2116  return ret;
2117 
 /* Swap the just-parsed slice into slice_ctx[0] so decoding of the
  * new field starts from the first context. */
2118  memcpy(&tmp_ctx, h->slice_ctx, sizeof(tmp_ctx));
2119  memcpy(h->slice_ctx, sl, sizeof(tmp_ctx));
2120  memcpy(sl, &tmp_ctx, sizeof(tmp_ctx));
2121  sl = h->slice_ctx;
2122  }
2123 
2124  if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
2125  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2126  if (ret < 0)
2127  return ret;
2128  } else if (h->cur_pic_ptr && !FIELD_PICTURE(h) && !h->first_field && h->nal_unit_type == H264_NAL_IDR_SLICE) {
 /* An IDR slice arriving mid-frame: finish the broken frame and
  * mark it fully decoded so waiting threads do not stall. */
2129  av_log(h, AV_LOG_WARNING, "Broken frame packetizing\n");
2130  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2131  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
2132  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
2133  h->cur_pic_ptr = NULL;
2134  if (ret < 0)
2135  return ret;
2136  } else
2137  return AVERROR_INVALIDDATA;
2138  }
2139 
2140  if (!h->first_field) {
2141  if (h->cur_pic_ptr && !h->droppable) {
2142  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
2143  h->picture_structure == PICT_BOTTOM_FIELD);
2144  }
2145  h->cur_pic_ptr = NULL;
2146  }
2147  }
2148 
2149  if (!h->current_slice)
2150  av_assert0(sl == h->slice_ctx);
2151 
 /* Apply the user's skip_frame policy at the start of a new picture. */
2152  if (h->current_slice == 0 && !h->first_field) {
2153  if (
2154  (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
2155  (h->avctx->skip_frame >= AVDISCARD_BIDIR && sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
2156  (h->avctx->skip_frame >= AVDISCARD_NONINTRA && sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
2157  (h->avctx->skip_frame >= AVDISCARD_NONKEY && h->nal_unit_type != H264_NAL_IDR_SLICE && h->sei.recovery_point.recovery_frame_cnt < 0) ||
2158  h->avctx->skip_frame >= AVDISCARD_ALL) {
2159  return 0;
2160  }
2161  }
2162 
 /* Parameter sets must stay stable between the slices of one picture. */
2163  if (!first_slice) {
2164  const PPS *pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
2165 
2166  if (h->ps.pps->sps_id != pps->sps_id ||
2167  h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
2168  (h->setup_finished && h->ps.pps != pps)*/) {
2169  av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
2170  return AVERROR_INVALIDDATA;
2171  }
2172  if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data) {
2173  av_log(h->avctx, AV_LOG_ERROR,
2174  "SPS changed in the middle of the frame\n");
2175  return AVERROR_INVALIDDATA;
2176  }
2177  }
2178 
2179  if (h->current_slice == 0) {
2180  ret = h264_field_start(h, sl, nal, first_slice);
2181  if (ret < 0)
2182  return ret;
2183  } else {
2184  if (h->picture_structure != sl->picture_structure ||
2185  h->droppable != (nal->ref_idc == 0)) {
2186  av_log(h->avctx, AV_LOG_ERROR,
2187  "Changing field mode (%d -> %d) between slices is not allowed\n",
2188  h->picture_structure, sl->picture_structure);
2189  return AVERROR_INVALIDDATA;
2190  } else if (!h->cur_pic_ptr) {
2191  av_log(h->avctx, AV_LOG_ERROR,
2192  "unset cur_pic_ptr on slice %d\n",
2193  h->current_slice + 1);
2194  return AVERROR_INVALIDDATA;
2195  }
2196  }
2197 
2198  ret = h264_slice_init(h, sl, nal);
2199  if (ret < 0)
2200  return ret;
2201 
2202  h->nb_slice_ctx_queued++;
2203 
2204  return 0;
2205 }
2206 
2208 {
 /* Map an AV_PICTURE_TYPE_* slice type to the H.264 slice_type code
  * (P=0, B=1, I=2, SP=3, SI=4); AVERROR_INVALIDDATA for anything else.
  * NOTE(review): the signature line was lost in extraction. */
2209  switch (sl->slice_type) {
2210  case AV_PICTURE_TYPE_P:
2211  return 0;
2212  case AV_PICTURE_TYPE_B:
2213  return 1;
2214  case AV_PICTURE_TYPE_I:
2215  return 2;
2216  case AV_PICTURE_TYPE_SP:
2217  return 3;
2218  case AV_PICTURE_TYPE_SI:
2219  return 4;
2220  default:
2221  return AVERROR_INVALIDDATA;
2222  }
2223 }
2224 
2226  H264SliceContext *sl,
2227  int mb_type, int top_xy,
2228  int left_xy[LEFT_MBS],
2229  int top_type,
2230  int left_type[LEFT_MBS],
2231  int mb_xy, int list)
2232 {
 /* Fill the motion-vector and reference caches for one prediction list
  * of an inter macroblock, as needed by the deblocking filter.
  * mv_cache/ref_cache use the 8-wide scan8 layout: row -8 is the top
  * neighbour, column -1 the left neighbour. */
2233  int b_stride = h->b_stride;
2234  int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
2235  int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
2236  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
 /* Top neighbour: copy its bottom row of MVs/refs, or mark unused. */
2237  if (USES_LIST(top_type, list)) {
2238  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
2239  const int b8_xy = 4 * top_xy + 2;
 /* ref2frm offset 20 selects the MBAFF (field) mapping, 2 the
  * frame mapping — see h264_slice_init(). */
2240  const int *ref2frm = &h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2241  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
2242  ref_cache[0 - 1 * 8] =
2243  ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
2244  ref_cache[2 - 1 * 8] =
2245  ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
2246  } else {
2247  AV_ZERO128(mv_dst - 1 * 8);
2248  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2249  }
2250 
 /* Left neighbour: only when both MBs have the same interlacing. */
2251  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
2252  if (USES_LIST(left_type[LTOP], list)) {
2253  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
2254  const int b8_xy = 4 * left_xy[LTOP] + 1;
2255  const int *ref2frm = &h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2256  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
2257  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
2258  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
2259  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
2260  ref_cache[-1 + 0] =
2261  ref_cache[-1 + 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
2262  ref_cache[-1 + 16] =
2263  ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
2264  } else {
2265  AV_ZERO32(mv_dst - 1 + 0);
2266  AV_ZERO32(mv_dst - 1 + 8);
2267  AV_ZERO32(mv_dst - 1 + 16);
2268  AV_ZERO32(mv_dst - 1 + 24);
2269  ref_cache[-1 + 0] =
2270  ref_cache[-1 + 8] =
2271  ref_cache[-1 + 16] =
2272  ref_cache[-1 + 24] = LIST_NOT_USED;
2273  }
2274  }
2275  }
2276 
 /* Current MB does not use this list: zero MVs, mark refs unused. */
2277  if (!USES_LIST(mb_type, list)) {
2278  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
2279  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2280  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2281  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2282  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2283  return;
2284  }
2285 
 /* Current MB: broadcast the four 8x8-block ref indices into the
  * 4x4-granular cache rows... */
2286  {
2287  int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
2288  const int *ref2frm = &h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2289  uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
2290  uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
2291  AV_WN32A(&ref_cache[0 * 8], ref01);
2292  AV_WN32A(&ref_cache[1 * 8], ref01);
2293  AV_WN32A(&ref_cache[2 * 8], ref23);
2294  AV_WN32A(&ref_cache[3 * 8], ref23);
2295  }
2296 
 /* ...and copy its 4x4 motion vectors row by row. */
2297  {
2298  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
2299  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
2300  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
2301  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
2302  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
2303  }
2304 }
2305 
2306 /**
2307  * Fill the per-macroblock caches (neighbour types, mv/ref caches,
2307  * non-zero-count cache) needed by the deblocking filter for one MB.
2307  *
2308  * @return non zero if the loop filter can be skipped for this MB
2308  * (all relevant QPs are at or below the precomputed qp_thresh)
2308  */
2309 static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
2310 {
2311  const int mb_xy = sl->mb_xy;
2312  int top_xy, left_xy[LEFT_MBS];
2313  int top_type, left_type[LEFT_MBS];
2314  uint8_t *nnz;
2315  uint8_t *nnz_cache;
2316 
2317  top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));
2318 
 /* In MBAFF frames the left/top neighbours depend on the field flags
  * of the current and left macroblock pairs. */
2319  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
2320  if (FRAME_MBAFF(h)) {
2321  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
2322  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
2323  if (sl->mb_y & 1) {
2324  if (left_mb_field_flag != curr_mb_field_flag)
2325  left_xy[LTOP] -= h->mb_stride;
2326  } else {
2327  if (curr_mb_field_flag)
2328  top_xy += h->mb_stride &
2329  (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
2330  if (left_mb_field_flag != curr_mb_field_flag)
2331  left_xy[LBOT] += h->mb_stride;
2332  }
2333  }
2334 
2335  sl->top_mb_xy = top_xy;
2336  sl->left_mb_xy[LTOP] = left_xy[LTOP];
2337  sl->left_mb_xy[LBOT] = left_xy[LBOT];
2338  {
2339  /* For sufficiently low qp, filtering wouldn't do anything.
2340  * This is a conservative estimate: could also check beta_offset
2341  * and more accurate chroma_qp. */
2342  int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
2343  int qp = h->cur_pic.qscale_table[mb_xy];
2344  if (qp <= qp_thresh &&
2345  (left_xy[LTOP] < 0 ||
2346  ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
2347  (top_xy < 0 ||
2348  ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
2349  if (!FRAME_MBAFF(h))
2350  return 1;
 /* NOTE(review): the guard tests left_xy[LTOP] but indexes with
  * left_xy[LBOT]; this matches upstream — verify intentional. */
2351  if ((left_xy[LTOP] < 0 ||
2352  ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
2353  (top_xy < h->mb_stride ||
2354  ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
2355  return 1;
2356  }
2357  }
2358 
 /* Zero the neighbour type when it lies outside this slice (filter
  * mode 2: no cross-slice deblocking) or is unavailable (0xFFFF). */
2359  top_type = h->cur_pic.mb_type[top_xy];
2360  left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
2361  left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
2362  if (sl->deblocking_filter == 2) {
2363  if (h->slice_table[top_xy] != sl->slice_num)
2364  top_type = 0;
2365  if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
2366  left_type[LTOP] = left_type[LBOT] = 0;
2367  } else {
2368  if (h->slice_table[top_xy] == 0xFFFF)
2369  top_type = 0;
2370  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
2371  left_type[LTOP] = left_type[LBOT] = 0;
2372  }
2373  sl->top_type = top_type;
2374  sl->left_type[LTOP] = left_type[LTOP];
2375  sl->left_type[LBOT] = left_type[LBOT];
2376 
 /* Intra MBs need no mv/ref caches for the filter. */
2377  if (IS_INTRA(mb_type))
2378  return 0;
2379 
2380  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2381  top_type, left_type, mb_xy, 0);
2382  if (sl->list_count == 2)
2383  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2384  top_type, left_type, mb_xy, 1);
2385 
 /* Load the non-zero-count cache for this MB and its neighbours. */
2386  nnz = h->non_zero_count[mb_xy];
2387  nnz_cache = sl->non_zero_count_cache;
2388  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
2389  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
2390  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
2391  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
2392  sl->cbp = h->cbp_table[mb_xy];
2393 
2394  if (top_type) {
2395  nnz = h->non_zero_count[top_xy];
2396  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
2397  }
2398 
2399  if (left_type[LTOP]) {
2400  nnz = h->non_zero_count[left_xy[LTOP]];
2401  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
2402  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
2403  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
2404  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
2405  }
2406 
2407  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
2408  * from what the loop filter needs */
2409  if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
2410  if (IS_8x8DCT(top_type)) {
2411  nnz_cache[4 + 8 * 0] =
2412  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
2413  nnz_cache[6 + 8 * 0] =
2414  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
2415  }
2416  if (IS_8x8DCT(left_type[LTOP])) {
2417  nnz_cache[3 + 8 * 1] =
2418  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
2419  }
2420  if (IS_8x8DCT(left_type[LBOT])) {
2421  nnz_cache[3 + 8 * 3] =
2422  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
2423  }
2424 
 /* For an 8x8-transform MB, replicate each coded-block-pattern bit
  * over the four 4x4 positions of the corresponding 8x8 block. */
2425  if (IS_8x8DCT(mb_type)) {
2426  nnz_cache[scan8[0]] =
2427  nnz_cache[scan8[1]] =
2428  nnz_cache[scan8[2]] =
2429  nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;
2430 
2431  nnz_cache[scan8[0 + 4]] =
2432  nnz_cache[scan8[1 + 4]] =
2433  nnz_cache[scan8[2 + 4]] =
2434  nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;
2435 
2436  nnz_cache[scan8[0 + 8]] =
2437  nnz_cache[scan8[1 + 8]] =
2438  nnz_cache[scan8[2 + 8]] =
2439  nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;
2440 
2441  nnz_cache[scan8[0 + 12]] =
2442  nnz_cache[scan8[1 + 12]] =
2443  nnz_cache[scan8[2 + 12]] =
2444  nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
2445  }
2446  }
2447 
2448  return 0;
2449 }
2450 
2451 static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
2452 {
2453  uint8_t *dest_y, *dest_cb, *dest_cr;
2454  int linesize, uvlinesize, mb_x, mb_y;
2455  const int end_mb_y = sl->mb_y + FRAME_MBAFF(h);
2456  const int old_slice_type = sl->slice_type;
2457  const int pixel_shift = h->pixel_shift;
2458  const int block_h = 16 >> h->chroma_y_shift;
2459 
2460  if (h->postpone_filter)
2461  return;
2462 
2463  if (sl->deblocking_filter) {
2464  for (mb_x = start_x; mb_x < end_x; mb_x++)
2465  for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
2466  int mb_xy, mb_type;
2467  mb_xy = sl->mb_xy = mb_x + mb_y * h->mb_stride;
2468  mb_type = h->cur_pic.mb_type[mb_xy];
2469 
2470  if (FRAME_MBAFF(h))
2471  sl->mb_mbaff =
2472  sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
2473 
2474  sl->mb_x = mb_x;
2475  sl->mb_y = mb_y;
2476  dest_y = h->cur_pic.f->data[0] +
2477  ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
2478  dest_cb = h->cur_pic.f->data[1] +
2479  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2480  mb_y * sl->uvlinesize * block_h;
2481  dest_cr = h->cur_pic.f->data[2] +
2482  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2483  mb_y * sl->uvlinesize * block_h;
2484  // FIXME simplify above
2485 
2486  if (MB_FIELD(sl)) {
2487  linesize = sl->mb_linesize = sl->linesize * 2;
2488  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
2489  if (mb_y & 1) { // FIXME move out of this function?
2490  dest_y -= sl->linesize * 15;
2491  dest_cb -= sl->uvlinesize * (block_h - 1);
2492  dest_cr -= sl->uvlinesize * (block_h - 1);
2493  }
2494  } else {
2495  linesize = sl->mb_linesize = sl->linesize;
2496  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
2497  }
2498  backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
2499  uvlinesize, 0);
2500  if (fill_filter_caches(h, sl, mb_type))
2501  continue;
2502  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
2503  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);
2504 
2505  if (FRAME_MBAFF(h)) {
2506  ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
2507  linesize, uvlinesize);
2508  } else {
2509  ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
2510  dest_cr, linesize, uvlinesize);
2511  }
2512  }
2513  }
2514  sl->slice_type = old_slice_type;
2515  sl->mb_x = end_x;
2516  sl->mb_y = end_mb_y - FRAME_MBAFF(h);
2517  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
2518  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
2519 }
2520 
2522 {
 /* Predict the MBAFF field-decoding flag for the current MB pair from
  * the left neighbour (if in the same slice), else the top neighbour,
  * else 0. NOTE(review): the signature line was lost in extraction. */
2523  const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
2524  int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
2525  h->cur_pic.mb_type[mb_xy - 1] :
2526  (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
2527  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
2528  sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
2529 }
2530 
2531 /**
2532  * Draw edges and report progress for the last MB row.
2532  * When deblocking is active, the reported region is shrunk/extended by
2532  * a border so rows are only published once they can no longer change.
2533  */
2535 {
 /* NOTE(review): the signature line was lost in extraction; presumably
  * decode_finish_row(const H264Context *h, H264SliceContext *sl). */
2536  int top = 16 * (sl->mb_y >> FIELD_PICTURE(h));
2537  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
2538  int height = 16 << FRAME_MBAFF(h);
2539  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
2540 
2541  if (sl->deblocking_filter) {
2542  if ((top + height) >= pic_height)
2543  height += deblock_border;
2544  top -= deblock_border;
2545  }
2546 
2547  if (top >= pic_height || (top + height) < 0)
2548  return;
2549 
 /* Clamp the band to the picture. */
2550  height = FFMIN(height, pic_height - top);
2551  if (top < 0) {
2552  height = top + height;
2553  top = 0;
2554  }
2555 
2556  ff_h264_draw_horiz_band(h, sl, top, height);
2557 
 /* Do not publish progress for droppable frames or after an error. */
2558  if (h->droppable || sl->h264->slice_ctx[0].er.error_occurred)
2559  return;
2560 
2561  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
2562  h->picture_structure == PICT_BOTTOM_FIELD);
2563 }
2564 
2566  int startx, int starty,
2567  int endx, int endy, int status)
2568 {
 /* Thin wrapper around ff_er_add_slice(): records the decode status of
  * the MB range for error resilience, but only when ER is enabled and
  * compiled in. Always operates on slice_ctx[0]'s ER context. */
2569  if (!sl->h264->enable_er)
2570  return;
2571 
2572  if (CONFIG_ERROR_RESILIENCE) {
2573  ERContext *er = &sl->h264->slice_ctx[0].er;
2574 
2575  ff_er_add_slice(er, startx, starty, endx, endy, status);
2576  }
2577 }
2578 
2579 static int decode_slice(struct AVCodecContext *avctx, void *arg)
2580 {
 /* Decode all macroblocks of one queued slice (CABAC or CAVLC path),
  * running the loop filter behind the decode position unless filtering
  * is postponed. Used directly and as an avctx->execute() callback. */
2581  H264SliceContext *sl = arg;
2582  const H264Context *h = sl->h264;
 /* First column of the not-yet-filtered region of the current row. */
2583  int lf_x_start = sl->mb_x;
2584  int orig_deblock = sl->deblocking_filter;
2585  int ret;
2586 
2587  sl->linesize = h->cur_pic_ptr->f->linesize[0];
2588  sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];
2589 
2590  ret = alloc_scratch_buffers(sl, sl->linesize);
2591  if (ret < 0)
2592  return ret;
2593 
2594  sl->mb_skip_run = -1;
2595 
2596  av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * sl->linesize * ((scan8[15] - scan8[0]) >> 3));
2597 
 /* Filtering deferred to ff_h264_execute_decode_slices(); restored at
  * the end of this function via orig_deblock. */
2598  if (h->postpone_filter)
2599  sl->deblocking_filter = 0;
2600 
2601  sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
2602  (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
2603 
 /* If the MB right before this slice was not cleanly finished, mark the
  * ER context so the preceding damage is taken into account. */
2604  if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && h->slice_ctx[0].er.error_status_table) {
2605  const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
2606  if (start_i) {
2607  int prev_status = h->slice_ctx[0].er.error_status_table[h->slice_ctx[0].er.mb_index2xy[start_i - 1]];
2608  prev_status &= ~ VP_START;
2609  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
2610  h->slice_ctx[0].er.error_occurred = 1;
2611  }
2612  }
2613 
2614  if (h->ps.pps->cabac) {
2615  /* realign */
2616  align_get_bits(&sl->gb);
2617 
2618  /* init cabac */
2620  sl->gb.buffer + get_bits_count(&sl->gb) / 8,
2621  (get_bits_left(&sl->gb) + 7) / 8);
2622  if (ret < 0)
2623  return ret;
2624 
2626 
2627  for (;;) {
2628  // START_TIMER
2629  int ret, eos;
 /* Never decode past the start of the next queued slice. */
2630  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2631  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2632  sl->next_slice_idx);
2633  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2634  sl->mb_y, ER_MB_ERROR);
2635  return AVERROR_INVALIDDATA;
2636  }
2637 
2638  ret = ff_h264_decode_mb_cabac(h, sl);
2639  // STOP_TIMER("decode_mb_cabac")
2640 
2641  if (ret >= 0)
2642  ff_h264_hl_decode_mb(h, sl);
2643 
2644  // FIXME optimal? or let mb_decode decode 16x32 ?
 /* MBAFF: decode the bottom MB of the pair as well. */
2645  if (ret >= 0 && FRAME_MBAFF(h)) {
2646  sl->mb_y++;
2647 
2648  ret = ff_h264_decode_mb_cabac(h, sl);
2649 
2650  if (ret >= 0)
2651  ff_h264_hl_decode_mb(h, sl);
2652  sl->mb_y--;
2653  }
2654  eos = get_cabac_terminate(&sl->cabac);
2655 
 /* Workaround for encoders that truncate the final bits: treat a
  * small CABAC overread as a normal slice end. */
2656  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
2657  sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
2658  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2659  sl->mb_y, ER_MB_END);
2660  if (sl->mb_x >= lf_x_start)
2661  loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
2662  goto finish;
2663  }
2664  if (sl->cabac.bytestream > sl->cabac.bytestream_end + 2 )
2665  av_log(h->avctx, AV_LOG_DEBUG, "bytestream overread %"PTRDIFF_SPECIFIER"\n", sl->cabac.bytestream_end - sl->cabac.bytestream);
2666  if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 4) {
2667  av_log(h->avctx, AV_LOG_ERROR,
2668  "error while decoding MB %d %d, bytestream %"PTRDIFF_SPECIFIER"\n",
2669  sl->mb_x, sl->mb_y,
2670  sl->cabac.bytestream_end - sl->cabac.bytestream);
2671  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2672  sl->mb_y, ER_MB_ERROR);
2673  return AVERROR_INVALIDDATA;
2674  }
2675 
 /* End of row: filter the finished part and advance. */
2676  if (++sl->mb_x >= h->mb_width) {
2677  loop_filter(h, sl, lf_x_start, sl->mb_x);
2678  sl->mb_x = lf_x_start = 0;
2679  decode_finish_row(h, sl);
2680  ++sl->mb_y;
2681  if (FIELD_OR_MBAFF_PICTURE(h)) {
2682  ++sl->mb_y;
2683  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2685  }
2686  }
2687 
2688  if (eos || sl->mb_y >= h->mb_height) {
2689  ff_tlog(h->avctx, "slice end %d %d\n",
2690  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2691  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2692  sl->mb_y, ER_MB_END);
2693  if (sl->mb_x > lf_x_start)
2694  loop_filter(h, sl, lf_x_start, sl->mb_x);
2695  goto finish;
2696  }
2697  }
2698  } else {
 /* CAVLC path: same structure, but slice end is detected from the
  * remaining bit count instead of a CABAC terminate symbol. */
2699  for (;;) {
2700  int ret;
2701 
2702  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2703  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2704  sl->next_slice_idx);
2705  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2706  sl->mb_y, ER_MB_ERROR);
2707  return AVERROR_INVALIDDATA;
2708  }
2709 
2710  ret = ff_h264_decode_mb_cavlc(h, sl);
2711 
2712  if (ret >= 0)
2713  ff_h264_hl_decode_mb(h, sl);
2714 
2715  // FIXME optimal? or let mb_decode decode 16x32 ?
2716  if (ret >= 0 && FRAME_MBAFF(h)) {
2717  sl->mb_y++;
2718  ret = ff_h264_decode_mb_cavlc(h, sl);
2719 
2720  if (ret >= 0)
2721  ff_h264_hl_decode_mb(h, sl);
2722  sl->mb_y--;
2723  }
2724 
2725  if (ret < 0) {
2726  av_log(h->avctx, AV_LOG_ERROR,
2727  "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
2728  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2729  sl->mb_y, ER_MB_ERROR);
2730  return ret;
2731  }
2732 
2733  if (++sl->mb_x >= h->mb_width) {
2734  loop_filter(h, sl, lf_x_start, sl->mb_x);
2735  sl->mb_x = lf_x_start = 0;
2736  decode_finish_row(h, sl);
2737  ++sl->mb_y;
2738  if (FIELD_OR_MBAFF_PICTURE(h)) {
2739  ++sl->mb_y;
2740  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2742  }
2743  if (sl->mb_y >= h->mb_height) {
2744  ff_tlog(h->avctx, "slice end %d %d\n",
2745  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2746 
2747  if ( get_bits_left(&sl->gb) == 0
2748  || get_bits_left(&sl->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
2749  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2750  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2751 
2752  goto finish;
2753  } else {
2754  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2755  sl->mb_x, sl->mb_y, ER_MB_END);
2756 
2757  return AVERROR_INVALIDDATA;
2758  }
2759  }
2760  }
2761 
2762  if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
2763  ff_tlog(h->avctx, "slice end %d %d\n",
2764  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2765 
2766  if (get_bits_left(&sl->gb) == 0) {
2767  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2768  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2769  if (sl->mb_x > lf_x_start)
2770  loop_filter(h, sl, lf_x_start, sl->mb_x);
2771 
2772  goto finish;
2773  } else {
2774  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2775  sl->mb_y, ER_MB_ERROR);
2776 
2777  return AVERROR_INVALIDDATA;
2778  }
2779  }
2780  }
2781  }
2782 
2783 finish:
2784  sl->deblocking_filter = orig_deblock;
2785  return 0;
2786 }
2787 
2788 /**
2789  * Call decode_slice() for each context.
2790  *
2791  * @param h h264 master context
2791  *
2791  * Single queued slice: decoded directly on this thread. Multiple
2791  * slices: next_slice_idx bounds are computed so slices cannot overlap,
2791  * decoding runs via avctx->execute(), ER error counts are merged back,
2791  * and any postponed deblocking is performed per slice afterwards.
2792  */
2794 {
 /* NOTE(review): the declaration line was lost in extraction; presumably
  * int ff_h264_execute_decode_slices(H264Context *h). */
2795  AVCodecContext *const avctx = h->avctx;
2796  H264SliceContext *sl;
2797  int context_count = h->nb_slice_ctx_queued;
2798  int ret = 0;
2799  int i, j;
2800 
2801  h->slice_ctx[0].next_slice_idx = INT_MAX;
2802 
 /* Hardware acceleration decodes elsewhere; nothing queued: no-op. */
2803  if (h->avctx->hwaccel || context_count < 1)
2804  return 0;
2805 
2806  av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
2807 
2808  if (context_count == 1) {
2809 
2810  h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
2811  h->postpone_filter = 0;
2812 
2813  ret = decode_slice(avctx, &h->slice_ctx[0]);
2814  h->mb_y = h->slice_ctx[0].mb_y;
2815  if (ret < 0)
2816  goto finish;
2817  } else {
2818  av_assert0(context_count > 0);
2819  for (i = 0; i < context_count; i++) {
2820  int next_slice_idx = h->mb_width * h->mb_height;
2821  int slice_idx;
2822 
2823  sl = &h->slice_ctx[i];
2824  if (CONFIG_ERROR_RESILIENCE) {
2825  sl->er.error_count = 0;
2826  }
2827 
2828  /* make sure none of those slices overlap */
2829  slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
2830  for (j = 0; j < context_count; j++) {
2831  H264SliceContext *sl2 = &h->slice_ctx[j];
2832  int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;
2833 
2834  if (i == j || slice_idx2 < slice_idx)
2835  continue;
2836  next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
2837  }
2838  sl->next_slice_idx = next_slice_idx;
2839  }
2840 
2841  avctx->execute(avctx, decode_slice, h->slice_ctx,
2842  NULL, context_count, sizeof(h->slice_ctx[0]));
2843 
2844  /* pull back stuff from slices to master context */
2845  sl = &h->slice_ctx[context_count - 1];
2846  h->mb_y = sl->mb_y;
2847  if (CONFIG_ERROR_RESILIENCE) {
2848  for (i = 1; i < context_count; i++)
2849  h->slice_ctx[0].er.error_count += h->slice_ctx[i].er.error_count;
2850  }
2851 
 /* Deblocking was deferred: run the loop filter row by row over each
  * slice's decoded region now that all slices are complete. */
2852  if (h->postpone_filter) {
2853  h->postpone_filter = 0;
2854 
2855  for (i = 0; i < context_count; i++) {
2856  int y_end, x_end;
2857 
2858  sl = &h->slice_ctx[i];
2859  y_end = FFMIN(sl->mb_y + 1, h->mb_height);
2860  x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;
2861 
2862  for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
2863  sl->mb_y = j;
2864  loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
2865  j == y_end - 1 ? x_end : h->mb_width);
2866  }
2867  }
2868  }
2869  }
2870 
2871 finish:
2872  h->nb_slice_ctx_queued = 0;
2873  return ret;
2874 }
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:39
er_add_slice
static void er_add_slice(H264SliceContext *sl, int startx, int starty, int endx, int endy, int status)
Definition: h264_slice.c:2565
ff_h264_filter_mb_fast
void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:418
h264_slice_header_init
static int h264_slice_header_init(H264Context *h)
Definition: h264_slice.c:917
implicit_weight_table
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
Initialize implicit_weight table.
Definition: h264_slice.c:664
H264SliceContext::mb_xy
int mb_xy
Definition: h264dec.h:231
ff_h264_unref_picture
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:45
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:235
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
H264SliceContext::ref_cache
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:300
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
ff_h264_free_tables
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:137
H264SEIDisplayOrientation::hflip
int hflip
Definition: h264_sei.h:156
AV_STEREO3D_VIEW_LEFT
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
Definition: stereo3d.h:156
h264_init_ps
static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
Definition: h264_slice.c:1017
H264SliceContext::max_pic_num
int max_pic_num
Definition: h264dec.h:331
H264SliceContext::nb_mmco
int nb_mmco
Definition: h264dec.h:323
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
CHROMA422
#define CHROMA422(h)
Definition: h264dec.h:98
FF_BUG_TRUNCATED
#define FF_BUG_TRUNCATED
Definition: avcodec.h:2613
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
cabac.h
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:117
H264Picture::poc
int poc
frame POC
Definition: h264dec.h:148
h264_export_frame_props
static int h264_export_frame_props(H264Context *h)
Definition: h264_slice.c:1142
H264Picture::f
AVFrame * f
Definition: h264dec.h:129
out
FILE * out
Definition: movenc.c:54
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:112
zigzag_scan8x8_cavlc
static const uint8_t zigzag_scan8x8_cavlc[64+1]
Definition: h264_slice.c:97
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:872
H264Context::slice_ctx
H264SliceContext * slice_ctx
Definition: h264dec.h:350
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:89
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
H264Picture::ref_index
int8_t * ref_index[2]
Definition: h264dec.h:145
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:722
HWACCEL_MAX
#define HWACCEL_MAX
ff_h264_slice_context_init
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init context Allocate buffers which are not shared amongst multiple threads.
Definition: h264dec.c:238
AVFrame::coded_picture_number
int coded_picture_number
picture number in bitstream order
Definition: frame.h:409
MB_MBAFF
#define MB_MBAFF(h)
Definition: h264dec.h:71
H264SliceContext::mvd_table
uint8_t(*[2] mvd_table)[2]
Definition: h264dec.h:313
ff_h264_set_erpic
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
Definition: h264_picture.c:131
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
H264_SEI_PIC_STRUCT_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM
3: top field, bottom field, in that order
Definition: h264_sei.h:49
count
void INT64 INT64 count
Definition: avisynth_c.h:767
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:168
GetBitContext::size_in_bits
int size_in_bits
Definition: get_bits.h:68
H2645NAL::ref_idc
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:65
predict_field_decoding_flag
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
Definition: h264_slice.c:2521
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
AVFrame::width
int width
Definition: frame.h:353
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:522
get_ue_golomb
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to 8190.
Definition: golomb.h:55
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
internal.h
ff_h264_update_thread_context
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:288
alloc_scratch_buffers
static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
Definition: h264_slice.c:129
AVFrame::top_field_first
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:447
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:470
FRAME_RECOVERED_IDR
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264dec.h:517
H264SliceContext::mmco
MMCO mmco[MAX_MMCO_COUNT]
Definition: h264dec.h:322
decode_finish_row
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
Draw edges and report progress for the last MB row.
Definition: h264_slice.c:2534
H264SliceContext::ref_count
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:267
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
ff_er_frame_start
void ff_er_frame_start(ERContext *s)
Definition: error_resilience.c:797
H264_SEI_FPA_TYPE_CHECKERBOARD
@ H264_SEI_FPA_TYPE_CHECKERBOARD
Definition: h264_sei.h:61
H264Picture::qscale_table
int8_t * qscale_table
Definition: h264dec.h:133
H264SliceContext::h264
struct H264Context * h264
Definition: h264dec.h:178
H264SliceContext::left_mb_xy
int left_mb_xy[LEFT_MBS]
Definition: h264dec.h:211
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:229
ERContext
Definition: error_resilience.h:53
H264PredWeightTable::use_weight_chroma
int use_weight_chroma
Definition: h264_parse.h:32
av_buffer_allocz
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
Definition: pixfmt.h:497
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:830
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:75
mpegvideo.h
H264Picture::ref_index_buf
AVBufferRef * ref_index_buf[2]
Definition: h264dec.h:144
ff_h264_pred_weight_table
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps, const int *ref_count, int slice_type_nos, H264PredWeightTable *pwt, int picture_structure, void *logctx)
Definition: h264_parse.c:27
FRAME_RECOVERED_SEI
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264dec.h:522
H264SliceContext::is_complex
int is_complex
Definition: h264dec.h:238
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:35
ff_h264_decode_ref_pic_list_reordering
int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx)
Definition: h264_refs.c:423
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:486
H264Picture::invalid_gap
int invalid_gap
Definition: h264dec.h:162
AV_STEREO3D_VIEW_RIGHT
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
Definition: stereo3d.h:161
H264_NAL_IDR_SLICE
@ H264_NAL_IDR_SLICE
Definition: h264.h:39
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
ThreadFrame::f
AVFrame * f
Definition: thread.h:35
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2651
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
H264SliceContext::mb_x
int mb_x
Definition: h264dec.h:230
h264_mvpred.h
H264Picture::frame_num
int frame_num
frame_num (raw frame_num from slice header)
Definition: h264dec.h:149
H264SliceContext::next_slice_idx
int next_slice_idx
Definition: h264dec.h:236
H264SliceContext
Definition: h264dec.h:177
fill_filter_caches_inter
static av_always_inline void fill_filter_caches_inter(const H264Context *h, H264SliceContext *sl, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264_slice.c:2225
golomb.h
exp golomb vlc stuff
MB_FIELD
#define MB_FIELD(sl)
Definition: h264dec.h:72
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
ff_h264_filter_mb
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:718
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:238
H264SliceContext::mv_cache
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:299
AV_CODEC_FLAG_OUTPUT_CORRUPT
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:858
fmt
const char * fmt
Definition: avisynth_c.h:861
AVHWAccel
Definition: avcodec.h:3649
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:405
finish
static void finish(void)
Definition: movenc.c:345
get_chroma_qp
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
Get the chroma qp.
Definition: h264dec.h:681
U
#define U(x)
Definition: vp56_arith.h:37
H264Picture::mmco_reset
int mmco_reset
MMCO_RESET set this 1.
Definition: h264dec.h:150
fail
#define fail()
Definition: checkasm.h:120
copy_picture_range
static void copy_picture_range(H264Picture **to, H264Picture **from, int count, H264Context *new_base, H264Context *old_base)
Definition: h264_slice.c:272
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
H264SEIA53Caption
Definition: h264_sei.h:118
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:403
frames
if it could not because there are no more frames
Definition: filter_design.txt:266
h264_select_output_frame
static int h264_select_output_frame(H264Context *h)
Definition: h264_slice.c:1348
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:385
H264SliceContext::er
ERContext er
Definition: h264dec.h:180
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:99
CABACContext::bytestream
const uint8_t * bytestream
Definition: cabac.h:48
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:373
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2550
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
IN_RANGE
#define IN_RANGE(a, b, size)
Definition: h264_slice.c:265
ff_h264_flush_change
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:483
ff_h264qpel_init
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:49
h264_frame_start
static int h264_frame_start(H264Context *h)
Definition: h264_slice.c:455
H264SliceContext::deblocking_filter
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264dec.h:193
H264PredWeightTable::luma_log2_weight_denom
int luma_log2_weight_denom
Definition: h264_parse.h:33
H264SliceContext::picture_structure
int picture_structure
Definition: h264dec.h:240
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
release_unused_pictures
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264_slice.c:116
H264PredWeightTable::use_weight
int use_weight
Definition: h264_parse.h:31
H264_SEI_FPA_TYPE_SIDE_BY_SIDE
@ H264_SEI_FPA_TYPE_SIDE_BY_SIDE
Definition: h264_sei.h:64
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
H264SliceContext::direct_spatial_mv_pred
int direct_spatial_mv_pred
Definition: h264dec.h:251
src
#define src
Definition: vp8dsp.c:254
H264SliceContext::slice_num
int slice_num
Definition: h264dec.h:182
H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL
@ H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL
Definition: h264_sei.h:66
non_j_pixfmt
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
Definition: h264_slice.c:1006
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
ff_h264_init_cabac_states
void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
Definition: h264_cabac.c:1263
ff_h264_hl_decode_mb
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:799
avassert.h
AV_STEREO3D_FRAMESEQUENCE
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
Definition: stereo3d.h:92
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
buf
void * buf
Definition: avisynth_c.h:766
ff_color_frame
void ff_color_frame(AVFrame *frame, const int color[4])
Definition: utils.c:422
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:557
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:334
ff_h264_queue_decode_slice
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2082
width
#define width
H264Context::DPB
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264dec.h:345
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
stereo3d.h
H264_SEI_FPA_TYPE_TOP_BOTTOM
@ H264_SEI_FPA_TYPE_TOP_BOTTOM
Definition: h264_sei.h:65
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:137
H264SEIA53Caption::buf_ref
AVBufferRef * buf_ref
Definition: h264_sei.h:119
H264PredWeightTable::chroma_log2_weight_denom
int chroma_log2_weight_denom
Definition: h264_parse.h:34
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1176
FIELD_PICTURE
#define FIELD_PICTURE(h)
Definition: h264dec.h:74
ff_h264_execute_ref_pic_marking
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:610
ff_h264_decode_ref_pic_marking
int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb, const H2645NAL *nal, void *logctx)
Definition: h264_refs.c:834
from
const char * from
Definition: jacosubdec.c:65
to
const char * to
Definition: webvttdec.c:34
h264_slice_header_parse
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1729
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
H264PredWeightTable::chroma_weight_flag
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264_parse.h:36
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
h264data.h
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:384
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
H264Ref::parent
H264Picture * parent
Definition: h264dec.h:174
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:37
field_scan8x8_cavlc
static const uint8_t field_scan8x8_cavlc[64+1]
Definition: h264_slice.c:77
H264SliceContext::slice_alpha_c0_offset
int slice_alpha_c0_offset
Definition: h264dec.h:194
IS_INTRA
#define IS_INTRA(x, y)
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AVFrame::crop_right
size_t crop_right
Definition: frame.h:658
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
H264SliceContext::slice_type
int slice_type
Definition: h264dec.h:183
H264SliceContext::resync_mb_x
int resync_mb_x
Definition: h264dec.h:232
H264Picture::sei_recovery_frame_cnt
int sei_recovery_frame_cnt
Definition: h264dec.h:163
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: avcodec.h:808
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:239
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
H264Context::enable_er
int enable_er
Definition: h264dec.h:545
ff_h264_draw_horiz_band
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:102
H264SliceContext::curr_pic_num
int curr_pic_num
Definition: h264dec.h:330
int32_t
int32_t
Definition: audio_convert.c:194
arg
const char * arg
Definition: jacosubdec.c:66
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:811
GetBitContext::buffer
const uint8_t * buffer
Definition: get_bits.h:62
alloc_picture
static int alloc_picture(H264Context *h, H264Picture *pic)
Definition: h264_slice.c:188
H264Picture::motion_val_buf
AVBufferRef * motion_val_buf[2]
Definition: h264dec.h:135
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:263
NULL
#define NULL
Definition: coverity.c:32
AV_COPY128
#define AV_COPY128(d, s)
Definition: intreadwrite.h:609
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
AV_COPY64
#define AV_COPY64(d, s)
Definition: intreadwrite.h:605
H264SliceContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: h264dec.h:284
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
SPS
Sequence parameter set.
Definition: h264_ps.h:44
TRANSPOSE
#define TRANSPOSE(x)
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:38
ff_h264_decode_mb_cabac
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cabac.c:1914
AV_PICTURE_TYPE_SI
@ AV_PICTURE_TYPE_SI
Switching Intra.
Definition: avutil.h:278
H264SliceContext::chroma_qp
int chroma_qp[2]
Definition: h264dec.h:188
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:923
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:275
PPS
Picture parameter set.
Definition: h264_ps.h:109
av_fast_mallocz
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
Definition: mem.c:505
mathops.h
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
IS_INTERLACED
#define IS_INTERLACED(a)
Definition: mpegutils.h:83
timer.h
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:690
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:964
MAX_PPS_COUNT
#define MAX_PPS_COUNT
Definition: h264_ps.h:38
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:388
H264SliceContext::qscale
int qscale
Definition: h264dec.h:187
get_pixel_format
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
Definition: h264_slice.c:762
fill_filter_caches
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
Definition: h264_slice.c:2309
ERContext::error_occurred
int error_occurred
Definition: error_resilience.h:65
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2705
fp
#define fp
Definition: regdef.h:44
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
init_scan_tables
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264_slice.c:728
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:402
H264SliceContext::top_borders_allocated
int top_borders_allocated[2]
Definition: h264dec.h:288
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:89
AV_PICTURE_TYPE_SP
@ AV_PICTURE_TYPE_SP
Switching Predicted.
Definition: avutil.h:279
FIELD_OR_MBAFF_PICTURE
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264dec.h:91
H264SliceContext::mb_skip_run
int mb_skip_run
Definition: h264dec.h:237
h264_ps.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
H264SliceContext::top_type
int top_type
Definition: h264dec.h:214
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:656
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
H264SliceContext::resync_mb_y
int resync_mb_y
Definition: h264dec.h:233
H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
6: bottom field, top field, bottom field repeated, in that order
Definition: h264_sei.h:52
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:67
H264SEIPictureTiming
Definition: h264_sei.h:81
H264SliceContext::cabac
CABACContext cabac
Cabac.
Definition: h264dec.h:318
H264SliceContext::redundant_pic_count
int redundant_pic_count
Definition: h264dec.h:244
AVFrame::crop_left
size_t crop_left
Definition: frame.h:657
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: avcodec.h:810
ERContext::error_count
atomic_int error_count
Definition: error_resilience.h:64
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
AV_STEREO3D_CHECKERBOARD
@ AV_STEREO3D_CHECKERBOARD
Views are packed in a checkerboard-like structure per pixel.
Definition: stereo3d.h:104
H264Picture::reference
int reference
Definition: h264dec.h:160
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:883
CABAC
#define CABAC(h)
Definition: h264_cabac.c:28
LEFT_MBS
#define LEFT_MBS
Definition: h264dec.h:75
pps
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
Definition: cbs_h264_syntax_template.c:404
H264SEIFramePacking
Definition: h264_sei.h:142
rectangle.h
FF_COMPLIANCE_STRICT
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: avcodec.h:2630
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
H264SliceContext::mb_uvlinesize
ptrdiff_t mb_uvlinesize
Definition: h264dec.h:228
VP_START
#define VP_START
< current MB is the first after a resync marker
Definition: error_resilience.h:30
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:392
H264SliceContext::pwt
H264PredWeightTable pwt
Definition: h264dec.h:197
MAX_DELAYED_PIC_COUNT
#define MAX_DELAYED_PIC_COUNT
Definition: h264dec.h:56
H264Picture::tf
ThreadFrame tf
Definition: h264dec.h:130
H264Picture::mb_type
uint32_t * mb_type
Definition: h264dec.h:139
ff_h264_decode_mb_cavlc
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cavlc.c:702
H264_SEI_PIC_STRUCT_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP
4: bottom field, top field, in that order
Definition: h264_sei.h:50
H264Picture::recovered
int recovered
picture at IDR or recovery point + recovery count
Definition: h264dec.h:161
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
H264SliceContext::top_mb_xy
int top_mb_xy
Definition: h264dec.h:209
H264SliceContext::qp_thresh
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264dec.h:189
H2645NAL
Definition: h2645_parse.h:32
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:394
H264SliceContext::top_borders
uint8_t(*[2] top_borders)[(16 *3) *2]
Definition: h264dec.h:285
AVFrameSideData::data
uint8_t * data
Definition: frame.h:203
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:2836
H264SliceContext::cbp
int cbp
Definition: h264dec.h:255
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:368
H264SliceContext::left_type
int left_type[LEFT_MBS]
Definition: h264dec.h:216
ff_h264_direct_ref_list_init
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:121
H264SliceContext::mb_y
int mb_y
Definition: h264dec.h:230
H264PredWeightTable::implicit_weight
int implicit_weight[48][48][2]
Definition: h264_parse.h:40
height
#define height
decode_slice
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264_slice.c:2579
H264SliceContext::explicit_ref_marking
int explicit_ref_marking
Definition: h264dec.h:324
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN
@ H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN
Definition: h264_sei.h:62
pt
int pt
Definition: rtp.c:35
H264SliceContext::uvlinesize
ptrdiff_t uvlinesize
Definition: h264dec.h:226
AVBufferRef::buffer
AVBuffer * buffer
Definition: buffer.h:82
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:313
H264SEIDisplayOrientation::anticlockwise_rotation
int anticlockwise_rotation
Definition: h264_sei.h:155
H264SliceContext::slice_type_nos
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264dec.h:184
H264SliceContext::delta_poc_bottom
int delta_poc_bottom
Definition: h264dec.h:328
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Definition: pixfmt.h:122
FRAME_MBAFF
#define FRAME_MBAFF(h)
Definition: h264dec.h:73
IS_DIRECT
#define IS_DIRECT(a)
Definition: mpegutils.h:84
H264_SEI_PIC_STRUCT_FRAME
@ H264_SEI_PIC_STRUCT_FRAME
0: frame
Definition: h264_sei.h:46
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:660
get_cabac_terminate
static int av_unused get_cabac_terminate(CABACContext *c)
Definition: cabac_functions.h:181
H264_SEI_PIC_STRUCT_FRAME_TRIPLING
@ H264_SEI_PIC_STRUCT_FRAME_TRIPLING
8: frame tripling
Definition: h264_sei.h:54
field_scan
static const uint8_t field_scan[16+1]
Definition: h264_slice.c:51
loop_filter
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
Definition: h264_slice.c:2451
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:177
H264SliceContext::mb_mbaff
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:242
field_scan8x8
static const uint8_t field_scan8x8[64+1]
Definition: h264_slice.c:58
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:197
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:282
AVFrame::interlaced_frame
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:442
LIST_NOT_USED
#define LIST_NOT_USED
Definition: h264dec.h:390
H264Picture::field_picture
int field_picture
whether or not picture was encoded in separate fields
Definition: h264dec.h:158
h264dec.h
H264SliceContext::poc_lsb
int poc_lsb
Definition: h264dec.h:327
H264SliceContext::first_mb_addr
unsigned int first_mb_addr
Definition: h264dec.h:234
ff_h264_direct_dist_scale_factor
void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:62
AVBuffer
A reference counted buffer type.
Definition: buffer_internal.h:37
H264Context
H264Context.
Definition: h264dec.h:337
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: avcodec.h:809
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:951
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:520
H264_SEI_PIC_STRUCT_FRAME_DOUBLING
@ H264_SEI_PIC_STRUCT_FRAME_DOUBLING
7: frame doubling
Definition: h264_sei.h:53
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
H264SliceContext::frame_num
int frame_num
Definition: h264dec.h:326
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:404
display.h
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
ff_h264_execute_decode_slices
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2793
H264SliceContext::mb_linesize
ptrdiff_t mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264dec.h:227
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
av_always_inline
#define av_always_inline
Definition: attributes.h:43
uint8_t
uint8_t
Definition: audio_convert.c:194
cabac_functions.h
H264Picture::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: h264dec.h:141
tb
#define tb
Definition: regdef.h:68
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
ff_h264_parse_ref_count
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2], GetBitContext *gb, const PPS *pps, int slice_type_nos, int picture_structure, void *logctx)
Definition: h264_parse.c:219
H264_SEI_FPA_TYPE_INTERLEAVE_ROW
@ H264_SEI_FPA_TYPE_INTERLEAVE_ROW
Definition: h264_sei.h:63
ff_h264_alloc_tables
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:180
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:521
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:386
H264SliceContext::list_count
unsigned int list_count
Definition: h264dec.h:268
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:67
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ff_h264_ref_picture
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
Definition: h264_picture.c:66
ret
ret
Definition: filter_design.txt:187
ff_h264_init_poc
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc, const SPS *sps, H264POCContext *pc, int picture_structure, int nal_ref_idc)
Definition: h264_parse.c:277
ff_h264_get_profile
int ff_h264_get_profile(const SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264_parse.c:516
AV_STEREO3D_COLUMNS
@ AV_STEREO3D_COLUMNS
Views are packed per column.
Definition: stereo3d.h:141
h264_field_start
static int h264_field_start(H264Context *h, const H264SliceContext *sl, const H2645NAL *nal, int first_slice)
Definition: h264_slice.c:1451
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
H264SliceContext::last_qscale_diff
int last_qscale_diff
Definition: h264dec.h:190
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:391
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:396
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:119
H264SliceContext::pps_id
unsigned int pps_id
Definition: h264dec.h:278
H264SliceContext::linesize
ptrdiff_t linesize
Definition: h264dec.h:226
H264SliceContext::slice_beta_offset
int slice_beta_offset
Definition: h264dec.h:195
AVCodecContext
main external API structure.
Definition: avcodec.h:1565
AVFrame::height
int height
Definition: frame.h:353
get_ue_golomb_31
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
Definition: golomb.h:120
MAX_SLICES
#define MAX_SLICES
Definition: dxva2_hevc.c:29
backup_mb_border
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264_slice.c:562
ff_h264_build_ref_list
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl)
Definition: h264_refs.c:299
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:2864
av_image_copy
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:387
H264SliceContext::bipred_scratchpad
uint8_t * bipred_scratchpad
Definition: h264dec.h:283
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
H264Picture::field_poc
int field_poc[2]
top/bottom POC
Definition: h264dec.h:147
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVHWAccel::frame_priv_data_size
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:3759
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:827
H264Picture
Definition: h264dec.h:128
find_unused_picture
static int find_unused_picture(H264Context *h)
Definition: h264_slice.c:253
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:644
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
h264_slice_init
static int h264_slice_init(H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1946
H264SEIDisplayOrientation::vflip
int vflip
Definition: h264_sei.h:156
H264SEIDisplayOrientation
Definition: h264_sei.h:153
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:3229
ff_h264_field_end
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:154
av_buffer_ref
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
CABACContext::bytestream_end
const uint8_t * bytestream_end
Definition: cabac.h:49
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
init_table_pools
static int init_table_pools(H264Context *h)
Definition: h264_slice.c:161
H264Picture::mb_type_buf
AVBufferRef * mb_type_buf
Definition: h264dec.h:138
H264SliceContext::ref_list
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:269
LBOT
#define LBOT
Definition: h264dec.h:77
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder should not do as an error
Definition: avcodec.h:2710
H264SliceContext::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:294
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
tc
#define tc
Definition: regdef.h:69
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:79
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:105
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:39
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:201
ff_thread_get_format
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:938
H264_SEI_PIC_STRUCT_BOTTOM_FIELD
@ H264_SEI_PIC_STRUCT_BOTTOM_FIELD
2: bottom field
Definition: h264_sei.h:48
H264Picture::hwaccel_picture_private
void * hwaccel_picture_private
hardware accelerator private data
Definition: h264dec.h:142
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:36
AVStereo3D::view
enum AVStereo3DView view
Determines which views are packed.
Definition: stereo3d.h:190
H264_SEI_FPA_TYPE_2D
@ H264_SEI_FPA_TYPE_2D
Definition: h264_sei.h:67
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:1592
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:113
AVFrame::crop_top
size_t crop_top
Definition: frame.h:655
H264SliceContext::gb
GetBitContext gb
Definition: h264dec.h:179
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:500
init_dimensions
static int init_dimensions(H264Context *h)
Definition: h264_slice.c:875
H264SliceContext::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: h264dec.h:206
LTOP
#define LTOP
Definition: h264dec.h:76
h264.h
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
H264SliceContext::edge_emu_buffer_allocated
int edge_emu_buffer_allocated
Definition: h264dec.h:287
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264_slice.c:267
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
CHROMA444
#define CHROMA444(h)
Definition: h264dec.h:99
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
ff_h264_get_slice_type
int ff_h264_get_slice_type(const H264SliceContext *sl)
Reconstruct bitstream slice_type.
Definition: h264_slice.c:2207
h
h
Definition: vp9dsp_template.c:2038
H264SliceContext::cabac_init_idc
int cabac_init_idc
Definition: h264dec.h:320
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:397
H264PredWeightTable::luma_weight_flag
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264_parse.h:35
H264_MAX_PICTURE_COUNT
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:52
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:34
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
H264SliceContext::bipred_scratchpad_allocated
int bipred_scratchpad_allocated
Definition: h264dec.h:286
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: avcodec.h:807
H264SliceContext::slice_type_fixed
int slice_type_fixed
Definition: h264dec.h:185
H264Ref::poc
int poc
Definition: h264dec.h:171
IS_8x8DCT
#define IS_8x8DCT(a)
Definition: h264dec.h:104
H264Picture::qscale_table_buf
AVBufferRef * qscale_table_buf
Definition: h264dec.h:132
H264_SEI_PIC_STRUCT_TOP_FIELD
@ H264_SEI_PIC_STRUCT_TOP_FIELD
1: top field
Definition: h264_sei.h:47
H264SliceContext::delta_poc
int delta_poc[2]
Definition: h264dec.h:329
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:2891
H264Picture::long_ref
int long_ref
1->long term reference 0->short term reference
Definition: h264dec.h:154
H264Ref::reference
int reference
Definition: h264dec.h:170
AVFrame::repeat_pict
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:437
H264Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: h264dec.h:136
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:395
H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
5: top field, bottom field, top field repeated, in that order
Definition: h264_sei.h:51
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2438
H264SliceContext::mb_field_decoding_flag
int mb_field_decoding_flag
Definition: h264dec.h:241