FFmpeg
h264_slice.c
Go to the documentation of this file.
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #include "config_components.h"
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/display.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/stereo3d.h"
35 #include "libavutil/timecode.h"
36 #include "internal.h"
37 #include "cabac.h"
38 #include "cabac_functions.h"
39 #include "decode.h"
40 #include "error_resilience.h"
41 #include "avcodec.h"
42 #include "h264.h"
43 #include "h264dec.h"
44 #include "h264data.h"
45 #include "h264chroma.h"
46 #include "h264_ps.h"
47 #include "golomb.h"
48 #include "mathops.h"
49 #include "mpegutils.h"
50 #include "rectangle.h"
51 #include "thread.h"
52 #include "threadframe.h"
53 
/* 4x4 residual-coefficient scan order used for field (interlaced) macroblocks.
 * Each entry is a position inside the 4x4 block written as x + y * 4, so the
 * vertical bias of the field scan is visible in the table itself.
 * Declared [16+1]: the extra slot is zero-filled — presumably a guard against
 * a one-past-end read in the coefficient decode loop; TODO confirm. */
static const uint8_t field_scan[16+1] = {
    0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
    0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
    2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
    3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
};
60 
/* 8x8 residual-coefficient scan order for field (interlaced) macroblocks,
 * used by the CABAC path.  Entries are x + y * 8 positions inside the 8x8
 * block.  Declared [64+1]: the trailing zero-filled slot presumably guards a
 * one-past-end read — TODO confirm against the coefficient decoder. */
static const uint8_t field_scan8x8[64+1] = {
    0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
    1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
    2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
    0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
    2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
    2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
    2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
    3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
    3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
    4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
    4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
    5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
    5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
    7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
    6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
    7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
};
79 
/* 8x8 field scan order, permuted for the CAVLC entropy decoder.
 * Entries are x + y * 8 positions inside the 8x8 block; the layout appears
 * to interleave field_scan8x8 the same way zigzag_scan8x8_cavlc below
 * interleaves the zigzag scan ([i] = scan[(i/4) + 16*(i%4)]) — NOTE(review):
 * inferred by analogy with the documented zigzag variant, confirm. */
static const uint8_t field_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
    2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
    3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
    5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
    0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
    1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
    3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
    5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
    0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
    1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
    3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
    5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
    1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
    1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
    3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
    6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
};
98 
/* 8x8 progressive (zigzag) scan order, permuted for the CAVLC entropy
 * decoder.  Entries are x + y * 8 positions inside the 8x8 block. */
// zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)]
static const uint8_t zigzag_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
    4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
    3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
    2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
    1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
    3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
    2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
    3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
    0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
    2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
    1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
    4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
    0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
    1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
    0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
    5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
};
118 
119 static void release_unused_pictures(H264Context *h, int remove_current)
120 {
121  int i;
122 
123  /* release non reference frames */
124  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
125  if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
126  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
127  ff_h264_unref_picture(h, &h->DPB[i]);
128  }
129  }
130 }
131 
132 static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
133 {
134  const H264Context *h = sl->h264;
135  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
136 
137  av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated, 16 * 6 * alloc_size);
138  // edge emu needs blocksize + filter length - 1
139  // (= 21x21 for H.264)
140  av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated, alloc_size * 2 * 21);
141 
143  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
145  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
146 
147  if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
148  !sl->top_borders[0] || !sl->top_borders[1]) {
151  av_freep(&sl->top_borders[0]);
152  av_freep(&sl->top_borders[1]);
153 
156  sl->top_borders_allocated[0] = 0;
157  sl->top_borders_allocated[1] = 0;
158  return AVERROR(ENOMEM);
159  }
160 
161  return 0;
162 }
163 
165 {
166  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
167  const int mb_array_size = h->mb_stride * h->mb_height;
168  const int b4_stride = h->mb_width * 4 + 1;
169  const int b4_array_size = b4_stride * h->mb_height * 4;
170 
171  h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
173  h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
174  sizeof(uint32_t), av_buffer_allocz);
175  h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
176  sizeof(int16_t), av_buffer_allocz);
177  h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
178 
179  if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
180  !h->ref_index_pool) {
181  av_buffer_pool_uninit(&h->qscale_table_pool);
182  av_buffer_pool_uninit(&h->mb_type_pool);
183  av_buffer_pool_uninit(&h->motion_val_pool);
184  av_buffer_pool_uninit(&h->ref_index_pool);
185  return AVERROR(ENOMEM);
186  }
187 
188  return 0;
189 }
190 
192 {
193  int i, ret = 0;
194 
195  av_assert0(!pic->f->data[0]);
196 
197  pic->tf.f = pic->f;
198  ret = ff_thread_get_ext_buffer(h->avctx, &pic->tf,
199  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
200  if (ret < 0)
201  goto fail;
202 
203  if (pic->needs_fg) {
204  pic->f_grain->format = pic->f->format;
205  pic->f_grain->width = pic->f->width;
206  pic->f_grain->height = pic->f->height;
207  ret = ff_thread_get_buffer(h->avctx, pic->f_grain, 0);
208  if (ret < 0)
209  goto fail;
210  }
211 
212  if (h->avctx->hwaccel) {
213  const AVHWAccel *hwaccel = h->avctx->hwaccel;
215  if (hwaccel->frame_priv_data_size) {
217  if (!pic->hwaccel_priv_buf)
218  return AVERROR(ENOMEM);
220  }
221  }
222  if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
223  int h_chroma_shift, v_chroma_shift;
225  &h_chroma_shift, &v_chroma_shift);
226 
227  for(i=0; i<AV_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
228  memset(pic->f->data[1] + pic->f->linesize[1]*i,
229  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
230  memset(pic->f->data[2] + pic->f->linesize[2]*i,
231  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
232  }
233  }
234 
235  if (!h->qscale_table_pool) {
237  if (ret < 0)
238  goto fail;
239  }
240 
241  pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
242  pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool);
243  if (!pic->qscale_table_buf || !pic->mb_type_buf)
244  goto fail;
245 
246  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
247  pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
248 
249  for (i = 0; i < 2; i++) {
250  pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
251  pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool);
252  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
253  goto fail;
254 
255  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
256  pic->ref_index[i] = pic->ref_index_buf[i]->data;
257  }
258 
259  pic->pps_buf = av_buffer_ref(h->ps.pps_ref);
260  if (!pic->pps_buf)
261  goto fail;
262  pic->pps = (const PPS*)pic->pps_buf->data;
263 
264  pic->mb_width = h->mb_width;
265  pic->mb_height = h->mb_height;
266  pic->mb_stride = h->mb_stride;
267 
268  return 0;
269 fail:
270  ff_h264_unref_picture(h, pic);
271  return (ret < 0) ? ret : AVERROR(ENOMEM);
272 }
273 
275 {
276  int i;
277 
278  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
279  if (!h->DPB[i].f->buf[0])
280  return i;
281  }
282  return AVERROR_INVALIDDATA;
283 }
284 
285 
/* True iff pointer a lies inside the half-open range [b, b + size), where
 * size is counted in elements of b's pointee type; the bounds are compared
 * through void* so mixed pointer types can be checked. */
#define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))

/* Translate a picture pointer belonging to old_ctx's DPB into the pointer to
 * the same slot in new_ctx's DPB (used when frame-threading duplicates the
 * context).  Pointers that do not point into old_ctx's DPB map to NULL. */
#define REBASE_PICTURE(pic, new_ctx, old_ctx) \
    (((pic) && (pic) >= (old_ctx)->DPB && \
      (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ? \
     &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)
292 
293 static void copy_picture_range(H264Picture **to, H264Picture **from, int count,
294  H264Context *new_base,
295  H264Context *old_base)
296 {
297  int i;
298 
299  for (i = 0; i < count; i++) {
300  av_assert1(!from[i] ||
301  IN_RANGE(from[i], old_base, 1) ||
302  IN_RANGE(from[i], old_base->DPB, H264_MAX_PICTURE_COUNT));
303  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
304  }
305 }
306 
308 
310  const AVCodecContext *src)
311 {
312  H264Context *h = dst->priv_data, *h1 = src->priv_data;
313  int inited = h->context_initialized, err = 0;
314  int need_reinit = 0;
315  int i, ret;
316 
317  if (dst == src)
318  return 0;
319 
320  if (inited && !h1->ps.sps)
321  return AVERROR_INVALIDDATA;
322 
323  if (inited &&
324  (h->width != h1->width ||
325  h->height != h1->height ||
326  h->mb_width != h1->mb_width ||
327  h->mb_height != h1->mb_height ||
328  !h->ps.sps ||
329  h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma ||
330  h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
331  h->ps.sps->colorspace != h1->ps.sps->colorspace)) {
332  need_reinit = 1;
333  }
334 
335  /* copy block_offset since frame_start may not be called */
336  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
337 
338  // SPS/PPS
339  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
340  ret = av_buffer_replace(&h->ps.sps_list[i], h1->ps.sps_list[i]);
341  if (ret < 0)
342  return ret;
343  }
344  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
345  ret = av_buffer_replace(&h->ps.pps_list[i], h1->ps.pps_list[i]);
346  if (ret < 0)
347  return ret;
348  }
349 
350  ret = av_buffer_replace(&h->ps.pps_ref, h1->ps.pps_ref);
351  if (ret < 0)
352  return ret;
353  h->ps.pps = NULL;
354  h->ps.sps = NULL;
355  if (h1->ps.pps_ref) {
356  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
357  h->ps.sps = h->ps.pps->sps;
358  }
359 
360  if (need_reinit || !inited) {
361  h->width = h1->width;
362  h->height = h1->height;
363  h->mb_height = h1->mb_height;
364  h->mb_width = h1->mb_width;
365  h->mb_num = h1->mb_num;
366  h->mb_stride = h1->mb_stride;
367  h->b_stride = h1->b_stride;
368  h->x264_build = h1->x264_build;
369 
370  if (h->context_initialized || h1->context_initialized) {
371  if ((err = h264_slice_header_init(h)) < 0) {
372  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
373  return err;
374  }
375  }
376 
377  /* copy block_offset since frame_start may not be called */
378  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
379  }
380 
381  h->avctx->coded_height = h1->avctx->coded_height;
382  h->avctx->coded_width = h1->avctx->coded_width;
383  h->avctx->width = h1->avctx->width;
384  h->avctx->height = h1->avctx->height;
385  h->width_from_caller = h1->width_from_caller;
386  h->height_from_caller = h1->height_from_caller;
387  h->coded_picture_number = h1->coded_picture_number;
388  h->first_field = h1->first_field;
389  h->picture_structure = h1->picture_structure;
390  h->mb_aff_frame = h1->mb_aff_frame;
391  h->droppable = h1->droppable;
392 
393  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
394  ret = ff_h264_replace_picture(h, &h->DPB[i], &h1->DPB[i]);
395  if (ret < 0)
396  return ret;
397  }
398 
399  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
400  ret = ff_h264_replace_picture(h, &h->cur_pic, &h1->cur_pic);
401  if (ret < 0)
402  return ret;
403 
404  h->enable_er = h1->enable_er;
405  h->workaround_bugs = h1->workaround_bugs;
406  h->droppable = h1->droppable;
407 
408  // extradata/NAL handling
409  h->is_avc = h1->is_avc;
410  h->nal_length_size = h1->nal_length_size;
411 
412  memcpy(&h->poc, &h1->poc, sizeof(h->poc));
413 
414  memcpy(h->short_ref, h1->short_ref, sizeof(h->short_ref));
415  memcpy(h->long_ref, h1->long_ref, sizeof(h->long_ref));
416  memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
417  memcpy(h->last_pocs, h1->last_pocs, sizeof(h->last_pocs));
418 
419  h->next_output_pic = h1->next_output_pic;
420  h->next_outputed_poc = h1->next_outputed_poc;
421  h->poc_offset = h1->poc_offset;
422 
423  memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
424  h->nb_mmco = h1->nb_mmco;
425  h->mmco_reset = h1->mmco_reset;
426  h->explicit_ref_marking = h1->explicit_ref_marking;
427  h->long_ref_count = h1->long_ref_count;
428  h->short_ref_count = h1->short_ref_count;
429 
430  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
431  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
432  copy_picture_range(h->delayed_pic, h1->delayed_pic,
433  FF_ARRAY_ELEMS(h->delayed_pic), h, h1);
434 
435  h->frame_recovered = h1->frame_recovered;
436 
437  ret = av_buffer_replace(&h->sei.a53_caption.buf_ref, h1->sei.a53_caption.buf_ref);
438  if (ret < 0)
439  return ret;
440 
441  for (i = 0; i < h->sei.unregistered.nb_buf_ref; i++)
442  av_buffer_unref(&h->sei.unregistered.buf_ref[i]);
443  h->sei.unregistered.nb_buf_ref = 0;
444 
445  if (h1->sei.unregistered.nb_buf_ref) {
446  ret = av_reallocp_array(&h->sei.unregistered.buf_ref,
447  h1->sei.unregistered.nb_buf_ref,
448  sizeof(*h->sei.unregistered.buf_ref));
449  if (ret < 0)
450  return ret;
451 
452  for (i = 0; i < h1->sei.unregistered.nb_buf_ref; i++) {
453  h->sei.unregistered.buf_ref[i] = av_buffer_ref(h1->sei.unregistered.buf_ref[i]);
454  if (!h->sei.unregistered.buf_ref[i])
455  return AVERROR(ENOMEM);
456  h->sei.unregistered.nb_buf_ref++;
457  }
458  }
459  h->sei.unregistered.x264_build = h1->sei.unregistered.x264_build;
460 
461  if (!h->cur_pic_ptr)
462  return 0;
463 
464  if (!h->droppable) {
466  h->poc.prev_poc_msb = h->poc.poc_msb;
467  h->poc.prev_poc_lsb = h->poc.poc_lsb;
468  }
469  h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
470  h->poc.prev_frame_num = h->poc.frame_num;
471 
472  h->recovery_frame = h1->recovery_frame;
473 
474  return err;
475 }
476 
478  const AVCodecContext *src)
479 {
480  H264Context *h = dst->priv_data;
481  const H264Context *h1 = src->priv_data;
482 
483  h->is_avc = h1->is_avc;
484  h->nal_length_size = h1->nal_length_size;
485 
486  return 0;
487 }
488 
490 {
491  H264Picture *pic;
492  int i, ret;
493  const int pixel_shift = h->pixel_shift;
494 
495  if (!ff_thread_can_start_frame(h->avctx)) {
496  av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
497  return -1;
498  }
499 
501  h->cur_pic_ptr = NULL;
502 
504  if (i < 0) {
505  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
506  return i;
507  }
508  pic = &h->DPB[i];
509 
510  pic->reference = h->droppable ? 0 : h->picture_structure;
511  pic->f->coded_picture_number = h->coded_picture_number++;
512  pic->field_picture = h->picture_structure != PICT_FRAME;
513  pic->frame_num = h->poc.frame_num;
514  /*
515  * Zero key_frame here; IDR markings per slice in frame or fields are ORed
516  * in later.
517  * See decode_nal_units().
518  */
519  pic->f->key_frame = 0;
520  pic->mmco_reset = 0;
521  pic->recovered = 0;
522  pic->invalid_gap = 0;
523  pic->sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
524 
525  pic->f->pict_type = h->slice_ctx[0].slice_type;
526 
527  pic->f->crop_left = h->crop_left;
528  pic->f->crop_right = h->crop_right;
529  pic->f->crop_top = h->crop_top;
530  pic->f->crop_bottom = h->crop_bottom;
531 
532  pic->needs_fg = h->sei.film_grain_characteristics.present && !h->avctx->hwaccel &&
533  !(h->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN);
534 
535  if ((ret = alloc_picture(h, pic)) < 0)
536  return ret;
537 
538  h->cur_pic_ptr = pic;
539  ff_h264_unref_picture(h, &h->cur_pic);
540  if (CONFIG_ERROR_RESILIENCE) {
541  ff_h264_set_erpic(&h->er.cur_pic, NULL);
542  }
543 
544  if ((ret = ff_h264_ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
545  return ret;
546 
547  for (i = 0; i < h->nb_slice_ctx; i++) {
548  h->slice_ctx[i].linesize = h->cur_pic_ptr->f->linesize[0];
549  h->slice_ctx[i].uvlinesize = h->cur_pic_ptr->f->linesize[1];
550  }
551 
552  if (CONFIG_ERROR_RESILIENCE && h->enable_er) {
553  ff_er_frame_start(&h->er);
554  ff_h264_set_erpic(&h->er.last_pic, NULL);
555  ff_h264_set_erpic(&h->er.next_pic, NULL);
556  }
557 
558  for (i = 0; i < 16; i++) {
559  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
560  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
561  }
562  for (i = 0; i < 16; i++) {
563  h->block_offset[16 + i] =
564  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
565  h->block_offset[48 + 16 + i] =
566  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
567  }
568 
569  /* We mark the current picture as non-reference after allocating it, so
570  * that if we break out due to an error it can be released automatically
571  * in the next ff_mpv_frame_start().
572  */
573  h->cur_pic_ptr->reference = 0;
574 
575  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
576 
577  h->next_output_pic = NULL;
578 
579  h->postpone_filter = 0;
580 
581  h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);
582 
583  if (h->sei.unregistered.x264_build >= 0)
584  h->x264_build = h->sei.unregistered.x264_build;
585 
586  assert(h->cur_pic_ptr->long_ref == 0);
587 
588  return 0;
589 }
590 
592  uint8_t *src_y,
593  uint8_t *src_cb, uint8_t *src_cr,
594  int linesize, int uvlinesize,
595  int simple)
596 {
597  uint8_t *top_border;
598  int top_idx = 1;
599  const int pixel_shift = h->pixel_shift;
600  int chroma444 = CHROMA444(h);
601  int chroma422 = CHROMA422(h);
602 
603  src_y -= linesize;
604  src_cb -= uvlinesize;
605  src_cr -= uvlinesize;
606 
607  if (!simple && FRAME_MBAFF(h)) {
608  if (sl->mb_y & 1) {
609  if (!MB_MBAFF(sl)) {
610  top_border = sl->top_borders[0][sl->mb_x];
611  AV_COPY128(top_border, src_y + 15 * linesize);
612  if (pixel_shift)
613  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
614  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
615  if (chroma444) {
616  if (pixel_shift) {
617  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
618  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
619  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
620  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
621  } else {
622  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
623  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
624  }
625  } else if (chroma422) {
626  if (pixel_shift) {
627  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
628  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
629  } else {
630  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
631  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
632  }
633  } else {
634  if (pixel_shift) {
635  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
636  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
637  } else {
638  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
639  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
640  }
641  }
642  }
643  }
644  } else if (MB_MBAFF(sl)) {
645  top_idx = 0;
646  } else
647  return;
648  }
649 
650  top_border = sl->top_borders[top_idx][sl->mb_x];
651  /* There are two lines saved, the line above the top macroblock
652  * of a pair, and the line above the bottom macroblock. */
653  AV_COPY128(top_border, src_y + 16 * linesize);
654  if (pixel_shift)
655  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
656 
657  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
658  if (chroma444) {
659  if (pixel_shift) {
660  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
661  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
662  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
663  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
664  } else {
665  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
666  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
667  }
668  } else if (chroma422) {
669  if (pixel_shift) {
670  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
671  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
672  } else {
673  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
674  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
675  }
676  } else {
677  if (pixel_shift) {
678  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
679  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
680  } else {
681  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
682  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
683  }
684  }
685  }
686 }
687 
688 /**
689  * Initialize implicit_weight table.
690  * @param field 0/1 initialize the weight for interlaced MBAFF
691  * -1 initializes the rest
692  */
694 {
695  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
696 
697  for (i = 0; i < 2; i++) {
698  sl->pwt.luma_weight_flag[i] = 0;
699  sl->pwt.chroma_weight_flag[i] = 0;
700  }
701 
702  if (field < 0) {
703  if (h->picture_structure == PICT_FRAME) {
704  cur_poc = h->cur_pic_ptr->poc;
705  } else {
706  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
707  }
708  if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
709  sl->ref_list[0][0].poc + (int64_t)sl->ref_list[1][0].poc == 2LL * cur_poc) {
710  sl->pwt.use_weight = 0;
711  sl->pwt.use_weight_chroma = 0;
712  return;
713  }
714  ref_start = 0;
715  ref_count0 = sl->ref_count[0];
716  ref_count1 = sl->ref_count[1];
717  } else {
718  cur_poc = h->cur_pic_ptr->field_poc[field];
719  ref_start = 16;
720  ref_count0 = 16 + 2 * sl->ref_count[0];
721  ref_count1 = 16 + 2 * sl->ref_count[1];
722  }
723 
724  sl->pwt.use_weight = 2;
725  sl->pwt.use_weight_chroma = 2;
726  sl->pwt.luma_log2_weight_denom = 5;
728 
729  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
730  int64_t poc0 = sl->ref_list[0][ref0].poc;
731  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
732  int w = 32;
733  if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
734  int poc1 = sl->ref_list[1][ref1].poc;
735  int td = av_clip_int8(poc1 - poc0);
736  if (td) {
737  int tb = av_clip_int8(cur_poc - poc0);
738  int tx = (16384 + (FFABS(td) >> 1)) / td;
739  int dist_scale_factor = (tb * tx + 32) >> 8;
740  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
741  w = 64 - dist_scale_factor;
742  }
743  }
744  if (field < 0) {
745  sl->pwt.implicit_weight[ref0][ref1][0] =
746  sl->pwt.implicit_weight[ref0][ref1][1] = w;
747  } else {
748  sl->pwt.implicit_weight[ref0][ref1][field] = w;
749  }
750  }
751  }
752 }
753 
754 /**
755  * initialize scan tables
756  */
758 {
759  int i;
760  for (i = 0; i < 16; i++) {
761 #define TRANSPOSE(x) ((x) >> 2) | (((x) << 2) & 0xF)
762  h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]);
763  h->field_scan[i] = TRANSPOSE(field_scan[i]);
764 #undef TRANSPOSE
765  }
766  for (i = 0; i < 64; i++) {
767 #define TRANSPOSE(x) ((x) >> 3) | (((x) & 7) << 3)
768  h->zigzag_scan8x8[i] = TRANSPOSE(ff_zigzag_direct[i]);
769  h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]);
770  h->field_scan8x8[i] = TRANSPOSE(field_scan8x8[i]);
771  h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]);
772 #undef TRANSPOSE
773  }
774  if (h->ps.sps->transform_bypass) { // FIXME same ugly
775  memcpy(h->zigzag_scan_q0 , ff_zigzag_scan , sizeof(h->zigzag_scan_q0 ));
776  memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 ));
777  memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
778  memcpy(h->field_scan_q0 , field_scan , sizeof(h->field_scan_q0 ));
779  memcpy(h->field_scan8x8_q0 , field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
780  memcpy(h->field_scan8x8_cavlc_q0 , field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
781  } else {
782  memcpy(h->zigzag_scan_q0 , h->zigzag_scan , sizeof(h->zigzag_scan_q0 ));
783  memcpy(h->zigzag_scan8x8_q0 , h->zigzag_scan8x8 , sizeof(h->zigzag_scan8x8_q0 ));
784  memcpy(h->zigzag_scan8x8_cavlc_q0 , h->zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
785  memcpy(h->field_scan_q0 , h->field_scan , sizeof(h->field_scan_q0 ));
786  memcpy(h->field_scan8x8_q0 , h->field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
787  memcpy(h->field_scan8x8_cavlc_q0 , h->field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
788  }
789 }
790 
/**
 * Build the list of candidate output pixel formats for the active SPS and
 * run format negotiation.
 *
 * Candidates are appended in priority order: compiled-in hardware-accelerated
 * formats first, then the software format matching the stream's luma bit
 * depth, chroma sampling (4:4:4 / 4:2:2 / 4:2:0), colorspace and range.
 *
 * @param h              decoder context; h->ps.sps must be valid
 * @param force_callback if nonzero, always go through ff_thread_get_format()
 *                       even when avctx->pix_fmt is already in the list
 * @return the negotiated pixel format, or AVERROR_INVALIDDATA for an
 *         unsupported luma bit depth
 */
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
{
#define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
                     (CONFIG_H264_D3D11VA_HWACCEL * 2) + \
                     CONFIG_H264_NVDEC_HWACCEL + \
                     CONFIG_H264_VAAPI_HWACCEL + \
                     CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
                     CONFIG_H264_VDPAU_HWACCEL)
    /* one slot per possible hwaccel entry, plus one software format and
     * the AV_PIX_FMT_NONE terminator */
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
    const enum AVPixelFormat *choices = pix_fmts;
    int i;

    switch (h->ps.sps->bit_depth_luma) {
    case 9:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP9;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P9;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P9;
        else
            *fmt++ = AV_PIX_FMT_YUV420P9;
        break;
    case 10:
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
        /* VideoToolbox is only offered for non-RGB content here;
         * NOTE(review): presumably it cannot output GBR planes — confirm */
        if (h->avctx->colorspace != AVCOL_SPC_RGB)
            *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP10;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P10;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P10;
        else
            *fmt++ = AV_PIX_FMT_YUV420P10;
        break;
    case 12:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP12;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P12;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P12;
        else
            *fmt++ = AV_PIX_FMT_YUV420P12;
        break;
    case 14:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP14;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P14;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P14;
        else
            *fmt++ = AV_PIX_FMT_YUV420P14;
        break;
    case 8:
        /* 8-bit: the common case, with the widest hwaccel coverage */
#if CONFIG_H264_VDPAU_HWACCEL
        *fmt++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_H264_NVDEC_HWACCEL
        *fmt++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
        if (h->avctx->colorspace != AVCOL_SPC_RGB)
            *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB)
                *fmt++ = AV_PIX_FMT_GBRP;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ444P;
            else
                *fmt++ = AV_PIX_FMT_YUV444P;
        } else if (CHROMA422(h)) {
            if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ422P;
            else
                *fmt++ = AV_PIX_FMT_YUV422P;
        } else {
            /* 4:2:0 — the DXVA2/D3D11/VAAPI hwaccels are only listed here */
#if CONFIG_H264_DXVA2_HWACCEL
            *fmt++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_H264_D3D11VA_HWACCEL
            *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmt++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_H264_VAAPI_HWACCEL
            *fmt++ = AV_PIX_FMT_VAAPI;
#endif
            /* a codec-provided format list overrides the one built here */
            if (h->avctx->codec->pix_fmts)
                choices = h->avctx->codec->pix_fmts;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ420P;
            else
                *fmt++ = AV_PIX_FMT_YUV420P;
        }
        break;
    default:
        av_log(h->avctx, AV_LOG_ERROR,
               "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
        return AVERROR_INVALIDDATA;
    }

    /* terminate the candidate list */
    *fmt = AV_PIX_FMT_NONE;

    /* keep the current format if it is still acceptable and the caller
     * did not force renegotiation */
    for (i=0; choices[i] != AV_PIX_FMT_NONE; i++)
        if (choices[i] == h->avctx->pix_fmt && !force_callback)
            return choices[i];
    return ff_thread_get_format(h->avctx, choices);
}
907 
908 /* export coded and cropped frame dimensions to AVCodecContext */
910 {
911  const SPS *sps = (const SPS*)h->ps.sps;
912  int cr = sps->crop_right;
913  int cl = sps->crop_left;
914  int ct = sps->crop_top;
915  int cb = sps->crop_bottom;
916  int width = h->width - (cr + cl);
917  int height = h->height - (ct + cb);
918  av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
919  av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);
920 
921  /* handle container cropping */
922  if (h->width_from_caller > 0 && h->height_from_caller > 0 &&
923  !sps->crop_top && !sps->crop_left &&
924  FFALIGN(h->width_from_caller, 16) == FFALIGN(width, 16) &&
925  FFALIGN(h->height_from_caller, 16) == FFALIGN(height, 16) &&
926  h->width_from_caller <= width &&
927  h->height_from_caller <= height) {
928  width = h->width_from_caller;
929  height = h->height_from_caller;
930  cl = 0;
931  ct = 0;
932  cr = h->width - width;
933  cb = h->height - height;
934  } else {
935  h->width_from_caller = 0;
936  h->height_from_caller = 0;
937  }
938 
939  h->avctx->coded_width = h->width;
940  h->avctx->coded_height = h->height;
941  h->avctx->width = width;
942  h->avctx->height = height;
943  h->crop_right = cr;
944  h->crop_left = cl;
945  h->crop_top = ct;
946  h->crop_bottom = cb;
947 }
948 
950 {
951  const SPS *sps = h->ps.sps;
952  int i, ret;
953 
954  if (!sps) {
956  goto fail;
957  }
958 
959  ff_set_sar(h->avctx, sps->sar);
960  av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
961  &h->chroma_x_shift, &h->chroma_y_shift);
962 
963  if (sps->timing_info_present_flag) {
964  int64_t den = sps->time_scale;
965  if (h->x264_build < 44U)
966  den *= 2;
967  av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
968  sps->num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30);
969  }
970 
972 
973  h->first_field = 0;
974  h->prev_interlaced_frame = 1;
975 
978  if (ret < 0) {
979  av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
980  goto fail;
981  }
982 
983  if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
984  sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
985  ) {
986  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
987  sps->bit_depth_luma);
989  goto fail;
990  }
991 
992  h->cur_bit_depth_luma =
993  h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
994  h->cur_chroma_format_idc = sps->chroma_format_idc;
995  h->pixel_shift = sps->bit_depth_luma > 8;
996  h->chroma_format_idc = sps->chroma_format_idc;
997  h->bit_depth_luma = sps->bit_depth_luma;
998 
999  ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
1000  sps->chroma_format_idc);
1001  ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
1002  ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
1003  ff_h264_pred_init(&h->hpc, AV_CODEC_ID_H264, sps->bit_depth_luma,
1004  sps->chroma_format_idc);
1005  ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);
1006 
1007  if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
1008  ff_h264_slice_context_init(h, &h->slice_ctx[0]);
1009  } else {
1010  for (i = 0; i < h->nb_slice_ctx; i++) {
1011  H264SliceContext *sl = &h->slice_ctx[i];
1012 
1013  sl->h264 = h;
1014  sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
1015  sl->mvd_table[0] = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
1016  sl->mvd_table[1] = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;
1017 
1019  }
1020  }
1021 
1022  h->context_initialized = 1;
1023 
1024  return 0;
1025 fail:
1027  h->context_initialized = 0;
1028  return ret;
1029 }
1030 
1032 {
1033  switch (a) {
1037  default:
1038  return a;
1039  }
1040 }
1041 
1042 static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
1043 {
1044  const SPS *sps;
1045  int needs_reinit = 0, must_reinit, ret;
1046 
1047  if (first_slice) {
1048  av_buffer_unref(&h->ps.pps_ref);
1049  h->ps.pps = NULL;
1050  h->ps.pps_ref = av_buffer_ref(h->ps.pps_list[sl->pps_id]);
1051  if (!h->ps.pps_ref)
1052  return AVERROR(ENOMEM);
1053  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
1054  }
1055 
1056  if (h->ps.sps != h->ps.pps->sps) {
1057  h->ps.sps = (const SPS*)h->ps.pps->sps;
1058 
1059  if (h->mb_width != h->ps.sps->mb_width ||
1060  h->mb_height != h->ps.sps->mb_height ||
1061  h->cur_bit_depth_luma != h->ps.sps->bit_depth_luma ||
1062  h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc
1063  )
1064  needs_reinit = 1;
1065 
1066  if (h->bit_depth_luma != h->ps.sps->bit_depth_luma ||
1067  h->chroma_format_idc != h->ps.sps->chroma_format_idc)
1068  needs_reinit = 1;
1069  }
1070  sps = h->ps.sps;
1071 
1072  must_reinit = (h->context_initialized &&
1073  ( 16*sps->mb_width != h->avctx->coded_width
1074  || 16*sps->mb_height != h->avctx->coded_height
1075  || h->cur_bit_depth_luma != sps->bit_depth_luma
1076  || h->cur_chroma_format_idc != sps->chroma_format_idc
1077  || h->mb_width != sps->mb_width
1078  || h->mb_height != sps->mb_height
1079  ));
1080  if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
1081  || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0))))
1082  must_reinit = 1;
1083 
1084  if (first_slice && av_cmp_q(sps->sar, h->avctx->sample_aspect_ratio))
1085  must_reinit = 1;
1086 
1087  if (!h->setup_finished) {
1088  h->avctx->profile = ff_h264_get_profile(sps);
1089  h->avctx->level = sps->level_idc;
1090  h->avctx->refs = sps->ref_frame_count;
1091 
1092  h->mb_width = sps->mb_width;
1093  h->mb_height = sps->mb_height;
1094  h->mb_num = h->mb_width * h->mb_height;
1095  h->mb_stride = h->mb_width + 1;
1096 
1097  h->b_stride = h->mb_width * 4;
1098 
1099  h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p
1100 
1101  h->width = 16 * h->mb_width;
1102  h->height = 16 * h->mb_height;
1103 
1104  init_dimensions(h);
1105 
1106  if (sps->video_signal_type_present_flag) {
1107  h->avctx->color_range = sps->full_range > 0 ? AVCOL_RANGE_JPEG
1108  : AVCOL_RANGE_MPEG;
1109  if (sps->colour_description_present_flag) {
1110  if (h->avctx->colorspace != sps->colorspace)
1111  needs_reinit = 1;
1112  h->avctx->color_primaries = sps->color_primaries;
1113  h->avctx->color_trc = sps->color_trc;
1114  h->avctx->colorspace = sps->colorspace;
1115  }
1116  }
1117 
1118  if (h->sei.alternative_transfer.present &&
1119  av_color_transfer_name(h->sei.alternative_transfer.preferred_transfer_characteristics) &&
1120  h->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
1121  h->avctx->color_trc = h->sei.alternative_transfer.preferred_transfer_characteristics;
1122  }
1123  }
1124  h->avctx->chroma_sample_location = sps->chroma_location;
1125 
1126  if (!h->context_initialized || must_reinit || needs_reinit) {
1127  int flush_changes = h->context_initialized;
1128  h->context_initialized = 0;
1129  if (sl != h->slice_ctx) {
1130  av_log(h->avctx, AV_LOG_ERROR,
1131  "changing width %d -> %d / height %d -> %d on "
1132  "slice %d\n",
1133  h->width, h->avctx->coded_width,
1134  h->height, h->avctx->coded_height,
1135  h->current_slice + 1);
1136  return AVERROR_INVALIDDATA;
1137  }
1138 
1139  av_assert1(first_slice);
1140 
1141  if (flush_changes)
1143 
1144  if ((ret = get_pixel_format(h, 1)) < 0)
1145  return ret;
1146  h->avctx->pix_fmt = ret;
1147 
1148  av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
1149  "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt));
1150 
1151  if ((ret = h264_slice_header_init(h)) < 0) {
1152  av_log(h->avctx, AV_LOG_ERROR,
1153  "h264_slice_header_init() failed\n");
1154  return ret;
1155  }
1156  }
1157 
1158  return 0;
1159 }
1160 
1162 {
1163  const SPS *sps = h->ps.sps;
1164  H264Picture *cur = h->cur_pic_ptr;
1165  AVFrame *out = cur->f;
1166 
1167  out->interlaced_frame = 0;
1168  out->repeat_pict = 0;
1169 
1170  /* Signal interlacing information externally. */
1171  /* Prioritize picture timing SEI information over used
1172  * decoding process if it exists. */
1173  if (h->sei.picture_timing.present) {
1174  int ret = ff_h264_sei_process_picture_timing(&h->sei.picture_timing, sps,
1175  h->avctx);
1176  if (ret < 0) {
1177  av_log(h->avctx, AV_LOG_ERROR, "Error processing a picture timing SEI\n");
1178  if (h->avctx->err_recognition & AV_EF_EXPLODE)
1179  return ret;
1180  h->sei.picture_timing.present = 0;
1181  }
1182  }
1183 
1184  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1185  H264SEIPictureTiming *pt = &h->sei.picture_timing;
1186  switch (pt->pic_struct) {
1188  break;
1191  out->interlaced_frame = 1;
1192  break;
1196  out->interlaced_frame = 1;
1197  else
1198  // try to flag soft telecine progressive
1199  out->interlaced_frame = h->prev_interlaced_frame;
1200  break;
1203  /* Signal the possibility of telecined film externally
1204  * (pic_struct 5,6). From these hints, let the applications
1205  * decide if they apply deinterlacing. */
1206  out->repeat_pict = 1;
1207  break;
1209  out->repeat_pict = 2;
1210  break;
1212  out->repeat_pict = 4;
1213  break;
1214  }
1215 
1216  if ((pt->ct_type & 3) &&
1217  pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
1218  out->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
1219  } else {
1220  /* Derive interlacing flag from used decoding process. */
1221  out->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
1222  }
1223  h->prev_interlaced_frame = out->interlaced_frame;
1224 
1225  if (cur->field_poc[0] != cur->field_poc[1]) {
1226  /* Derive top_field_first from field pocs. */
1227  out->top_field_first = cur->field_poc[0] < cur->field_poc[1];
1228  } else {
1229  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1230  /* Use picture timing SEI information. Even if it is a
1231  * information of a past frame, better than nothing. */
1232  if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
1233  h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
1234  out->top_field_first = 1;
1235  else
1236  out->top_field_first = 0;
1237  } else if (out->interlaced_frame) {
1238  /* Default to top field first when pic_struct_present_flag
1239  * is not set but interlaced frame detected */
1240  out->top_field_first = 1;
1241  } else {
1242  /* Most likely progressive */
1243  out->top_field_first = 0;
1244  }
1245  }
1246 
1247  if (h->sei.frame_packing.present &&
1248  h->sei.frame_packing.arrangement_type <= 6 &&
1249  h->sei.frame_packing.content_interpretation_type > 0 &&
1250  h->sei.frame_packing.content_interpretation_type < 3) {
1251  H264SEIFramePacking *fp = &h->sei.frame_packing;
1253  if (stereo) {
1254  switch (fp->arrangement_type) {
1256  stereo->type = AV_STEREO3D_CHECKERBOARD;
1257  break;
1259  stereo->type = AV_STEREO3D_COLUMNS;
1260  break;
1262  stereo->type = AV_STEREO3D_LINES;
1263  break;
1265  if (fp->quincunx_sampling_flag)
1267  else
1268  stereo->type = AV_STEREO3D_SIDEBYSIDE;
1269  break;
1271  stereo->type = AV_STEREO3D_TOPBOTTOM;
1272  break;
1274  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
1275  break;
1276  case H264_SEI_FPA_TYPE_2D:
1277  stereo->type = AV_STEREO3D_2D;
1278  break;
1279  }
1280 
1281  if (fp->content_interpretation_type == 2)
1282  stereo->flags = AV_STEREO3D_FLAG_INVERT;
1283 
1284  if (fp->arrangement_type == H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL) {
1285  if (fp->current_frame_is_frame0_flag)
1286  stereo->view = AV_STEREO3D_VIEW_LEFT;
1287  else
1288  stereo->view = AV_STEREO3D_VIEW_RIGHT;
1289  }
1290  }
1291  }
1292 
1293  if (h->sei.display_orientation.present &&
1294  (h->sei.display_orientation.anticlockwise_rotation ||
1295  h->sei.display_orientation.hflip ||
1296  h->sei.display_orientation.vflip)) {
1297  H264SEIDisplayOrientation *o = &h->sei.display_orientation;
1298  double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
1301  sizeof(int32_t) * 9);
1302  if (rotation) {
1303  /* av_display_rotation_set() expects the angle in the clockwise
1304  * direction, hence the first minus.
1305  * The below code applies the flips after the rotation, yet
1306  * the H.2645 specs require flipping to be applied first.
1307  * Because of R O(phi) = O(-phi) R (where R is flipping around
1308  * an arbitatry axis and O(phi) is the proper rotation by phi)
1309  * we can create display matrices as desired by negating
1310  * the degree once for every flip applied. */
1311  angle = -angle * (1 - 2 * !!o->hflip) * (1 - 2 * !!o->vflip);
1312  av_display_rotation_set((int32_t *)rotation->data, angle);
1313  av_display_matrix_flip((int32_t *)rotation->data,
1314  o->hflip, o->vflip);
1315  }
1316  }
1317 
1318  if (h->sei.afd.present) {
1320  sizeof(uint8_t));
1321 
1322  if (sd) {
1323  *sd->data = h->sei.afd.active_format_description;
1324  h->sei.afd.present = 0;
1325  }
1326  }
1327 
1328  if (h->sei.a53_caption.buf_ref) {
1329  H264SEIA53Caption *a53 = &h->sei.a53_caption;
1330 
1332  if (!sd)
1333  av_buffer_unref(&a53->buf_ref);
1334  a53->buf_ref = NULL;
1335 
1336  h->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
1337  }
1338 
1339  for (int i = 0; i < h->sei.unregistered.nb_buf_ref; i++) {
1340  H264SEIUnregistered *unreg = &h->sei.unregistered;
1341 
1342  if (unreg->buf_ref[i]) {
1345  unreg->buf_ref[i]);
1346  if (!sd)
1347  av_buffer_unref(&unreg->buf_ref[i]);
1348  unreg->buf_ref[i] = NULL;
1349  }
1350  }
1351  h->sei.unregistered.nb_buf_ref = 0;
1352 
1353  if (h->sei.film_grain_characteristics.present) {
1354  H264SEIFilmGrainCharacteristics *fgc = &h->sei.film_grain_characteristics;
1356  if (!fgp)
1357  return AVERROR(ENOMEM);
1358 
1360  fgp->seed = cur->poc + (h->poc_offset << 5);
1361 
1362  fgp->codec.h274.model_id = fgc->model_id;
1366  fgp->codec.h274.color_range = fgc->full_range + 1;
1369  fgp->codec.h274.color_space = fgc->matrix_coeffs;
1370  } else {
1371  fgp->codec.h274.bit_depth_luma = sps->bit_depth_luma;
1372  fgp->codec.h274.bit_depth_chroma = sps->bit_depth_chroma;
1373  if (sps->video_signal_type_present_flag)
1374  fgp->codec.h274.color_range = sps->full_range + 1;
1375  else
1377  if (sps->colour_description_present_flag) {
1378  fgp->codec.h274.color_primaries = sps->color_primaries;
1379  fgp->codec.h274.color_trc = sps->color_trc;
1380  fgp->codec.h274.color_space = sps->colorspace;
1381  } else {
1385  }
1386  }
1389 
1391  sizeof(fgp->codec.h274.component_model_present));
1393  sizeof(fgp->codec.h274.num_intensity_intervals));
1394  memcpy(&fgp->codec.h274.num_model_values, &fgc->num_model_values,
1395  sizeof(fgp->codec.h274.num_model_values));
1400  memcpy(&fgp->codec.h274.comp_model_value, &fgc->comp_model_value,
1401  sizeof(fgp->codec.h274.comp_model_value));
1402 
1403  fgc->present = !!fgc->repetition_period;
1404 
1405  h->avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
1406  }
1407 
1408  if (h->sei.picture_timing.timecode_cnt > 0) {
1409  uint32_t *tc_sd;
1410  char tcbuf[AV_TIMECODE_STR_SIZE];
1411 
1414  sizeof(uint32_t)*4);
1415  if (!tcside)
1416  return AVERROR(ENOMEM);
1417 
1418  tc_sd = (uint32_t*)tcside->data;
1419  tc_sd[0] = h->sei.picture_timing.timecode_cnt;
1420 
1421  for (int i = 0; i < tc_sd[0]; i++) {
1422  int drop = h->sei.picture_timing.timecode[i].dropframe;
1423  int hh = h->sei.picture_timing.timecode[i].hours;
1424  int mm = h->sei.picture_timing.timecode[i].minutes;
1425  int ss = h->sei.picture_timing.timecode[i].seconds;
1426  int ff = h->sei.picture_timing.timecode[i].frame;
1427 
1428  tc_sd[i + 1] = av_timecode_get_smpte(h->avctx->framerate, drop, hh, mm, ss, ff);
1429  av_timecode_make_smpte_tc_string2(tcbuf, h->avctx->framerate, tc_sd[i + 1], 0, 0);
1430  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
1431  }
1432  h->sei.picture_timing.timecode_cnt = 0;
1433  }
1434 
1435  return 0;
1436 }
1437 
1439 {
1440  const SPS *sps = h->ps.sps;
1441  H264Picture *out = h->cur_pic_ptr;
1442  H264Picture *cur = h->cur_pic_ptr;
1443  int i, pics, out_of_order, out_idx;
1444 
1445  cur->mmco_reset = h->mmco_reset;
1446  h->mmco_reset = 0;
1447 
1448  if (sps->bitstream_restriction_flag ||
1449  h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
1450  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
1451  }
1452 
1453  for (i = 0; 1; i++) {
1454  if(i == H264_MAX_DPB_FRAMES || cur->poc < h->last_pocs[i]){
1455  if(i)
1456  h->last_pocs[i-1] = cur->poc;
1457  break;
1458  } else if(i) {
1459  h->last_pocs[i-1]= h->last_pocs[i];
1460  }
1461  }
1462  out_of_order = H264_MAX_DPB_FRAMES - i;
1463  if( cur->f->pict_type == AV_PICTURE_TYPE_B
1464  || (h->last_pocs[H264_MAX_DPB_FRAMES-2] > INT_MIN && h->last_pocs[H264_MAX_DPB_FRAMES-1] - (int64_t)h->last_pocs[H264_MAX_DPB_FRAMES-2] > 2))
1465  out_of_order = FFMAX(out_of_order, 1);
1466  if (out_of_order == H264_MAX_DPB_FRAMES) {
1467  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
1468  for (i = 1; i < H264_MAX_DPB_FRAMES; i++)
1469  h->last_pocs[i] = INT_MIN;
1470  h->last_pocs[0] = cur->poc;
1471  cur->mmco_reset = 1;
1472  } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
1473  int loglevel = h->avctx->frame_number > 1 ? AV_LOG_WARNING : AV_LOG_VERBOSE;
1474  av_log(h->avctx, loglevel, "Increasing reorder buffer to %d\n", out_of_order);
1475  h->avctx->has_b_frames = out_of_order;
1476  }
1477 
1478  pics = 0;
1479  while (h->delayed_pic[pics])
1480  pics++;
1481 
1483 
1484  h->delayed_pic[pics++] = cur;
1485  if (cur->reference == 0)
1486  cur->reference = DELAYED_PIC_REF;
1487 
1488  out = h->delayed_pic[0];
1489  out_idx = 0;
1490  for (i = 1; h->delayed_pic[i] &&
1491  !h->delayed_pic[i]->f->key_frame &&
1492  !h->delayed_pic[i]->mmco_reset;
1493  i++)
1494  if (h->delayed_pic[i]->poc < out->poc) {
1495  out = h->delayed_pic[i];
1496  out_idx = i;
1497  }
1498  if (h->avctx->has_b_frames == 0 &&
1499  (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
1500  h->next_outputed_poc = INT_MIN;
1501  out_of_order = out->poc < h->next_outputed_poc;
1502 
1503  if (out_of_order || pics > h->avctx->has_b_frames) {
1504  out->reference &= ~DELAYED_PIC_REF;
1505  for (i = out_idx; h->delayed_pic[i]; i++)
1506  h->delayed_pic[i] = h->delayed_pic[i + 1];
1507  }
1508  if (!out_of_order && pics > h->avctx->has_b_frames) {
1509  h->next_output_pic = out;
1510  if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
1511  h->next_outputed_poc = INT_MIN;
1512  } else
1513  h->next_outputed_poc = out->poc;
1514 
1515  if (out->recovered) {
1516  // We have reached an recovery point and all frames after it in
1517  // display order are "recovered".
1518  h->frame_recovered |= FRAME_RECOVERED_SEI;
1519  }
1520  out->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
1521 
1522  if (!out->recovered) {
1523  if (!(h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) &&
1524  !(h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)) {
1525  h->next_output_pic = NULL;
1526  } else {
1527  out->f->flags |= AV_FRAME_FLAG_CORRUPT;
1528  }
1529  }
1530  } else {
1531  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
1532  }
1533 
1534  return 0;
1535 }
1536 
1537 /* This function is called right after decoding the slice header for a first
1538  * slice in a field (or a frame). It decides whether we are decoding a new frame
1539  * or a second field in a pair and does the necessary setup.
1540  */
1542  const H2645NAL *nal, int first_slice)
1543 {
1544  int i;
1545  const SPS *sps;
1546 
1547  int last_pic_structure, last_pic_droppable, ret;
1548 
1549  ret = h264_init_ps(h, sl, first_slice);
1550  if (ret < 0)
1551  return ret;
1552 
1553  sps = h->ps.sps;
1554 
1555  if (sps && sps->bitstream_restriction_flag &&
1556  h->avctx->has_b_frames < sps->num_reorder_frames) {
1557  h->avctx->has_b_frames = sps->num_reorder_frames;
1558  }
1559 
1560  last_pic_droppable = h->droppable;
1561  last_pic_structure = h->picture_structure;
1562  h->droppable = (nal->ref_idc == 0);
1563  h->picture_structure = sl->picture_structure;
1564 
1565  h->poc.frame_num = sl->frame_num;
1566  h->poc.poc_lsb = sl->poc_lsb;
1567  h->poc.delta_poc_bottom = sl->delta_poc_bottom;
1568  h->poc.delta_poc[0] = sl->delta_poc[0];
1569  h->poc.delta_poc[1] = sl->delta_poc[1];
1570 
1571  if (nal->type == H264_NAL_IDR_SLICE)
1572  h->poc_offset = sl->idr_pic_id;
1573  else if (h->picture_intra_only)
1574  h->poc_offset = 0;
1575 
1576  /* Shorten frame num gaps so we don't have to allocate reference
1577  * frames just to throw them away */
1578  if (h->poc.frame_num != h->poc.prev_frame_num) {
1579  int unwrap_prev_frame_num = h->poc.prev_frame_num;
1580  int max_frame_num = 1 << sps->log2_max_frame_num;
1581 
1582  if (unwrap_prev_frame_num > h->poc.frame_num)
1583  unwrap_prev_frame_num -= max_frame_num;
1584 
1585  if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
1586  unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
1587  if (unwrap_prev_frame_num < 0)
1588  unwrap_prev_frame_num += max_frame_num;
1589 
1590  h->poc.prev_frame_num = unwrap_prev_frame_num;
1591  }
1592  }
1593 
1594  /* See if we have a decoded first field looking for a pair...
1595  * Here, we're using that to see if we should mark previously
1596  * decode frames as "finished".
1597  * We have to do that before the "dummy" in-between frame allocation,
1598  * since that can modify h->cur_pic_ptr. */
1599  if (h->first_field) {
1600  int last_field = last_pic_structure == PICT_BOTTOM_FIELD;
1601  av_assert0(h->cur_pic_ptr);
1602  av_assert0(h->cur_pic_ptr->f->buf[0]);
1603  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1604 
1605  /* Mark old field/frame as completed */
1606  if (h->cur_pic_ptr->tf.owner[last_field] == h->avctx) {
1607  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_field);
1608  }
1609 
1610  /* figure out if we have a complementary field pair */
1611  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1612  /* Previous field is unmatched. Don't display it, but let it
1613  * remain for reference if marked as such. */
1614  if (last_pic_structure != PICT_FRAME) {
1615  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1616  last_pic_structure == PICT_TOP_FIELD);
1617  }
1618  } else {
1619  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1620  /* This and previous field were reference, but had
1621  * different frame_nums. Consider this field first in
1622  * pair. Throw away previous field except for reference
1623  * purposes. */
1624  if (last_pic_structure != PICT_FRAME) {
1625  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1626  last_pic_structure == PICT_TOP_FIELD);
1627  }
1628  } else {
1629  /* Second field in complementary pair */
1630  if (!((last_pic_structure == PICT_TOP_FIELD &&
1631  h->picture_structure == PICT_BOTTOM_FIELD) ||
1632  (last_pic_structure == PICT_BOTTOM_FIELD &&
1633  h->picture_structure == PICT_TOP_FIELD))) {
1634  av_log(h->avctx, AV_LOG_ERROR,
1635  "Invalid field mode combination %d/%d\n",
1636  last_pic_structure, h->picture_structure);
1637  h->picture_structure = last_pic_structure;
1638  h->droppable = last_pic_droppable;
1639  return AVERROR_INVALIDDATA;
1640  } else if (last_pic_droppable != h->droppable) {
1641  avpriv_request_sample(h->avctx,
1642  "Found reference and non-reference fields in the same frame, which");
1643  h->picture_structure = last_pic_structure;
1644  h->droppable = last_pic_droppable;
1645  return AVERROR_PATCHWELCOME;
1646  }
1647  }
1648  }
1649  }
1650 
1651  while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
1652  h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
1653  const H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
1654  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
1655  h->poc.frame_num, h->poc.prev_frame_num);
1656  if (!sps->gaps_in_frame_num_allowed_flag)
1657  for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
1658  h->last_pocs[i] = INT_MIN;
1659  ret = h264_frame_start(h);
1660  if (ret < 0) {
1661  h->first_field = 0;
1662  return ret;
1663  }
1664 
1665  h->poc.prev_frame_num++;
1666  h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num;
1667  h->cur_pic_ptr->frame_num = h->poc.prev_frame_num;
1668  h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
1669  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
1670  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
1671 
1672  h->explicit_ref_marking = 0;
1674  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1675  return ret;
1676  /* Error concealment: If a ref is missing, copy the previous ref
1677  * in its place.
1678  * FIXME: Avoiding a memcpy would be nice, but ref handling makes
1679  * many assumptions about there being no actual duplicates.
1680  * FIXME: This does not copy padding for out-of-frame motion
1681  * vectors. Given we are concealing a lost frame, this probably
1682  * is not noticeable by comparison, but it should be fixed. */
1683  if (h->short_ref_count) {
1684  int c[4] = {
1685  1<<(h->ps.sps->bit_depth_luma-1),
1686  1<<(h->ps.sps->bit_depth_chroma-1),
1687  1<<(h->ps.sps->bit_depth_chroma-1),
1688  -1
1689  };
1690 
1691  if (prev &&
1692  h->short_ref[0]->f->width == prev->f->width &&
1693  h->short_ref[0]->f->height == prev->f->height &&
1694  h->short_ref[0]->f->format == prev->f->format) {
1695  ff_thread_await_progress(&prev->tf, INT_MAX, 0);
1696  if (prev->field_picture)
1697  ff_thread_await_progress(&prev->tf, INT_MAX, 1);
1698  ff_thread_release_ext_buffer(h->avctx, &h->short_ref[0]->tf);
1699  h->short_ref[0]->tf.f = h->short_ref[0]->f;
1700  ret = ff_thread_ref_frame(&h->short_ref[0]->tf, &prev->tf);
1701  if (ret < 0)
1702  return ret;
1703  h->short_ref[0]->poc = prev->poc + 2U;
1704  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 0);
1705  if (h->short_ref[0]->field_picture)
1706  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 1);
1707  } else if (!h->frame_recovered && !h->avctx->hwaccel)
1708  ff_color_frame(h->short_ref[0]->f, c);
1709  h->short_ref[0]->frame_num = h->poc.prev_frame_num;
1710  }
1711  }
1712 
1713  /* See if we have a decoded first field looking for a pair...
1714  * We're using that to see whether to continue decoding in that
1715  * frame, or to allocate a new one. */
1716  if (h->first_field) {
1717  av_assert0(h->cur_pic_ptr);
1718  av_assert0(h->cur_pic_ptr->f->buf[0]);
1719  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1720 
1721  /* figure out if we have a complementary field pair */
1722  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1723  /* Previous field is unmatched. Don't display it, but let it
1724  * remain for reference if marked as such. */
1725  h->missing_fields ++;
1726  h->cur_pic_ptr = NULL;
1727  h->first_field = FIELD_PICTURE(h);
1728  } else {
1729  h->missing_fields = 0;
1730  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1731  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1732  h->picture_structure==PICT_BOTTOM_FIELD);
1733  /* This and the previous field had different frame_nums.
1734  * Consider this field first in pair. Throw away previous
1735  * one except for reference purposes. */
1736  h->first_field = 1;
1737  h->cur_pic_ptr = NULL;
1738  } else if (h->cur_pic_ptr->reference & DELAYED_PIC_REF) {
1739  /* This frame was already output, we cannot draw into it
1740  * anymore.
1741  */
1742  h->first_field = 1;
1743  h->cur_pic_ptr = NULL;
1744  } else {
1745  /* Second field in complementary pair */
1746  h->first_field = 0;
1747  }
1748  }
1749  } else {
1750  /* Frame or first field in a potentially complementary pair */
1751  h->first_field = FIELD_PICTURE(h);
1752  }
1753 
1754  if (!FIELD_PICTURE(h) || h->first_field) {
1755  if (h264_frame_start(h) < 0) {
1756  h->first_field = 0;
1757  return AVERROR_INVALIDDATA;
1758  }
1759  } else {
1760  int field = h->picture_structure == PICT_BOTTOM_FIELD;
1762  h->cur_pic_ptr->tf.owner[field] = h->avctx;
1763  }
1764  /* Some macroblocks can be accessed before they're available in case
1765  * of lost slices, MBAFF or threading. */
1766  if (FIELD_PICTURE(h)) {
1767  for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
1768  memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
1769  } else {
1770  memset(h->slice_table, -1,
1771  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
1772  }
1773 
1774  ret = ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
1775  h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);
1776  if (ret < 0)
1777  return ret;
1778 
1779  memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
1780  h->nb_mmco = sl->nb_mmco;
1781  h->explicit_ref_marking = sl->explicit_ref_marking;
1782 
1783  h->picture_idr = nal->type == H264_NAL_IDR_SLICE;
1784 
1785  if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
1786  const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
1787 
1788  if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
1789  h->valid_recovery_point = 1;
1790 
1791  if ( h->recovery_frame < 0
1792  || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
1793  h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
1794 
1795  if (!h->valid_recovery_point)
1796  h->recovery_frame = h->poc.frame_num;
1797  }
1798  }
1799 
1800  h->cur_pic_ptr->f->key_frame |= (nal->type == H264_NAL_IDR_SLICE);
1801 
1802  if (nal->type == H264_NAL_IDR_SLICE ||
1803  (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
1804  h->recovery_frame = -1;
1805  h->cur_pic_ptr->recovered = 1;
1806  }
1807  // If we have an IDR, all frames after it in decoded order are
1808  // "recovered".
1809  if (nal->type == H264_NAL_IDR_SLICE)
1810  h->frame_recovered |= FRAME_RECOVERED_IDR;
1811 #if 1
1812  h->cur_pic_ptr->recovered |= h->frame_recovered;
1813 #else
1814  h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
1815 #endif
1816 
1817  /* Set the frame properties/side data. Only done for the second field in
1818  * field coded frames, since some SEI information is present for each field
1819  * and is merged by the SEI parsing code. */
1820  if (!FIELD_PICTURE(h) || !h->first_field || h->missing_fields > 1) {
1822  if (ret < 0)
1823  return ret;
1824 
1826  if (ret < 0)
1827  return ret;
1828  }
1829 
1830  return 0;
1831 }
1832 
1834  const H2645NAL *nal)
1835 {
1836  const SPS *sps;
1837  const PPS *pps;
1838  int ret;
1839  unsigned int slice_type, tmp, i;
1840  int field_pic_flag, bottom_field_flag;
1841  int first_slice = sl == h->slice_ctx && !h->current_slice;
1842  int picture_structure;
1843 
1844  if (first_slice)
1845  av_assert0(!h->setup_finished);
1846 
1847  sl->first_mb_addr = get_ue_golomb_long(&sl->gb);
1848 
1849  slice_type = get_ue_golomb_31(&sl->gb);
1850  if (slice_type > 9) {
1851  av_log(h->avctx, AV_LOG_ERROR,
1852  "slice type %d too large at %d\n",
1853  slice_type, sl->first_mb_addr);
1854  return AVERROR_INVALIDDATA;
1855  }
1856  if (slice_type > 4) {
1857  slice_type -= 5;
1858  sl->slice_type_fixed = 1;
1859  } else
1860  sl->slice_type_fixed = 0;
1861 
1862  slice_type = ff_h264_golomb_to_pict_type[slice_type];
1863  sl->slice_type = slice_type;
1864  sl->slice_type_nos = slice_type & 3;
1865 
1866  if (nal->type == H264_NAL_IDR_SLICE &&
1868  av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
1869  return AVERROR_INVALIDDATA;
1870  }
1871 
1872  sl->pps_id = get_ue_golomb(&sl->gb);
1873  if (sl->pps_id >= MAX_PPS_COUNT) {
1874  av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
1875  return AVERROR_INVALIDDATA;
1876  }
1877  if (!h->ps.pps_list[sl->pps_id]) {
1878  av_log(h->avctx, AV_LOG_ERROR,
1879  "non-existing PPS %u referenced\n",
1880  sl->pps_id);
1881  return AVERROR_INVALIDDATA;
1882  }
1883  pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
1884  sps = pps->sps;
1885 
1886  sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
1887  if (!first_slice) {
1888  if (h->poc.frame_num != sl->frame_num) {
1889  av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
1890  h->poc.frame_num, sl->frame_num);
1891  return AVERROR_INVALIDDATA;
1892  }
1893  }
1894 
1895  sl->mb_mbaff = 0;
1896 
1897  if (sps->frame_mbs_only_flag) {
1898  picture_structure = PICT_FRAME;
1899  } else {
1900  if (!sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
1901  av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
1902  return -1;
1903  }
1904  field_pic_flag = get_bits1(&sl->gb);
1905  if (field_pic_flag) {
1906  bottom_field_flag = get_bits1(&sl->gb);
1907  picture_structure = PICT_TOP_FIELD + bottom_field_flag;
1908  } else {
1909  picture_structure = PICT_FRAME;
1910  }
1911  }
1912  sl->picture_structure = picture_structure;
1913  sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;
1914 
1915  if (picture_structure == PICT_FRAME) {
1916  sl->curr_pic_num = sl->frame_num;
1917  sl->max_pic_num = 1 << sps->log2_max_frame_num;
1918  } else {
1919  sl->curr_pic_num = 2 * sl->frame_num + 1;
1920  sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
1921  }
1922 
1923  if (nal->type == H264_NAL_IDR_SLICE) {
1924  unsigned idr_pic_id = get_ue_golomb_long(&sl->gb);
1925  if (idr_pic_id < 65536) {
1926  sl->idr_pic_id = idr_pic_id;
1927  } else
1928  av_log(h->avctx, AV_LOG_WARNING, "idr_pic_id is invalid\n");
1929  }
1930 
1931  sl->poc_lsb = 0;
1932  sl->delta_poc_bottom = 0;
1933  if (sps->poc_type == 0) {
1934  sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
1935 
1936  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1937  sl->delta_poc_bottom = get_se_golomb(&sl->gb);
1938  }
1939 
1940  sl->delta_poc[0] = sl->delta_poc[1] = 0;
1941  if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
1942  sl->delta_poc[0] = get_se_golomb(&sl->gb);
1943 
1944  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1945  sl->delta_poc[1] = get_se_golomb(&sl->gb);
1946  }
1947 
1948  sl->redundant_pic_count = 0;
1949  if (pps->redundant_pic_cnt_present)
1950  sl->redundant_pic_count = get_ue_golomb(&sl->gb);
1951 
1952  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1953  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
1954 
1956  &sl->gb, pps, sl->slice_type_nos,
1957  picture_structure, h->avctx);
1958  if (ret < 0)
1959  return ret;
1960 
1961  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
1963  if (ret < 0) {
1964  sl->ref_count[1] = sl->ref_count[0] = 0;
1965  return ret;
1966  }
1967  }
1968 
1969  sl->pwt.use_weight = 0;
1970  for (i = 0; i < 2; i++) {
1971  sl->pwt.luma_weight_flag[i] = 0;
1972  sl->pwt.chroma_weight_flag[i] = 0;
1973  }
1974  if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
1975  (pps->weighted_bipred_idc == 1 &&
1978  sl->slice_type_nos, &sl->pwt,
1979  picture_structure, h->avctx);
1980  if (ret < 0)
1981  return ret;
1982  }
1983 
1984  sl->explicit_ref_marking = 0;
1985  if (nal->ref_idc) {
1986  ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx);
1987  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1988  return AVERROR_INVALIDDATA;
1989  }
1990 
1991  if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
1992  tmp = get_ue_golomb_31(&sl->gb);
1993  if (tmp > 2) {
1994  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
1995  return AVERROR_INVALIDDATA;
1996  }
1997  sl->cabac_init_idc = tmp;
1998  }
1999 
2000  sl->last_qscale_diff = 0;
2001  tmp = pps->init_qp + (unsigned)get_se_golomb(&sl->gb);
2002  if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
2003  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
2004  return AVERROR_INVALIDDATA;
2005  }
2006  sl->qscale = tmp;
2007  sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
2008  sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
2009  // FIXME qscale / qp ... stuff
2010  if (sl->slice_type == AV_PICTURE_TYPE_SP)
2011  get_bits1(&sl->gb); /* sp_for_switch_flag */
2012  if (sl->slice_type == AV_PICTURE_TYPE_SP ||
2014  get_se_golomb(&sl->gb); /* slice_qs_delta */
2015 
2016  sl->deblocking_filter = 1;
2017  sl->slice_alpha_c0_offset = 0;
2018  sl->slice_beta_offset = 0;
2019  if (pps->deblocking_filter_parameters_present) {
2020  tmp = get_ue_golomb_31(&sl->gb);
2021  if (tmp > 2) {
2022  av_log(h->avctx, AV_LOG_ERROR,
2023  "deblocking_filter_idc %u out of range\n", tmp);
2024  return AVERROR_INVALIDDATA;
2025  }
2026  sl->deblocking_filter = tmp;
2027  if (sl->deblocking_filter < 2)
2028  sl->deblocking_filter ^= 1; // 1<->0
2029 
2030  if (sl->deblocking_filter) {
2031  int slice_alpha_c0_offset_div2 = get_se_golomb(&sl->gb);
2032  int slice_beta_offset_div2 = get_se_golomb(&sl->gb);
2033  if (slice_alpha_c0_offset_div2 > 6 ||
2034  slice_alpha_c0_offset_div2 < -6 ||
2035  slice_beta_offset_div2 > 6 ||
2036  slice_beta_offset_div2 < -6) {
2037  av_log(h->avctx, AV_LOG_ERROR,
2038  "deblocking filter parameters %d %d out of range\n",
2039  slice_alpha_c0_offset_div2, slice_beta_offset_div2);
2040  return AVERROR_INVALIDDATA;
2041  }
2042  sl->slice_alpha_c0_offset = slice_alpha_c0_offset_div2 * 2;
2043  sl->slice_beta_offset = slice_beta_offset_div2 * 2;
2044  }
2045  }
2046 
2047  return 0;
2048 }
2049 
2050 /* do all the per-slice initialization needed before we can start decoding the
2051  * actual MBs */
2053  const H2645NAL *nal)
2054 {
2055  int i, j, ret = 0;
2056 
2057  if (h->picture_idr && nal->type != H264_NAL_IDR_SLICE) {
2058  av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of IDR and non-IDR slices\n");
2059  return AVERROR_INVALIDDATA;
2060  }
2061 
2062  av_assert1(h->mb_num == h->mb_width * h->mb_height);
2063  if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
2064  sl->first_mb_addr >= h->mb_num) {
2065  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
2066  return AVERROR_INVALIDDATA;
2067  }
2068  sl->resync_mb_x = sl->mb_x = sl->first_mb_addr % h->mb_width;
2069  sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
2071  if (h->picture_structure == PICT_BOTTOM_FIELD)
2072  sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
2073  av_assert1(sl->mb_y < h->mb_height);
2074 
2075  ret = ff_h264_build_ref_list(h, sl);
2076  if (ret < 0)
2077  return ret;
2078 
2079  if (h->ps.pps->weighted_bipred_idc == 2 &&
2081  implicit_weight_table(h, sl, -1);
2082  if (FRAME_MBAFF(h)) {
2083  implicit_weight_table(h, sl, 0);
2084  implicit_weight_table(h, sl, 1);
2085  }
2086  }
2087 
2090  if (!h->setup_finished)
2092 
2093  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
2094  (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
2095  h->nal_unit_type != H264_NAL_IDR_SLICE) ||
2096  (h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
2098  (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
2100  (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
2101  nal->ref_idc == 0))
2102  sl->deblocking_filter = 0;
2103 
2104  if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
2105  if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
2106  /* Cheat slightly for speed:
2107  * Do not bother to deblock across slices. */
2108  sl->deblocking_filter = 2;
2109  } else {
2110  h->postpone_filter = 1;
2111  }
2112  }
2113  sl->qp_thresh = 15 -
2115  FFMAX3(0,
2116  h->ps.pps->chroma_qp_index_offset[0],
2117  h->ps.pps->chroma_qp_index_offset[1]) +
2118  6 * (h->ps.sps->bit_depth_luma - 8);
2119 
2120  sl->slice_num = ++h->current_slice;
2121 
2122  if (sl->slice_num)
2123  h->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= sl->resync_mb_y;
2124  if ( h->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= sl->resync_mb_y
2125  && h->slice_row[sl->slice_num&(MAX_SLICES-1)] <= sl->resync_mb_y
2126  && sl->slice_num >= MAX_SLICES) {
2127  //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
2128  av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
2129  }
2130 
2131  for (j = 0; j < 2; j++) {
2132  int id_list[16];
2133  int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
2134  for (i = 0; i < 16; i++) {
2135  id_list[i] = 60;
2136  if (j < sl->list_count && i < sl->ref_count[j] &&
2137  sl->ref_list[j][i].parent->f->buf[0]) {
2138  int k;
2139  AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
2140  for (k = 0; k < h->short_ref_count; k++)
2141  if (h->short_ref[k]->f->buf[0]->buffer == buf) {
2142  id_list[i] = k;
2143  break;
2144  }
2145  for (k = 0; k < h->long_ref_count; k++)
2146  if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
2147  id_list[i] = h->short_ref_count + k;
2148  break;
2149  }
2150  }
2151  }
2152 
2153  ref2frm[0] =
2154  ref2frm[1] = -1;
2155  for (i = 0; i < 16; i++)
2156  ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
2157  ref2frm[18 + 0] =
2158  ref2frm[18 + 1] = -1;
2159  for (i = 16; i < 48; i++)
2160  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
2161  (sl->ref_list[j][i].reference & 3);
2162  }
2163 
2164  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
2165  av_log(h->avctx, AV_LOG_DEBUG,
2166  "slice:%d %c mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
2167  sl->slice_num,
2168  (h->picture_structure == PICT_FRAME ? 'F' : h->picture_structure == PICT_TOP_FIELD ? 'T' : 'B'),
2169  sl->mb_y * h->mb_width + sl->mb_x,
2171  sl->slice_type_fixed ? " fix" : "",
2172  nal->type == H264_NAL_IDR_SLICE ? " IDR" : "",
2173  h->poc.frame_num,
2174  h->cur_pic_ptr->field_poc[0],
2175  h->cur_pic_ptr->field_poc[1],
2176  sl->ref_count[0], sl->ref_count[1],
2177  sl->qscale,
2178  sl->deblocking_filter,
2180  sl->pwt.use_weight,
2181  sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
2182  sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
2183  }
2184 
2185  return 0;
2186 }
2187 
2189 {
2190  H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
2191  int first_slice = sl == h->slice_ctx && !h->current_slice;
2192  int ret;
2193 
2194  sl->gb = nal->gb;
2195 
2196  ret = h264_slice_header_parse(h, sl, nal);
2197  if (ret < 0)
2198  return ret;
2199 
2200  // discard redundant pictures
2201  if (sl->redundant_pic_count > 0) {
2202  sl->ref_count[0] = sl->ref_count[1] = 0;
2203  return 0;
2204  }
2205 
2206  if (sl->first_mb_addr == 0 || !h->current_slice) {
2207  if (h->setup_finished) {
2208  av_log(h->avctx, AV_LOG_ERROR, "Too many fields\n");
2209  return AVERROR_INVALIDDATA;
2210  }
2211  }
2212 
2213  if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
2214  if (h->current_slice) {
2215  // this slice starts a new field
2216  // first decode any pending queued slices
2217  if (h->nb_slice_ctx_queued) {
2218  H264SliceContext tmp_ctx;
2219 
2221  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
2222  return ret;
2223 
2224  memcpy(&tmp_ctx, h->slice_ctx, sizeof(tmp_ctx));
2225  memcpy(h->slice_ctx, sl, sizeof(tmp_ctx));
2226  memcpy(sl, &tmp_ctx, sizeof(tmp_ctx));
2227  sl = h->slice_ctx;
2228  }
2229 
2230  if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
2231  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2232  if (ret < 0)
2233  return ret;
2234  } else if (h->cur_pic_ptr && !FIELD_PICTURE(h) && !h->first_field && h->nal_unit_type == H264_NAL_IDR_SLICE) {
2235  av_log(h, AV_LOG_WARNING, "Broken frame packetizing\n");
2236  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2237  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
2238  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
2239  h->cur_pic_ptr = NULL;
2240  if (ret < 0)
2241  return ret;
2242  } else
2243  return AVERROR_INVALIDDATA;
2244  }
2245 
2246  if (!h->first_field) {
2247  if (h->cur_pic_ptr && !h->droppable) {
2248  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
2249  h->picture_structure == PICT_BOTTOM_FIELD);
2250  }
2251  h->cur_pic_ptr = NULL;
2252  }
2253  }
2254 
2255  if (!h->current_slice)
2256  av_assert0(sl == h->slice_ctx);
2257 
2258  if (h->current_slice == 0 && !h->first_field) {
2259  if (
2260  (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
2261  (h->avctx->skip_frame >= AVDISCARD_BIDIR && sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
2262  (h->avctx->skip_frame >= AVDISCARD_NONINTRA && sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
2263  (h->avctx->skip_frame >= AVDISCARD_NONKEY && h->nal_unit_type != H264_NAL_IDR_SLICE && h->sei.recovery_point.recovery_frame_cnt < 0) ||
2264  h->avctx->skip_frame >= AVDISCARD_ALL) {
2265  return 0;
2266  }
2267  }
2268 
2269  if (!first_slice) {
2270  const PPS *pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
2271 
2272  if (h->ps.pps->sps_id != pps->sps_id ||
2273  h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
2274  (h->setup_finished && h->ps.pps != pps)*/) {
2275  av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
2276  return AVERROR_INVALIDDATA;
2277  }
2278  if (h->ps.sps != pps->sps) {
2279  av_log(h->avctx, AV_LOG_ERROR,
2280  "SPS changed in the middle of the frame\n");
2281  return AVERROR_INVALIDDATA;
2282  }
2283  }
2284 
2285  if (h->current_slice == 0) {
2286  ret = h264_field_start(h, sl, nal, first_slice);
2287  if (ret < 0)
2288  return ret;
2289  } else {
2290  if (h->picture_structure != sl->picture_structure ||
2291  h->droppable != (nal->ref_idc == 0)) {
2292  av_log(h->avctx, AV_LOG_ERROR,
2293  "Changing field mode (%d -> %d) between slices is not allowed\n",
2294  h->picture_structure, sl->picture_structure);
2295  return AVERROR_INVALIDDATA;
2296  } else if (!h->cur_pic_ptr) {
2297  av_log(h->avctx, AV_LOG_ERROR,
2298  "unset cur_pic_ptr on slice %d\n",
2299  h->current_slice + 1);
2300  return AVERROR_INVALIDDATA;
2301  }
2302  }
2303 
2304  ret = h264_slice_init(h, sl, nal);
2305  if (ret < 0)
2306  return ret;
2307 
2308  h->nb_slice_ctx_queued++;
2309 
2310  return 0;
2311 }
2312 
/* Map an AVPictureType slice type to the numeric slice_type code used
 * by the H.264 bitstream syntax: P=0, B=1, I=2, SP=3, SI=4.
 * Returns AVERROR_INVALIDDATA for any other picture type.
 * NOTE(review): the signature line (listing line 2313) is missing from
 * this extract — presumably ff_h264_get_slice_type(const H264SliceContext *sl);
 * confirm against the full source. */
2314 {
2315  switch (sl->slice_type) {
2316  case AV_PICTURE_TYPE_P:
2317  return 0;
2318  case AV_PICTURE_TYPE_B:
2319  return 1;
2320  case AV_PICTURE_TYPE_I:
2321  return 2;
2322  case AV_PICTURE_TYPE_SP:
2323  return 3;
2324  case AV_PICTURE_TYPE_SI:
2325  return 4;
2326  default:
2327  return AVERROR_INVALIDDATA;
2328  }
2329 }
2330 
/* Fill sl->mv_cache and sl->ref_cache for one reference list with the
 * current MB's data plus the top/left neighbour rows/columns needed by
 * the deblocking filter. Reference indices are remapped through the
 * per-slice ref2frm table so refs from different slices are comparable.
 * NOTE(review): the first signature line (listing line 2331) is missing
 * from this extract; the remaining parameters follow below. */
2332  H264SliceContext *sl,
2333  int mb_type, int top_xy,
2334  int left_xy[LEFT_MBS],
2335  int top_type,
2336  int left_type[LEFT_MBS],
2337  int mb_xy, int list)
2338 {
2339  int b_stride = h->b_stride;
2340  int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
2341  int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
 /* Neighbour MV/ref data is only needed when the current MB is
 * inter- or direct-predicted. */
2342  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
2343  if (USES_LIST(top_type, list)) {
 /* Top neighbour: its bottom 4x4 row of MVs and its bottom two
 * 8x8 ref indices. */
2344  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
2345  const int b8_xy = 4 * top_xy + 2;
2346  const int *ref2frm = &h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2347  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
2348  ref_cache[0 - 1 * 8] =
2349  ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
2350  ref_cache[2 - 1 * 8] =
2351  ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
2352  } else {
 /* Top neighbour does not use this list: zero MVs, mark refs unused. */
2353  AV_ZERO128(mv_dst - 1 * 8);
2354  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2355  }
2356 
 /* Left neighbour is only read when it has the same field/frame
 * coding as the current MB. */
2357  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
2358  if (USES_LIST(left_type[LTOP], list)) {
 /* Left neighbour: rightmost 4x4 column of MVs and its right
 * two 8x8 ref indices. */
2359  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
2360  const int b8_xy = 4 * left_xy[LTOP] + 1;
2361  const int *ref2frm = &h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2362  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
2363  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
2364  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
2365  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
2366  ref_cache[-1 + 0] =
2367  ref_cache[-1 + 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
2368  ref_cache[-1 + 16] =
2369  ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
2370  } else {
2371  AV_ZERO32(mv_dst - 1 + 0);
2372  AV_ZERO32(mv_dst - 1 + 8);
2373  AV_ZERO32(mv_dst - 1 + 16);
2374  AV_ZERO32(mv_dst - 1 + 24);
2375  ref_cache[-1 + 0] =
2376  ref_cache[-1 + 8] =
2377  ref_cache[-1 + 16] =
2378  ref_cache[-1 + 24] = LIST_NOT_USED;
2379  }
2380  }
2381  }
2382 
 /* The current MB itself does not use this list: clear the whole
 * 4x4 MV cache and mark every ref unused, then we are done. */
2383  if (!USES_LIST(mb_type, list)) {
2384  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
2385  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2386  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2387  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2388  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2389  return;
2390  }
2391 
 /* Own reference indices: remap through ref2frm and splat each 8x8
 * block's ref across its two cache rows. */
2392  {
2393  int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
2394  const int *ref2frm = &h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2395  uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
2396  uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
2397  AV_WN32A(&ref_cache[0 * 8], ref01);
2398  AV_WN32A(&ref_cache[1 * 8], ref01);
2399  AV_WN32A(&ref_cache[2 * 8], ref23);
2400  AV_WN32A(&ref_cache[3 * 8], ref23);
2401  }
2402 
 /* Own motion vectors: copy one 4x4-block row (16 bytes) at a time
 * from the picture-wide motion_val plane. */
2403  {
2404  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
2405  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
2406  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
2407  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
2408  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
2409  }
2410 }
2411 
2412 /**
 * Compute neighbour MB locations/types and fill the nnz, mv and ref
 * caches needed to deblock one macroblock.
2413  * @return non zero if the loop filter can be skipped
2414  */
2415 static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
2416 {
2417  const int mb_xy = sl->mb_xy;
2418  int top_xy, left_xy[LEFT_MBS];
2419  int top_type, left_type[LEFT_MBS];
2420  uint8_t *nnz;
2421  uint8_t *nnz_cache;
2422 
2423  top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));
2424 
 /* In MBAFF frames the effective top/left neighbours depend on the
 * field/frame coding of the current MB and its left neighbour. */
2425  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
2426  if (FRAME_MBAFF(h)) {
2427  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
2428  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
2429  if (sl->mb_y & 1) {
2430  if (left_mb_field_flag != curr_mb_field_flag)
2431  left_xy[LTOP] -= h->mb_stride;
2432  } else {
2433  if (curr_mb_field_flag)
2434  top_xy += h->mb_stride &
2435  (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
2436  if (left_mb_field_flag != curr_mb_field_flag)
2437  left_xy[LBOT] += h->mb_stride;
2438  }
2439  }
2440 
2441  sl->top_mb_xy = top_xy;
2442  sl->left_mb_xy[LTOP] = left_xy[LTOP];
2443  sl->left_mb_xy[LBOT] = left_xy[LBOT];
2444  {
2445  /* For sufficiently low qp, filtering wouldn't do anything.
2446  * This is a conservative estimate: could also check beta_offset
2447  * and more accurate chroma_qp. */
2448  int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
2449  int qp = h->cur_pic.qscale_table[mb_xy];
2450  if (qp <= qp_thresh &&
2451  (left_xy[LTOP] < 0 ||
2452  ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
2453  (top_xy < 0 ||
2454  ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
2455  if (!FRAME_MBAFF(h))
2456  return 1;
 /* NOTE(review): this guard tests left_xy[LTOP] but indexes
 * left_xy[LBOT] on the next line — verify against the full
 * source that this asymmetry is intentional for MBAFF pairs. */
2457  if ((left_xy[LTOP] < 0 ||
2458  ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
2459  (top_xy < h->mb_stride ||
2460  ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
2461  return 1;
2462  }
2463  }
2464 
2465  top_type = h->cur_pic.mb_type[top_xy];
2466  left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
2467  left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
 /* deblocking_filter == 2 means "do not filter across slice
 * boundaries": treat neighbours from other slices as unavailable. */
2468  if (sl->deblocking_filter == 2) {
2469  if (h->slice_table[top_xy] != sl->slice_num)
2470  top_type = 0;
2471  if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
2472  left_type[LTOP] = left_type[LBOT] = 0;
2473  } else {
 /* 0xFFFF marks a slot with no decoded MB (picture edge). */
2474  if (h->slice_table[top_xy] == 0xFFFF)
2475  top_type = 0;
2476  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
2477  left_type[LTOP] = left_type[LBOT] = 0;
2478  }
2479  sl->top_type = top_type;
2480  sl->left_type[LTOP] = left_type[LTOP];
2481  sl->left_type[LBOT] = left_type[LBOT];
2482 
 /* Intra MBs need no MV/ref caches; only the nnz handling below
 * applies to inter MBs, so we can return early here. */
2483  if (IS_INTRA(mb_type))
2484  return 0;
2485 
2486  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2487  top_type, left_type, mb_xy, 0);
2488  if (sl->list_count == 2)
2489  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2490  top_type, left_type, mb_xy, 1);
2491 
 /* Current MB's non-zero-coefficient counts into the cache centre. */
2492  nnz = h->non_zero_count[mb_xy];
2493  nnz_cache = sl->non_zero_count_cache;
2494  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
2495  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
2496  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
2497  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
2498  sl->cbp = h->cbp_table[mb_xy];
2499 
 /* Top neighbour's bottom row of nnz values. */
2500  if (top_type) {
2501  nnz = h->non_zero_count[top_xy];
2502  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
2503  }
2504 
 /* Left neighbour's right column of nnz values. */
2505  if (left_type[LTOP]) {
2506  nnz = h->non_zero_count[left_xy[LTOP]];
2507  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
2508  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
2509  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
2510  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
2511  }
2512 
2513  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
2514  * from what the loop filter needs */
2515  if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
2516  if (IS_8x8DCT(top_type)) {
2517  nnz_cache[4 + 8 * 0] =
2518  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
2519  nnz_cache[6 + 8 * 0] =
2520  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
2521  }
2522  if (IS_8x8DCT(left_type[LTOP])) {
2523  nnz_cache[3 + 8 * 1] =
2524  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
2525  }
2526  if (IS_8x8DCT(left_type[LBOT])) {
2527  nnz_cache[3 + 8 * 3] =
2528  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
2529  }
2530 
 /* For an 8x8-transform MB, derive each quadrant's nnz from the
 * corresponding cbp luma bit. */
2531  if (IS_8x8DCT(mb_type)) {
2532  nnz_cache[scan8[0]] =
2533  nnz_cache[scan8[1]] =
2534  nnz_cache[scan8[2]] =
2535  nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;
2536 
2537  nnz_cache[scan8[0 + 4]] =
2538  nnz_cache[scan8[1 + 4]] =
2539  nnz_cache[scan8[2 + 4]] =
2540  nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;
2541 
2542  nnz_cache[scan8[0 + 8]] =
2543  nnz_cache[scan8[1 + 8]] =
2544  nnz_cache[scan8[2 + 8]] =
2545  nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;
2546 
2547  nnz_cache[scan8[0 + 12]] =
2548  nnz_cache[scan8[1 + 12]] =
2549  nnz_cache[scan8[2 + 12]] =
2550  nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
2551  }
2552  }
2553 
2554  return 0;
2555 }
2556 
/**
 * Apply the deblocking filter to macroblocks [start_x, end_x) of the
 * row ending at sl->mb_y (two rows when MBAFF).
 * Temporarily repurposes sl->mb_x/mb_y while iterating and restores
 * sl->chroma_qp from sl->qscale before returning; does nothing when
 * filtering has been postponed to the end-of-picture pass.
 */
2557 static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
2558 {
2559  uint8_t *dest_y, *dest_cb, *dest_cr;
2560  int linesize, uvlinesize, mb_x, mb_y;
2561  const int end_mb_y = sl->mb_y + FRAME_MBAFF(h);
2562  const int old_slice_type = sl->slice_type;
2563  const int pixel_shift = h->pixel_shift;
2564  const int block_h = 16 >> h->chroma_y_shift;
2565 
 /* Deblocking is run in one pass after all slices when postponed. */
2566  if (h->postpone_filter)
2567  return;
2568 
2569  if (sl->deblocking_filter) {
2570  for (mb_x = start_x; mb_x < end_x; mb_x++)
 /* In MBAFF frames filter both MBs of the vertical pair. */
2571  for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
2572  int mb_xy, mb_type;
2573  mb_xy = sl->mb_xy = mb_x + mb_y * h->mb_stride;
2574  mb_type = h->cur_pic.mb_type[mb_xy];
2575 
2576  if (FRAME_MBAFF(h))
2577  sl->mb_mbaff =
2578  sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
2579 
2580  sl->mb_x = mb_x;
2581  sl->mb_y = mb_y;
 /* Compute the luma/chroma destination pointers for this MB. */
2582  dest_y = h->cur_pic.f->data[0] +
2583  ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
2584  dest_cb = h->cur_pic.f->data[1] +
2585  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2586  mb_y * sl->uvlinesize * block_h;
2587  dest_cr = h->cur_pic.f->data[2] +
2588  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2589  mb_y * sl->uvlinesize * block_h;
2590  // FIXME simplify above
2591 
 /* Field MBs use doubled strides; the bottom field additionally
 * starts one line down. */
2592  if (MB_FIELD(sl)) {
2593  linesize = sl->mb_linesize = sl->linesize * 2;
2594  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
2595  if (mb_y & 1) { // FIXME move out of this function?
2596  dest_y -= sl->linesize * 15;
2597  dest_cb -= sl->uvlinesize * (block_h - 1);
2598  dest_cr -= sl->uvlinesize * (block_h - 1);
2599  }
2600  } else {
2601  linesize = sl->mb_linesize = sl->linesize;
2602  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
2603  }
2604  backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
2605  uvlinesize, 0);
 /* Non-zero return means the filter is a no-op for this MB. */
2606  if (fill_filter_caches(h, sl, mb_type))
2607  continue;
2608  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
2609  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);
2610 
2611  if (FRAME_MBAFF(h)) {
2612  ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
2613  linesize, uvlinesize);
2614  } else {
2615  ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
2616  dest_cr, linesize, uvlinesize);
2617  }
2618  }
2619  }
 /* Restore decode-loop state that was clobbered above. */
2620  sl->slice_type = old_slice_type;
2621  sl->mb_x = end_x;
2622  sl->mb_y = end_mb_y - FRAME_MBAFF(h);
2623  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
2624  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
2625 }
2626 
/* Predict the MBAFF field-decoding flag for the current MB pair from
 * the left neighbour if it belongs to the same slice, else from the
 * top neighbour, else default to frame coding (0).
 * NOTE(review): the signature line (listing line 2627) is missing from
 * this extract — presumably predict_field_decoding_flag(const H264Context *h,
 * H264SliceContext *sl); confirm against the full source. */
2628 {
2629  const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
2630  int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
2631  h->cur_pic.mb_type[mb_xy - 1] :
2632  (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
2633  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
2634  sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
2635 }
2636 
2637 /**
2638  * Draw edges and report progress for the last MB row.
 *
 * NOTE(review): the signature line (listing line 2640) is missing from
 * this extract — presumably decode_finish_row(const H264Context *h,
 * H264SliceContext *sl); confirm against the full source.
2639  */
2641 {
2642  int top = 16 * (sl->mb_y >> FIELD_PICTURE(h));
2643  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
2644  int height = 16 << FRAME_MBAFF(h);
2645  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
2646 
 /* When deblocking, the previous rows may still change, so only the
 * band shifted up by the filter border is final. */
2647  if (sl->deblocking_filter) {
2648  if ((top + height) >= pic_height)
2649  height += deblock_border;
2650  top -= deblock_border;
2651  }
2652 
2653  if (top >= pic_height || (top + height) < 0)
2654  return;
2655 
 /* Clip the band to the picture. */
2656  height = FFMIN(height, pic_height - top);
2657  if (top < 0) {
2658  height = top + height;
2659  top = 0;
2660  }
2661 
2662  ff_h264_draw_horiz_band(h, sl, top, height);
2663 
 /* No progress report for droppable frames or after an ER error. */
2664  if (h->droppable || h->er.error_occurred)
2665  return;
2666 
2667  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
2668  h->picture_structure == PICT_BOTTOM_FIELD);
2669 }
2670 
/* Record a decoded/errored MB region with the error-resilience code.
 * No-op when error resilience is disabled for this decoder instance or
 * compiled out.
 * NOTE(review): the first signature line (listing line 2671) is missing
 * from this extract; the remaining parameters follow below. */
2672  int startx, int starty,
2673  int endx, int endy, int status)
2674 {
2675  if (!sl->h264->enable_er)
2676  return;
2677 
2678  if (CONFIG_ERROR_RESILIENCE) {
2679  ff_er_add_slice(sl->er, startx, starty, endx, endy, status);
2680  }
2681 }
2682 
2683 static int decode_slice(struct AVCodecContext *avctx, void *arg)
2684 {
2685  H264SliceContext *sl = arg;
2686  const H264Context *h = sl->h264;
2687  int lf_x_start = sl->mb_x;
2688  int orig_deblock = sl->deblocking_filter;
2689  int ret;
2690 
2691  sl->linesize = h->cur_pic_ptr->f->linesize[0];
2692  sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];
2693 
2694  ret = alloc_scratch_buffers(sl, sl->linesize);
2695  if (ret < 0)
2696  return ret;
2697 
2698  sl->mb_skip_run = -1;
2699 
2700  av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * sl->linesize * ((scan8[15] - scan8[0]) >> 3));
2701 
2702  if (h->postpone_filter)
2703  sl->deblocking_filter = 0;
2704 
2705  sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
2706  (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
2707 
2708  if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && sl->er->error_status_table) {
2709  const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
2710  if (start_i) {
2711  int prev_status = sl->er->error_status_table[sl->er->mb_index2xy[start_i - 1]];
2712  prev_status &= ~ VP_START;
2713  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
2714  sl->er->error_occurred = 1;
2715  }
2716  }
2717 
2718  if (h->ps.pps->cabac) {
2719  /* realign */
2720  align_get_bits(&sl->gb);
2721 
2722  /* init cabac */
2724  sl->gb.buffer + get_bits_count(&sl->gb) / 8,
2725  (get_bits_left(&sl->gb) + 7) / 8);
2726  if (ret < 0)
2727  return ret;
2728 
2730 
2731  for (;;) {
2732  int ret, eos;
2733  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2734  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2735  sl->next_slice_idx);
2736  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2737  sl->mb_y, ER_MB_ERROR);
2738  return AVERROR_INVALIDDATA;
2739  }
2740 
2741  ret = ff_h264_decode_mb_cabac(h, sl);
2742 
2743  if (ret >= 0)
2744  ff_h264_hl_decode_mb(h, sl);
2745 
2746  // FIXME optimal? or let mb_decode decode 16x32 ?
2747  if (ret >= 0 && FRAME_MBAFF(h)) {
2748  sl->mb_y++;
2749 
2750  ret = ff_h264_decode_mb_cabac(h, sl);
2751 
2752  if (ret >= 0)
2753  ff_h264_hl_decode_mb(h, sl);
2754  sl->mb_y--;
2755  }
2756  eos = get_cabac_terminate(&sl->cabac);
2757 
2758  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
2759  sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
2760  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2761  sl->mb_y, ER_MB_END);
2762  if (sl->mb_x >= lf_x_start)
2763  loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
2764  goto finish;
2765  }
2766  if (sl->cabac.bytestream > sl->cabac.bytestream_end + 2 )
2767  av_log(h->avctx, AV_LOG_DEBUG, "bytestream overread %"PTRDIFF_SPECIFIER"\n", sl->cabac.bytestream_end - sl->cabac.bytestream);
2768  if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 4) {
2769  av_log(h->avctx, AV_LOG_ERROR,
2770  "error while decoding MB %d %d, bytestream %"PTRDIFF_SPECIFIER"\n",
2771  sl->mb_x, sl->mb_y,
2772  sl->cabac.bytestream_end - sl->cabac.bytestream);
2773  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2774  sl->mb_y, ER_MB_ERROR);
2775  return AVERROR_INVALIDDATA;
2776  }
2777 
2778  if (++sl->mb_x >= h->mb_width) {
2779  loop_filter(h, sl, lf_x_start, sl->mb_x);
2780  sl->mb_x = lf_x_start = 0;
2781  decode_finish_row(h, sl);
2782  ++sl->mb_y;
2783  if (FIELD_OR_MBAFF_PICTURE(h)) {
2784  ++sl->mb_y;
2785  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2787  }
2788  }
2789 
2790  if (eos || sl->mb_y >= h->mb_height) {
2791  ff_tlog(h->avctx, "slice end %d %d\n",
2792  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2793  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2794  sl->mb_y, ER_MB_END);
2795  if (sl->mb_x > lf_x_start)
2796  loop_filter(h, sl, lf_x_start, sl->mb_x);
2797  goto finish;
2798  }
2799  }
2800  } else {
2801  for (;;) {
2802  int ret;
2803 
2804  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2805  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2806  sl->next_slice_idx);
2807  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2808  sl->mb_y, ER_MB_ERROR);
2809  return AVERROR_INVALIDDATA;
2810  }
2811 
2812  ret = ff_h264_decode_mb_cavlc(h, sl);
2813 
2814  if (ret >= 0)
2815  ff_h264_hl_decode_mb(h, sl);
2816 
2817  // FIXME optimal? or let mb_decode decode 16x32 ?
2818  if (ret >= 0 && FRAME_MBAFF(h)) {
2819  sl->mb_y++;
2820  ret = ff_h264_decode_mb_cavlc(h, sl);
2821 
2822  if (ret >= 0)
2823  ff_h264_hl_decode_mb(h, sl);
2824  sl->mb_y--;
2825  }
2826 
2827  if (ret < 0) {
2828  av_log(h->avctx, AV_LOG_ERROR,
2829  "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
2830  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2831  sl->mb_y, ER_MB_ERROR);
2832  return ret;
2833  }
2834 
2835  if (++sl->mb_x >= h->mb_width) {
2836  loop_filter(h, sl, lf_x_start, sl->mb_x);
2837  sl->mb_x = lf_x_start = 0;
2838  decode_finish_row(h, sl);
2839  ++sl->mb_y;
2840  if (FIELD_OR_MBAFF_PICTURE(h)) {
2841  ++sl->mb_y;
2842  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2844  }
2845  if (sl->mb_y >= h->mb_height) {
2846  ff_tlog(h->avctx, "slice end %d %d\n",
2847  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2848 
2849  if ( get_bits_left(&sl->gb) == 0
2850  || get_bits_left(&sl->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
2851  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2852  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2853 
2854  goto finish;
2855  } else {
2856  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2857  sl->mb_x, sl->mb_y, ER_MB_END);
2858 
2859  return AVERROR_INVALIDDATA;
2860  }
2861  }
2862  }
2863 
2864  if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
2865  ff_tlog(h->avctx, "slice end %d %d\n",
2866  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2867 
2868  if (get_bits_left(&sl->gb) == 0) {
2869  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2870  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2871  if (sl->mb_x > lf_x_start)
2872  loop_filter(h, sl, lf_x_start, sl->mb_x);
2873 
2874  goto finish;
2875  } else {
2876  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2877  sl->mb_y, ER_MB_ERROR);
2878 
2879  return AVERROR_INVALIDDATA;
2880  }
2881  }
2882  }
2883  }
2884 
2885 finish:
2886  sl->deblocking_filter = orig_deblock;
2887  return 0;
2888 }
2889 
2890 /**
2891  * Call decode_slice() for each context.
2892  *
2893  * @param h h264 master context
 *
 * NOTE(review): the signature line (listing line 2895) is missing from
 * this extract — presumably ff_h264_execute_decode_slices(H264Context *h);
 * confirm against the full source.
2894  */
2896 {
2897  AVCodecContext *const avctx = h->avctx;
2898  H264SliceContext *sl;
2899  int context_count = h->nb_slice_ctx_queued;
2900  int ret = 0;
2901  int i, j;
2902 
2903  h->slice_ctx[0].next_slice_idx = INT_MAX;
2904 
 /* Nothing to do for hwaccel decoding or when no slice is queued. */
2905  if (h->avctx->hwaccel || context_count < 1)
2906  return 0;
2907 
2908  av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
2909 
2910  if (context_count == 1) {
2911 
 /* Single context: the slice may run to the end of the picture and
 * deblocking is never postponed. */
2912  h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
2913  h->postpone_filter = 0;
2914 
2915  ret = decode_slice(avctx, &h->slice_ctx[0]);
2916  h->mb_y = h->slice_ctx[0].mb_y;
2917  if (ret < 0)
2918  goto finish;
2919  } else {
2920  av_assert0(context_count > 0);
 /* For each queued slice, find the start index of the nearest
 * following slice so decode_slice() can detect overlaps. */
2921  for (i = 0; i < context_count; i++) {
2922  int next_slice_idx = h->mb_width * h->mb_height;
2923  int slice_idx;
2924 
2925  sl = &h->slice_ctx[i];
2926 
2927  /* make sure none of those slices overlap */
2928  slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
2929  for (j = 0; j < context_count; j++) {
2930  H264SliceContext *sl2 = &h->slice_ctx[j];
2931  int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;
2932 
2933  if (i == j || slice_idx2 < slice_idx)
2934  continue;
2935  next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
2936  }
2937  sl->next_slice_idx = next_slice_idx;
2938  }
2939 
 /* Decode all queued slices, possibly in parallel. */
2940  avctx->execute(avctx, decode_slice, h->slice_ctx,
2941  NULL, context_count, sizeof(h->slice_ctx[0]));
2942 
2943  /* pull back stuff from slices to master context */
2944  sl = &h->slice_ctx[context_count - 1];
2945  h->mb_y = sl->mb_y;
2946 
 /* If deblocking was postponed (cross-slice filtering with multiple
 * contexts), run it now, sequentially, over each slice's rows. */
2947  if (h->postpone_filter) {
2948  h->postpone_filter = 0;
2949 
2950  for (i = 0; i < context_count; i++) {
2951  int y_end, x_end;
2952 
2953  sl = &h->slice_ctx[i];
2954  y_end = FFMIN(sl->mb_y + 1, h->mb_height);
2955  x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;
2956 
2957  for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
2958  sl->mb_y = j;
2959  loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
2960  j == y_end - 1 ? x_end : h->mb_width);
2961  }
2962  }
2963  }
2964  }
2965 
2966 finish:
2967  h->nb_slice_ctx_queued = 0;
2968  return ret;
2969 }
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
er_add_slice
static void er_add_slice(H264SliceContext *sl, int startx, int starty, int endx, int endy, int status)
Definition: h264_slice.c:2671
ff_h264_filter_mb_fast
void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:416
h264_slice_header_init
static int h264_slice_header_init(H264Context *h)
Definition: h264_slice.c:949
implicit_weight_table
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
Initialize implicit_weight table.
Definition: h264_slice.c:693
H264SliceContext::mb_xy
int mb_xy
Definition: h264dec.h:224
ff_h264_unref_picture
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:36
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
H264SliceContext::ref_cache
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:292
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
ff_h264_free_tables
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:134
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
H264SEIDisplayOrientation::hflip
int hflip
Definition: h264_sei.h:147
AV_STEREO3D_VIEW_LEFT
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
Definition: stereo3d.h:153
av_clip
#define av_clip
Definition: common.h:95
h264_init_ps
static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
Definition: h264_slice.c:1042
H264SEIFilmGrainCharacteristics::blending_mode_id
int blending_mode_id
Definition: h264_sei.h:178
H264SliceContext::max_pic_num
int max_pic_num
Definition: h264dec.h:324
H264SliceContext::nb_mmco
int nb_mmco
Definition: h264dec.h:315
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:839
CHROMA422
#define CHROMA422(h)
Definition: h264dec.h:92
FF_BUG_TRUNCATED
#define FF_BUG_TRUNCATED
Definition: avcodec.h:1294
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
cabac.h
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:114
H264Picture::poc
int poc
frame POC
Definition: h264dec.h:129
h264_export_frame_props
static int h264_export_frame_props(H264Context *h)
Definition: h264_slice.c:1161
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:1178
H264Picture::f
AVFrame * f
Definition: h264dec.h:108
out
FILE * out
Definition: movenc.c:54
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:239
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:679
av_clip_int8
#define av_clip_int8
Definition: common.h:104
zigzag_scan8x8_cavlc
static const uint8_t zigzag_scan8x8_cavlc[64+1]
Definition: h264_slice.c:100
ff_h264_replace_picture
int ff_h264_replace_picture(H264Context *h, H264Picture *dst, const H264Picture *src)
Definition: h264_picture.c:145
AVFilmGrainH274Params::color_space
enum AVColorSpace color_space
Definition: film_grain_params.h:152
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:971
H264Picture::ref_index
int8_t * ref_index[2]
Definition: h264dec.h:126
HWACCEL_MAX
#define HWACCEL_MAX
AVFrame::coded_picture_number
int coded_picture_number
picture number in bitstream order
Definition: frame.h:452
MB_MBAFF
#define MB_MBAFF(h)
Definition: h264dec.h:65
H264SliceContext::mvd_table
uint8_t(*[2] mvd_table)[2]
Definition: h264dec.h:305
ff_h264_set_erpic
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
Definition: h264_picture.c:196
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
H264_SEI_PIC_STRUCT_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM
3: top field, bottom field, in that order
Definition: h264_sei.h:34
AVFilmGrainH274Params::blending_mode_id
int blending_mode_id
Specifies the blending mode used to blend the simulated film grain with the decoded images.
Definition: film_grain_params.h:160
H264Picture::pps
const PPS * pps
Definition: h264dec.h:153
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:152
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:122
GetBitContext::size_in_bits
int size_in_bits
Definition: get_bits.h:68
H2645NAL::ref_idc
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:57
ff_h264_slice_context_init
void ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init slice context.
Definition: h264dec.c:258
ERContext::mb_index2xy
int * mb_index2xy
Definition: error_resilience.h:59
predict_field_decoding_flag
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
Definition: h264_slice.c:2627
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
AVFrame::width
int width
Definition: frame.h:397
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:661
get_ue_golomb
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to 8190.
Definition: golomb.h:53
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:66
internal.h
ff_h264_update_thread_context
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:309
alloc_scratch_buffers
static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
Definition: h264_slice.c:132
H264_MAX_DPB_FRAMES
@ H264_MAX_DPB_FRAMES
Definition: h264.h:76
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:561
FRAME_RECOVERED_IDR
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264dec.h:516
decode_finish_row
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
Draw edges and report progress for the last MB row.
Definition: h264_slice.c:2640
H264SliceContext::ref_count
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:260
H264SEIFilmGrainCharacteristics::color_primaries
int color_primaries
Definition: h264_sei.h:175
FF_COMPLIANCE_STRICT
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: defs.h:59
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:459
ff_er_frame_start
void ff_er_frame_start(ERContext *s)
Definition: error_resilience.c:787
H264_SEI_FPA_TYPE_CHECKERBOARD
@ H264_SEI_FPA_TYPE_CHECKERBOARD
Definition: h264_sei.h:46
H264Picture::qscale_table
int8_t * qscale_table
Definition: h264dec.h:114
H264SliceContext::left_mb_xy
int left_mb_xy[LEFT_MBS]
Definition: h264dec.h:204
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:247
H264PredWeightTable::use_weight_chroma
int use_weight_chroma
Definition: h264_parse.h:71
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:588
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
ff_h264_update_thread_context_for_user
int ff_h264_update_thread_context_for_user(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:477
AVFilmGrainH274Params::color_range
enum AVColorRange color_range
Definition: film_grain_params.h:149
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:822
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure clockwise rotation by the specified angle (in de...
Definition: display.c:51
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
H264Picture::ref_index_buf
AVBufferRef * ref_index_buf[2]
Definition: h264dec.h:125
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
ff_h264_pred_weight_table
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps, const int *ref_count, int slice_type_nos, H264PredWeightTable *pwt, int picture_structure, void *logctx)
Definition: h264_parse.c:29
FRAME_RECOVERED_SEI
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264dec.h:521
H264SliceContext::is_complex
int is_complex
Definition: h264dec.h:231
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:35
ff_h264_decode_ref_pic_list_reordering
int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx)
Definition: h264_refs.c:422
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:525
H264Picture::invalid_gap
int invalid_gap
Definition: h264dec.h:148
av_timecode_get_smpte
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert sei info to SMPTE 12M binary representation.
Definition: timecode.c:69
AV_STEREO3D_VIEW_RIGHT
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
Definition: stereo3d.h:158
H264Picture::pps_buf
AVBufferRef * pps_buf
Definition: h264dec.h:152
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
ThreadFrame::f
AVFrame * f
Definition: threadframe.h:28
AVFilmGrainParams::seed
uint64_t seed
Seed to use for the synthesis process, if the codec allows for it.
Definition: film_grain_params.h:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1328
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:346
H264SliceContext::mb_x
int mb_x
Definition: h264dec.h:223
H264Picture::frame_num
int frame_num
frame_num (raw frame_num from slice header)
Definition: h264dec.h:130
H264SliceContext::next_slice_idx
int next_slice_idx
Definition: h264dec.h:229
H264SliceContext
Definition: h264dec.h:170
fill_filter_caches_inter
static av_always_inline void fill_filter_caches_inter(const H264Context *h, H264SliceContext *sl, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264_slice.c:2331
golomb.h
exp golomb vlc stuff
MB_FIELD
#define MB_FIELD(sl)
Definition: h264dec.h:66
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
ff_h264_filter_mb
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:716
H264SliceContext::mv_cache
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:291
AV_CODEC_FLAG_OUTPUT_CORRUPT
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:224
AVHWAccel
Definition: avcodec.h:2070
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:477
finish
static void finish(void)
Definition: movenc.c:342
get_chroma_qp
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
Get the chroma qp.
Definition: h264dec.h:647
H264Picture::mmco_reset
int mmco_reset
MMCO_RESET set this 1.
Definition: h264dec.h:131
fail
#define fail()
Definition: checkasm.h:134
copy_picture_range
static void copy_picture_range(H264Picture **to, H264Picture **from, int count, H264Context *new_base, H264Context *old_base)
Definition: h264_slice.c:293
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
H264SEIA53Caption
Definition: h264_sei.h:107
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:475
timecode.h
h264_select_output_frame
static int h264_select_output_frame(H264Context *h)
Definition: h264_slice.c:1438
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:457
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:92
CABACContext::bytestream
const uint8_t * bytestream
Definition: cabac.h:45
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:417
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2886
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
H264Picture::mb_stride
int mb_stride
Definition: h264dec.h:156
IN_RANGE
#define IN_RANGE(a, b, size)
Definition: h264_slice.c:286
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264_parse.h:40
ff_h264_flush_change
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:445
ff_h264qpel_init
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:49
ff_h264_sei_process_picture_timing
int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps, void *logctx)
Parse the contents of a picture timing message given an active SPS.
Definition: h264_sei.c:69
h264_frame_start
static int h264_frame_start(H264Context *h)
Definition: h264_slice.c:489
H264SliceContext::deblocking_filter
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264dec.h:186
H264PredWeightTable::luma_log2_weight_denom
int luma_log2_weight_denom
Definition: h264_parse.h:72
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:260
H264Picture::f_grain
AVFrame * f_grain
Definition: h264dec.h:111
H264SliceContext::picture_structure
int picture_structure
Definition: h264dec.h:233
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
release_unused_pictures
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264_slice.c:119
H264PredWeightTable::use_weight
int use_weight
Definition: h264_parse.h:70
H264_SEI_FPA_TYPE_SIDE_BY_SIDE
@ H264_SEI_FPA_TYPE_SIDE_BY_SIDE
Definition: h264_sei.h:49
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
H264SliceContext::direct_spatial_mv_pred
int direct_spatial_mv_pred
Definition: h264dec.h:244
H264SliceContext::slice_num
int slice_num
Definition: h264dec.h:175
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264_parse.h:127
H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL
@ H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL
Definition: h264_sei.h:51
AVFilmGrainH274Params::intensity_interval_upper_bound
uint8_t intensity_interval_upper_bound[3][256]
Specifies the upper bound of each intensity interval for which the set of model values applies for th...
Definition: film_grain_params.h:194
non_j_pixfmt
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
Definition: h264_slice.c:1031
AVFilmGrainH274Params::bit_depth_luma
int bit_depth_luma
Specifies the bit depth used for the luma component.
Definition: film_grain_params.h:142
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:462
ff_h264_init_cabac_states
void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
Definition: h264_cabac.c:1262
ff_h264_hl_decode_mb
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:799
avassert.h
AV_STEREO3D_FRAMESEQUENCE
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
Definition: stereo3d.h:89
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
film_grain_params.h
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
ff_color_frame
void ff_color_frame(AVFrame *frame, const int color[4])
Definition: utils.c:403
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:624
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:384
ff_h264_queue_decode_slice
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2188
width
#define width
H264Context::DPB
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264dec.h:339
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
H264SEIFilmGrainCharacteristics::present
int present
Definition: h264_sei.h:169
stereo3d.h
H264_SEI_FPA_TYPE_TOP_BOTTOM
@ H264_SEI_FPA_TYPE_TOP_BOTTOM
Definition: h264_sei.h:50
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
H264SEIA53Caption::buf_ref
AVBufferRef * buf_ref
Definition: h264_sei.h:108
H264PredWeightTable::chroma_log2_weight_denom
int chroma_log2_weight_denom
Definition: h264_parse.h:73
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:50
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
H264_NAL_IDR_SLICE
@ H264_NAL_IDR_SLICE
Definition: h264.h:39
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:376
FIELD_PICTURE
#define FIELD_PICTURE(h)
Definition: h264dec.h:68
av_film_grain_params_create_side_data
AVFilmGrainParams * av_film_grain_params_create_side_data(AVFrame *frame)
Allocate a complete AVFilmGrainParams and add it to the frame.
Definition: film_grain_params.c:31
ff_h264_execute_ref_pic_marking
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:609
ff_h264_decode_ref_pic_marking
int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb, const H2645NAL *nal, void *logctx)
Definition: h264_refs.c:833
from
const char * from
Definition: jacosubdec.c:66
to
const char * to
Definition: webvttdec.c:35
h264_slice_header_parse
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1833
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
H264PredWeightTable::chroma_weight_flag
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264_parse.h:75
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
h264data.h
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:456
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
H264Ref::parent
H264Picture * parent
Definition: h264dec.h:167
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
decode.h
field_scan8x8_cavlc
static const uint8_t field_scan8x8_cavlc[64+1]
Definition: h264_slice.c:80
H264SliceContext::slice_alpha_c0_offset
int slice_alpha_c0_offset
Definition: h264dec.h:187
H264SEIFilmGrainCharacteristics::intensity_interval_lower_bound
uint8_t intensity_interval_lower_bound[3][256]
Definition: h264_sei.h:183
IS_INTRA
#define IS_INTRA(x, y)
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AVFrame::crop_right
size_t crop_right
Definition: frame.h:688
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVFilmGrainH274Params::comp_model_value
int16_t comp_model_value[3][256][6]
Specifies the model values for the component for each intensity interval.
Definition: film_grain_params.h:205
H264SliceContext::slice_type
int slice_type
Definition: h264dec.h:176
H264SliceContext::resync_mb_x
int resync_mb_x
Definition: h264dec.h:225
H264Picture::sei_recovery_frame_cnt
int sei_recovery_frame_cnt
Definition: h264dec.h:149
AVCOL_PRI_UNSPECIFIED
@ AVCOL_PRI_UNSPECIFIED
Definition: pixfmt.h:536
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: defs.h:73
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:239
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
H264Context::enable_er
int enable_er
Definition: h264dec.h:544
ff_h264_draw_horiz_band
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:99
H264SliceContext::curr_pic_num
int curr_pic_num
Definition: h264dec.h:323
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:885
FF_CODEC_PROPERTY_FILM_GRAIN
#define FF_CODEC_PROPERTY_FILM_GRAIN
Definition: avcodec.h:1850
arg
const char * arg
Definition: jacosubdec.c:67
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:182
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:64
if
if(ret)
Definition: filter_design.txt:179
AVFilmGrainH274Params::model_id
int model_id
Specifies the film grain simulation mode.
Definition: film_grain_params.h:137
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
threadframe.h
GetBitContext::buffer
const uint8_t * buffer
Definition: get_bits.h:62
alloc_picture
static int alloc_picture(H264Context *h, H264Picture *pic)
Definition: h264_slice.c:191
H264Picture::motion_val_buf
AVBufferRef * motion_val_buf[2]
Definition: h264dec.h:116
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:149
NULL
#define NULL
Definition: coverity.c:32
AV_COPY128
#define AV_COPY128(d, s)
Definition: intreadwrite.h:609
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_COPY64
#define AV_COPY64(d, s)
Definition: intreadwrite.h:605
H264SliceContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: h264dec.h:276
H264SEIUnregistered
Definition: h264_sei.h:111
SPS
Sequence parameter set.
Definition: h264_ps.h:44
TRANSPOSE
#define TRANSPOSE(x)
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:38
ff_h264_decode_mb_cabac
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cabac.c:1920
AV_PICTURE_TYPE_SI
@ AV_PICTURE_TYPE_SI
Switching Intra.
Definition: avutil.h:278
H264SliceContext::chroma_qp
int chroma_qp[2]
Definition: h264dec.h:181
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:303
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
H264SEIFilmGrainCharacteristics::repetition_period
int repetition_period
Definition: h264_sei.h:186
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
PPS
Picture parameter set.
Definition: h264_ps.h:111
av_fast_mallocz
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
Definition: mem.c:570
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:106
mathops.h
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
IS_INTERLACED
#define IS_INTERLACED(a)
Definition: mpegutils.h:76
double
double
Definition: af_crystalizer.c:132
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:647
H264Picture::mb_height
int mb_height
Definition: h264dec.h:155
MAX_PPS_COUNT
#define MAX_PPS_COUNT
Definition: h264_ps.h:38
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:460
H264SliceContext::qscale
int qscale
Definition: h264dec.h:180
get_pixel_format
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
Definition: h264_slice.c:791
fill_filter_caches
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
Definition: h264_slice.c:2415
ERContext::error_occurred
int error_occurred
Definition: error_resilience.h:66
fp
#define fp
Definition: regdef.h:44
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
init_scan_tables
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264_slice.c:757
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:474
H264SliceContext::top_borders_allocated
int top_borders_allocated[2]
Definition: h264dec.h:280
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
AV_PICTURE_TYPE_SP
@ AV_PICTURE_TYPE_SP
Switching Predicted.
Definition: avutil.h:279
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:627
FIELD_OR_MBAFF_PICTURE
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264dec.h:85
H264SliceContext::mb_skip_run
int mb_skip_run
Definition: h264dec.h:230
h264_ps.h
init_dimensions
static void init_dimensions(H264Context *h)
Definition: h264_slice.c:909
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_FRAME_DATA_SEI_UNREGISTERED
@ AV_FRAME_DATA_SEI_UNREGISTERED
User data unregistered metadata associated with a video frame.
Definition: frame.h:178
H264SliceContext::top_type
int top_type
Definition: h264dec.h:207
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:686
H264SliceContext::resync_mb_y
int resync_mb_y
Definition: h264dec.h:226
H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
6: bottom field, top field, bottom field repeated, in that order
Definition: h264_sei.h:37
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:67
H264SEIPictureTiming
Definition: h264_sei.h:66
H264SliceContext::cabac
CABACContext cabac
Cabac.
Definition: h264dec.h:310
H264SliceContext::redundant_pic_count
int redundant_pic_count
Definition: h264dec.h:237
AVFrame::crop_left
size_t crop_left
Definition: frame.h:687
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:75
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:422
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
AV_STEREO3D_CHECKERBOARD
@ AV_STEREO3D_CHECKERBOARD
Views are packed in a checkerboard-like structure per pixel.
Definition: stereo3d.h:101
H264Picture::reference
int reference
Definition: h264dec.h:146
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:259
CABAC
#define CABAC(h)
Definition: h264_cabac.c:28
LEFT_MBS
#define LEFT_MBS
Definition: h264dec.h:69
pps
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
Definition: cbs_h264_syntax_template.c:404
AVFilmGrainH274Params::component_model_present
int component_model_present[3]
Indicates if the modelling of film grain for a given component is present.
Definition: film_grain_params.h:170
H264SEIFramePacking
Definition: h264_sei.h:133
rectangle.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
AVFilmGrainParams::codec
union AVFilmGrainParams::@317 codec
Additional fields may be added both here and in any structure included.
H264SliceContext::mb_uvlinesize
ptrdiff_t mb_uvlinesize
Definition: h264dec.h:221
VP_START
#define VP_START
< current MB is the first after a resync marker
Definition: error_resilience.h:30
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:464
H264SliceContext::pwt
H264PredWeightTable pwt
Definition: h264dec.h:190
H264SEIFilmGrainCharacteristics::bit_depth_chroma
int bit_depth_chroma
Definition: h264_sei.h:173
H264Picture::tf
ThreadFrame tf
Definition: h264dec.h:109
H264Picture::mb_type
uint32_t * mb_type
Definition: h264dec.h:120
ff_h264_decode_mb_cavlc
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cavlc.c:695
H264_SEI_PIC_STRUCT_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP
4: bottom field, top field, in that order
Definition: h264_sei.h:35
H264Picture::recovered
int recovered
picture at IDR or recovery point + recovery count
Definition: h264dec.h:147
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
H264SliceContext::top_mb_xy
int top_mb_xy
Definition: h264dec.h:202
H264SliceContext::qp_thresh
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264dec.h:182
H2645NAL
Definition: h2645_parse.h:34
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:466
H264SliceContext::top_borders
uint8_t(*[2] top_borders)[(16 *3) *2]
Definition: h264dec.h:277
AVFrameSideData::data
uint8_t * data
Definition: frame.h:233
h264chroma.h
AVFilmGrainParams
This structure describes how to handle film grain synthesis in video for specific codecs.
Definition: film_grain_params.h:216
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1478
H264SliceContext::cbp
int cbp
Definition: h264dec.h:248
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:412
H264SliceContext::left_type
int left_type[LEFT_MBS]
Definition: h264dec.h:209
ff_h264_direct_ref_list_init
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:120
H264SliceContext::mb_y
int mb_y
Definition: h264dec.h:223
H264PredWeightTable::implicit_weight
int implicit_weight[48][48][2]
Definition: h264_parse.h:79
height
#define height
decode_slice
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264_slice.c:2683
H264SliceContext::explicit_ref_marking
int explicit_ref_marking
Definition: h264dec.h:316
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
av_reallocp_array
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate an array through a pointer to a pointer.
Definition: mem.c:233
H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN
@ H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN
Definition: h264_sei.h:47
pt
int pt
Definition: rtp.c:35
H264SliceContext::uvlinesize
ptrdiff_t uvlinesize
Definition: h264dec.h:219
AVBufferRef::buffer
AVBuffer * buffer
Definition: buffer.h:83
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:333
H264SEIFilmGrainCharacteristics::model_id
int model_id
Definition: h264_sei.h:170
H264SEIDisplayOrientation::anticlockwise_rotation
int anticlockwise_rotation
Definition: h264_sei.h:146
H264SliceContext::slice_type_nos
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264dec.h:177
H264SliceContext::delta_poc_bottom
int delta_poc_bottom
Definition: h264dec.h:321
AVFilmGrainParams::h274
AVFilmGrainH274Params h274
Definition: film_grain_params.h:237
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:164
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
H264SEIFilmGrainCharacteristics::intensity_interval_upper_bound
uint8_t intensity_interval_upper_bound[3][256]
Definition: h264_sei.h:184
FRAME_MBAFF
#define FRAME_MBAFF(h)
Definition: h264dec.h:67
IS_DIRECT
#define IS_DIRECT(a)
Definition: mpegutils.h:77
H264_SEI_PIC_STRUCT_FRAME
@ H264_SEI_PIC_STRUCT_FRAME
0: frame
Definition: h264_sei.h:31
get_cabac_terminate
static int av_unused get_cabac_terminate(CABACContext *c)
Definition: cabac_functions.h:187
H264_SEI_PIC_STRUCT_FRAME_TRIPLING
@ H264_SEI_PIC_STRUCT_FRAME_TRIPLING
8: frame tripling
Definition: h264_sei.h:39
field_scan
static const uint8_t field_scan[16+1]
Definition: h264_slice.c:54
loop_filter
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
Definition: h264_slice.c:2557
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:162
H264SliceContext::mb_mbaff
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:235
field_scan8x8
static const uint8_t field_scan8x8[64+1]
Definition: h264_slice.c:61
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:302
LIST_NOT_USED
#define LIST_NOT_USED
Definition: h264dec.h:389
H264Picture::field_picture
int field_picture
whether or not picture was encoded in separate fields
Definition: h264dec.h:139
h264dec.h
H264SliceContext::poc_lsb
int poc_lsb
Definition: h264dec.h:320
H264SEIFilmGrainCharacteristics
Definition: h264_sei.h:168
H264SliceContext::first_mb_addr
unsigned int first_mb_addr
Definition: h264dec.h:227
ff_h264_direct_dist_scale_factor
void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:61
H264Picture::needs_fg
int needs_fg
whether picture needs film grain synthesis (see f_grain)
Definition: h264dec.h:150
AVBuffer
A reference counted buffer type.
Definition: buffer_internal.h:38
H264Context
H264Context.
Definition: h264dec.h:330
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: defs.h:74
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
av_timecode_make_smpte_tc_string2
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
Definition: timecode.c:138
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:331
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:559
H264_SEI_PIC_STRUCT_FRAME_DOUBLING
@ H264_SEI_PIC_STRUCT_FRAME_DOUBLING
7: frame doubling
Definition: h264_sei.h:38
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
H264SliceContext::frame_num
int frame_num
Definition: h264dec.h:318
AVFilmGrainH274Params::num_intensity_intervals
uint16_t num_intensity_intervals[3]
Specifies the number of intensity intervals for which a specific set of model values has been estimat...
Definition: film_grain_params.h:176
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:476
display.h
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
ff_h264_execute_decode_slices
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2895
H264SliceContext::mb_linesize
ptrdiff_t mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264dec.h:220
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
H264SEIFilmGrainCharacteristics::matrix_coeffs
int matrix_coeffs
Definition: h264_sei.h:177
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
cabac_functions.h
H264Picture::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: h264dec.h:122
tb
#define tb
Definition: regdef.h:68
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
ff_h264_parse_ref_count
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2], GetBitContext *gb, const PPS *pps, int slice_type_nos, int picture_structure, void *logctx)
Definition: h264_parse.c:221
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:590
AVFilmGrainH274Params::color_primaries
enum AVColorPrimaries color_primaries
Definition: film_grain_params.h:150
H264_SEI_FPA_TYPE_INTERLEAVE_ROW
@ H264_SEI_FPA_TYPE_INTERLEAVE_ROW
Definition: h264_sei.h:48
ff_h264_alloc_tables
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:179
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1080
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:644
AVFilmGrainH274Params::intensity_interval_lower_bound
uint8_t intensity_interval_lower_bound[3][256]
Specifies the lower ounds of each intensity interval for whichthe set of model values applies for the...
Definition: film_grain_params.h:188
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:458
H264SliceContext::list_count
unsigned int list_count
Definition: h264dec.h:261
avcodec.h
H264SliceContext::h264
const struct H264Context * h264
Definition: h264dec.h:171
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:66
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
ff_h264_ref_picture
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
Definition: h264_picture.c:92
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
ff_h264_init_poc
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc, const SPS *sps, H264POCContext *pc, int picture_structure, int nal_ref_idc)
Definition: h264_parse.c:279
ff_h264_get_profile
int ff_h264_get_profile(const SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264_parse.c:531
H264SEIFilmGrainCharacteristics::transfer_characteristics
int transfer_characteristics
Definition: h264_sei.h:176
AV_STEREO3D_COLUMNS
@ AV_STEREO3D_COLUMNS
Views are packed per column.
Definition: stereo3d.h:138
h264_field_start
static int h264_field_start(H264Context *h, const H264SliceContext *sl, const H2645NAL *nal, int first_slice)
Definition: h264_slice.c:1541
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:177
H264SliceContext::last_qscale_diff
int last_qscale_diff
Definition: h264dec.h:183
H264SEIFilmGrainCharacteristics::bit_depth_luma
int bit_depth_luma
Definition: h264_sei.h:172
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:683
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:463
U
#define U(x)
Definition: vpx_arith.h:37
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:468
H264SEIFilmGrainCharacteristics::full_range
int full_range
Definition: h264_sei.h:174
H264SliceContext::pps_id
unsigned int pps_id
Definition: h264dec.h:271
H264SliceContext::linesize
ptrdiff_t linesize
Definition: h264dec.h:219
H264SliceContext::slice_beta_offset
int slice_beta_offset
Definition: h264dec.h:188
AVCodecContext
main external API structure.
Definition: avcodec.h:398
AVFrame::height
int height
Definition: frame.h:397
get_ue_golomb_31
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
Definition: golomb.h:120
MAX_SLICES
#define MAX_SLICES
Definition: dxva2_hevc.c:31
backup_mb_border
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264_slice.c:591
ff_h264_build_ref_list
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl)
Definition: h264_refs.c:298
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1517
H264SEIFilmGrainCharacteristics::comp_model_present_flag
int comp_model_present_flag[3]
Definition: h264_sei.h:180
AV_FILM_GRAIN_PARAMS_H274
@ AV_FILM_GRAIN_PARAMS_H274
The union is valid when interpreted as AVFilmGrainH274Params (codec.h274)
Definition: film_grain_params.h:35
H264SliceContext::bipred_scratchpad
uint8_t * bipred_scratchpad
Definition: h264dec.h:275
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
H264Picture::field_poc
int field_poc[2]
top/bottom POC
Definition: h264dec.h:128
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
H264SliceContext::mmco
MMCO mmco[H264_MAX_MMCO_COUNT]
Definition: h264dec.h:314
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVHWAccel::frame_priv_data_size
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2179
H264Picture::mb_width
int mb_width
Definition: h264dec.h:155
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:814
H264Picture
Definition: h264dec.h:107
ERContext::error_status_table
uint8_t * error_status_table
Definition: error_resilience.h:67
find_unused_picture
static int find_unused_picture(H264Context *h)
Definition: h264_slice.c:274
ff_thread_get_format
FF_DISABLE_DEPRECATION_WARNINGS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:1042
AVFilmGrainH274Params::log2_scale_factor
int log2_scale_factor
Specifies a scale factor used in the film grain characterization equations.
Definition: film_grain_params.h:165
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
h264_slice_init
static int h264_slice_init(H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:2052
H264SEIDisplayOrientation::vflip
int vflip
Definition: h264_sei.h:147
H264SEIDisplayOrientation
Definition: h264_sei.h:144
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1849
ff_h264_field_end
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:219
CABACContext::bytestream_end
const uint8_t * bytestream_end
Definition: cabac.h:46
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVFilmGrainH274Params::num_model_values
uint8_t num_model_values[3]
Specifies the number of model values present for each intensity interval in which the film grain has ...
Definition: film_grain_params.h:182
init_table_pools
static int init_table_pools(H264Context *h)
Definition: h264_slice.c:164
H264Picture::mb_type_buf
AVBufferRef * mb_type_buf
Definition: h264dec.h:119
H264SEIUnregistered::buf_ref
AVBufferRef ** buf_ref
Definition: h264_sei.h:113
H264SliceContext::ref_list
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:262
LBOT
#define LBOT
Definition: h264dec.h:71
H264SliceContext::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:286
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
H264SEIFilmGrainCharacteristics::num_intensity_intervals
uint16_t num_intensity_intervals[3]
Definition: h264_sei.h:181
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:72
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
H264SEIFilmGrainCharacteristics::comp_model_value
int16_t comp_model_value[3][256][6]
Definition: h264_sei.h:185
H264SEIFilmGrainCharacteristics::num_model_values
uint8_t num_model_values[3]
Definition: h264_sei.h:182
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:104
H264Context::nal_length_size
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264dec.h:449
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:34
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:39
AVFilmGrainH274Params::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: film_grain_params.h:151
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:231
H264SliceContext::er
ERContext * er
Definition: h264dec.h:173
H264_SEI_PIC_STRUCT_BOTTOM_FIELD
@ H264_SEI_PIC_STRUCT_BOTTOM_FIELD
2: bottom field
Definition: h264_sei.h:33
H264Picture::hwaccel_picture_private
void * hwaccel_picture_private
hardware accelerator private data
Definition: h264dec.h:123
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:36
H264SEIFilmGrainCharacteristics::log2_scale_factor
int log2_scale_factor
Definition: h264_sei.h:179
H264SliceContext::idr_pic_id
int idr_pic_id
Definition: h264dec.h:319
AVStereo3D::view
enum AVStereo3DView view
Determines which views are packed.
Definition: stereo3d.h:187
H264_SEI_FPA_TYPE_2D
@ H264_SEI_FPA_TYPE_2D
Definition: h264_sei.h:52
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:162
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:425
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:240
AVFrame::crop_top
size_t crop_top
Definition: frame.h:685
H264SliceContext::gb
GetBitContext gb
Definition: h264dec.h:172
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:565
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
H264SliceContext::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: h264dec.h:199
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
LTOP
#define LTOP
Definition: h264dec.h:70
int32_t
int32_t
Definition: audioconvert.c:56
h264.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:370
H264SliceContext::edge_emu_buffer_allocated
int edge_emu_buffer_allocated
Definition: h264dec.h:279
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264_slice.c:288
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
CHROMA444
#define CHROMA444(h)
Definition: h264dec.h:93
AVFilmGrainH274Params::bit_depth_chroma
int bit_depth_chroma
Specifies the bit depth used for the chroma components.
Definition: film_grain_params.h:147
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
ff_h264_get_slice_type
int ff_h264_get_slice_type(const H264SliceContext *sl)
Reconstruct bitstream slice_type.
Definition: h264_slice.c:2313
h
h
Definition: vp9dsp_template.c:2038
H264SliceContext::cabac_init_idc
int cabac_init_idc
Definition: h264dec.h:312
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:469
H264PredWeightTable::luma_weight_flag
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264_parse.h:74
H264_MAX_PICTURE_COUNT
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:50
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:34
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:173
H264SliceContext::bipred_scratchpad_allocated
int bipred_scratchpad_allocated
Definition: h264dec.h:278
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:72
H264SliceContext::slice_type_fixed
int slice_type_fixed
Definition: h264dec.h:178
H264Ref::poc
int poc
Definition: h264dec.h:164
AVFilmGrainParams::type
enum AVFilmGrainParamsType type
Specifies the codec for which this structure is valid.
Definition: film_grain_params.h:220
IS_8x8DCT
#define IS_8x8DCT(a)
Definition: h264dec.h:96
H264Picture::qscale_table_buf
AVBufferRef * qscale_table_buf
Definition: h264dec.h:113
H264_SEI_PIC_STRUCT_TOP_FIELD
@ H264_SEI_PIC_STRUCT_TOP_FIELD
1: top field
Definition: h264_sei.h:32
H264SliceContext::delta_poc
int delta_poc[2]
Definition: h264dec.h:322
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3213
H264Picture::long_ref
int long_ref
1->long term reference 0->short term reference
Definition: h264dec.h:135
H264Ref::reference
int reference
Definition: h264dec.h:163
H264Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: h264dec.h:117
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:371
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:467
H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
5: top field, bottom field, top field repeated, in that order
Definition: h264_sei.h:36
H264SEIFilmGrainCharacteristics::separate_colour_description_present_flag
int separate_colour_description_present_flag
Definition: h264_sei.h:171
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2778
H264SliceContext::mb_field_decoding_flag
int mb_field_decoding_flag
Definition: h264dec.h:234
H264Context::is_avc
int is_avc
Used to parse AVC variant of H.264.
Definition: h264dec.h:448