/* FFmpeg — h264_slice.c */
/*
 * H.26L/H.264/AVC/JVT/14496-10/... decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 codec.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "config_components.h"

#include "libavutil/avassert.h"
#include "libavutil/display.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timecode.h"
#include "internal.h"
#include "cabac.h"
#include "cabac_functions.h"
#include "decode.h"
#include "error_resilience.h"
#include "avcodec.h"
#include "h264.h"
#include "h264dec.h"
#include "h264data.h"
#include "h264chroma.h"
#include "h264_ps.h"
#include "golomb.h"
#include "mathops.h"
#include "mpegutils.h"
#include "rectangle.h"
#include "thread.h"
#include "threadframe.h"

/* 4x4 coefficient scan order for field (interlaced) macroblocks.
 * Declared [16+1]: the extra zero-initialized slot is presumably padding
 * against one-past-the-end reads — TODO confirm against users of the table. */
static const uint8_t field_scan[16+1] = {
    0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
    0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
    2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
    3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
};

/* 8x8 coefficient scan order for field macroblocks (CABAC).
 * The +1 slot is zero-initialized padding — TODO confirm. */
static const uint8_t field_scan8x8[64+1] = {
    0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
    1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
    2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
    0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
    2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
    2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
    2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
    3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
    3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
    4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
    4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
    5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
    5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
    7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
    6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
    7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
};

/* 8x8 field scan reordered for CAVLC coefficient decoding.
 * The +1 slot is zero-initialized padding — TODO confirm. */
static const uint8_t field_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
    2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
    3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
    5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
    0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
    1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
    3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
    5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
    0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
    1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
    3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
    5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
    1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
    1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
    3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
    6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
};

// zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)]
/* The +1 slot is zero-initialized padding — TODO confirm. */
static const uint8_t zigzag_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
    4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
    3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
    2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
    1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
    3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
    2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
    3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
    0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
    2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
    1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
    4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
    0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
    1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
    0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
    5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
};

118 static void release_unused_pictures(H264Context *h, int remove_current)
119 {
120  int i;
121 
122  /* release non reference frames */
123  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
124  if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
125  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
126  ff_h264_unref_picture(h, &h->DPB[i]);
127  }
128  }
129 }
130 
131 static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
132 {
133  const H264Context *h = sl->h264;
134  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
135 
136  av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated, 16 * 6 * alloc_size);
137  // edge emu needs blocksize + filter length - 1
138  // (= 21x21 for H.264)
139  av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated, alloc_size * 2 * 21);
140 
142  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
144  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
145 
146  if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
147  !sl->top_borders[0] || !sl->top_borders[1]) {
150  av_freep(&sl->top_borders[0]);
151  av_freep(&sl->top_borders[1]);
152 
155  sl->top_borders_allocated[0] = 0;
156  sl->top_borders_allocated[1] = 0;
157  return AVERROR(ENOMEM);
158  }
159 
160  return 0;
161 }
162 
164 {
165  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
166  const int mb_array_size = h->mb_stride * h->mb_height;
167  const int b4_stride = h->mb_width * 4 + 1;
168  const int b4_array_size = b4_stride * h->mb_height * 4;
169 
170  h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
172  h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
173  sizeof(uint32_t), av_buffer_allocz);
174  h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
175  sizeof(int16_t), av_buffer_allocz);
176  h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
177 
178  if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
179  !h->ref_index_pool) {
180  av_buffer_pool_uninit(&h->qscale_table_pool);
181  av_buffer_pool_uninit(&h->mb_type_pool);
182  av_buffer_pool_uninit(&h->motion_val_pool);
183  av_buffer_pool_uninit(&h->ref_index_pool);
184  return AVERROR(ENOMEM);
185  }
186 
187  return 0;
188 }
189 
191 {
192  int i, ret = 0;
193 
194  av_assert0(!pic->f->data[0]);
195 
196  pic->tf.f = pic->f;
197  ret = ff_thread_get_ext_buffer(h->avctx, &pic->tf,
198  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
199  if (ret < 0)
200  goto fail;
201 
202  if (pic->needs_fg) {
203  pic->f_grain->format = pic->f->format;
204  pic->f_grain->width = pic->f->width;
205  pic->f_grain->height = pic->f->height;
206  ret = ff_thread_get_buffer(h->avctx, pic->f_grain, 0);
207  if (ret < 0)
208  goto fail;
209  }
210 
211  if (h->avctx->hwaccel) {
212  const AVHWAccel *hwaccel = h->avctx->hwaccel;
214  if (hwaccel->frame_priv_data_size) {
216  if (!pic->hwaccel_priv_buf)
217  return AVERROR(ENOMEM);
219  }
220  }
221  if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
222  int h_chroma_shift, v_chroma_shift;
224  &h_chroma_shift, &v_chroma_shift);
225 
226  for(i=0; i<AV_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
227  memset(pic->f->data[1] + pic->f->linesize[1]*i,
228  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
229  memset(pic->f->data[2] + pic->f->linesize[2]*i,
230  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
231  }
232  }
233 
234  if (!h->qscale_table_pool) {
236  if (ret < 0)
237  goto fail;
238  }
239 
240  pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
241  pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool);
242  if (!pic->qscale_table_buf || !pic->mb_type_buf)
243  goto fail;
244 
245  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
246  pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
247 
248  for (i = 0; i < 2; i++) {
249  pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
250  pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool);
251  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
252  goto fail;
253 
254  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
255  pic->ref_index[i] = pic->ref_index_buf[i]->data;
256  }
257 
258  pic->pps_buf = av_buffer_ref(h->ps.pps_ref);
259  if (!pic->pps_buf)
260  goto fail;
261  pic->pps = (const PPS*)pic->pps_buf->data;
262 
263  pic->mb_width = h->mb_width;
264  pic->mb_height = h->mb_height;
265  pic->mb_stride = h->mb_stride;
266 
267  return 0;
268 fail:
269  ff_h264_unref_picture(h, pic);
270  return (ret < 0) ? ret : AVERROR(ENOMEM);
271 }
272 
274 {
275  int i;
276 
277  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
278  if (!h->DPB[i].f->buf[0])
279  return i;
280  }
281  return AVERROR_INVALIDDATA;
282 }
283 
284 
/* True if pointer a lies within [b, b + size) (pointer-range test). */
#define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))

/* Translate a picture pointer belonging to old_ctx's DPB into the pointer of
 * the same slot in new_ctx's DPB; NULL for pointers outside the DPB. */
#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    (((pic) && (pic) >= (old_ctx)->DPB &&                 \
      (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ?  \
     &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)

292 static void copy_picture_range(H264Picture **to, H264Picture **from, int count,
293  H264Context *new_base,
294  H264Context *old_base)
295 {
296  int i;
297 
298  for (i = 0; i < count; i++) {
299  av_assert1(!from[i] ||
300  IN_RANGE(from[i], old_base, 1) ||
301  IN_RANGE(from[i], old_base->DPB, H264_MAX_PICTURE_COUNT));
302  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
303  }
304 }
305 
307 
309  const AVCodecContext *src)
310 {
311  H264Context *h = dst->priv_data, *h1 = src->priv_data;
312  int inited = h->context_initialized, err = 0;
313  int need_reinit = 0;
314  int i, ret;
315 
316  if (dst == src)
317  return 0;
318 
319  if (inited && !h1->ps.sps)
320  return AVERROR_INVALIDDATA;
321 
322  if (inited &&
323  (h->width != h1->width ||
324  h->height != h1->height ||
325  h->mb_width != h1->mb_width ||
326  h->mb_height != h1->mb_height ||
327  !h->ps.sps ||
328  h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma ||
329  h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
330  h->ps.sps->vui.matrix_coeffs != h1->ps.sps->vui.matrix_coeffs)) {
331  need_reinit = 1;
332  }
333 
334  /* copy block_offset since frame_start may not be called */
335  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
336 
337  // SPS/PPS
338  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
339  ret = av_buffer_replace(&h->ps.sps_list[i], h1->ps.sps_list[i]);
340  if (ret < 0)
341  return ret;
342  }
343  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
344  ret = av_buffer_replace(&h->ps.pps_list[i], h1->ps.pps_list[i]);
345  if (ret < 0)
346  return ret;
347  }
348 
349  ret = av_buffer_replace(&h->ps.pps_ref, h1->ps.pps_ref);
350  if (ret < 0)
351  return ret;
352  h->ps.pps = NULL;
353  h->ps.sps = NULL;
354  if (h1->ps.pps_ref) {
355  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
356  h->ps.sps = h->ps.pps->sps;
357  }
358 
359  if (need_reinit || !inited) {
360  h->width = h1->width;
361  h->height = h1->height;
362  h->mb_height = h1->mb_height;
363  h->mb_width = h1->mb_width;
364  h->mb_num = h1->mb_num;
365  h->mb_stride = h1->mb_stride;
366  h->b_stride = h1->b_stride;
367  h->x264_build = h1->x264_build;
368 
369  if (h->context_initialized || h1->context_initialized) {
370  if ((err = h264_slice_header_init(h)) < 0) {
371  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
372  return err;
373  }
374  }
375 
376  /* copy block_offset since frame_start may not be called */
377  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
378  }
379 
380  h->avctx->coded_height = h1->avctx->coded_height;
381  h->avctx->coded_width = h1->avctx->coded_width;
382  h->avctx->width = h1->avctx->width;
383  h->avctx->height = h1->avctx->height;
384  h->width_from_caller = h1->width_from_caller;
385  h->height_from_caller = h1->height_from_caller;
386  h->coded_picture_number = h1->coded_picture_number;
387  h->first_field = h1->first_field;
388  h->picture_structure = h1->picture_structure;
389  h->mb_aff_frame = h1->mb_aff_frame;
390  h->droppable = h1->droppable;
391 
392  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
393  ret = ff_h264_replace_picture(h, &h->DPB[i], &h1->DPB[i]);
394  if (ret < 0)
395  return ret;
396  }
397 
398  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
399  ret = ff_h264_replace_picture(h, &h->cur_pic, &h1->cur_pic);
400  if (ret < 0)
401  return ret;
402 
403  h->enable_er = h1->enable_er;
404  h->workaround_bugs = h1->workaround_bugs;
405  h->droppable = h1->droppable;
406 
407  // extradata/NAL handling
408  h->is_avc = h1->is_avc;
409  h->nal_length_size = h1->nal_length_size;
410 
411  memcpy(&h->poc, &h1->poc, sizeof(h->poc));
412 
413  memcpy(h->short_ref, h1->short_ref, sizeof(h->short_ref));
414  memcpy(h->long_ref, h1->long_ref, sizeof(h->long_ref));
415  memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
416  memcpy(h->last_pocs, h1->last_pocs, sizeof(h->last_pocs));
417 
418  h->next_output_pic = h1->next_output_pic;
419  h->next_outputed_poc = h1->next_outputed_poc;
420  h->poc_offset = h1->poc_offset;
421 
422  memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
423  h->nb_mmco = h1->nb_mmco;
424  h->mmco_reset = h1->mmco_reset;
425  h->explicit_ref_marking = h1->explicit_ref_marking;
426  h->long_ref_count = h1->long_ref_count;
427  h->short_ref_count = h1->short_ref_count;
428 
429  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
430  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
431  copy_picture_range(h->delayed_pic, h1->delayed_pic,
432  FF_ARRAY_ELEMS(h->delayed_pic), h, h1);
433 
434  h->frame_recovered = h1->frame_recovered;
435 
436  ret = ff_h264_sei_ctx_replace(&h->sei, &h1->sei);
437  if (ret < 0)
438  return ret;
439 
440  h->sei.common.unregistered.x264_build = h1->sei.common.unregistered.x264_build;
441 
442  if (!h->cur_pic_ptr)
443  return 0;
444 
445  if (!h->droppable) {
447  h->poc.prev_poc_msb = h->poc.poc_msb;
448  h->poc.prev_poc_lsb = h->poc.poc_lsb;
449  }
450  h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
451  h->poc.prev_frame_num = h->poc.frame_num;
452 
453  h->recovery_frame = h1->recovery_frame;
454 
455  return err;
456 }
457 
459  const AVCodecContext *src)
460 {
461  H264Context *h = dst->priv_data;
462  const H264Context *h1 = src->priv_data;
463 
464  h->is_avc = h1->is_avc;
465  h->nal_length_size = h1->nal_length_size;
466 
467  return 0;
468 }
469 
471 {
472  H264Picture *pic;
473  int i, ret;
474  const int pixel_shift = h->pixel_shift;
475 
476  if (!ff_thread_can_start_frame(h->avctx)) {
477  av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
478  return -1;
479  }
480 
482  h->cur_pic_ptr = NULL;
483 
485  if (i < 0) {
486  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
487  return i;
488  }
489  pic = &h->DPB[i];
490 
491  pic->reference = h->droppable ? 0 : h->picture_structure;
492 #if FF_API_FRAME_PICTURE_NUMBER
494  pic->f->coded_picture_number = h->coded_picture_number++;
496 #endif
497  pic->field_picture = h->picture_structure != PICT_FRAME;
498  pic->frame_num = h->poc.frame_num;
499  /*
500  * Zero key_frame here; IDR markings per slice in frame or fields are ORed
501  * in later.
502  * See decode_nal_units().
503  */
504  pic->f->key_frame = 0;
505  pic->mmco_reset = 0;
506  pic->recovered = 0;
507  pic->invalid_gap = 0;
508  pic->sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
509 
510  pic->f->pict_type = h->slice_ctx[0].slice_type;
511 
512  pic->f->crop_left = h->crop_left;
513  pic->f->crop_right = h->crop_right;
514  pic->f->crop_top = h->crop_top;
515  pic->f->crop_bottom = h->crop_bottom;
516 
517  pic->needs_fg = h->sei.common.film_grain_characteristics.present && !h->avctx->hwaccel &&
518  !(h->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN);
519 
520  if ((ret = alloc_picture(h, pic)) < 0)
521  return ret;
522 
523  h->cur_pic_ptr = pic;
524  ff_h264_unref_picture(h, &h->cur_pic);
525  if (CONFIG_ERROR_RESILIENCE) {
526  ff_h264_set_erpic(&h->er.cur_pic, NULL);
527  }
528 
529  if ((ret = ff_h264_ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
530  return ret;
531 
532  for (i = 0; i < h->nb_slice_ctx; i++) {
533  h->slice_ctx[i].linesize = h->cur_pic_ptr->f->linesize[0];
534  h->slice_ctx[i].uvlinesize = h->cur_pic_ptr->f->linesize[1];
535  }
536 
537  if (CONFIG_ERROR_RESILIENCE && h->enable_er) {
538  ff_er_frame_start(&h->er);
539  ff_h264_set_erpic(&h->er.last_pic, NULL);
540  ff_h264_set_erpic(&h->er.next_pic, NULL);
541  }
542 
543  for (i = 0; i < 16; i++) {
544  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
545  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
546  }
547  for (i = 0; i < 16; i++) {
548  h->block_offset[16 + i] =
549  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
550  h->block_offset[48 + 16 + i] =
551  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
552  }
553 
554  /* We mark the current picture as non-reference after allocating it, so
555  * that if we break out due to an error it can be released automatically
556  * in the next ff_mpv_frame_start().
557  */
558  h->cur_pic_ptr->reference = 0;
559 
560  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
561 
562  h->next_output_pic = NULL;
563 
564  h->postpone_filter = 0;
565 
566  h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);
567 
568  if (h->sei.common.unregistered.x264_build >= 0)
569  h->x264_build = h->sei.common.unregistered.x264_build;
570 
571  assert(h->cur_pic_ptr->long_ref == 0);
572 
573  return 0;
574 }
575 
577  uint8_t *src_y,
578  uint8_t *src_cb, uint8_t *src_cr,
579  int linesize, int uvlinesize,
580  int simple)
581 {
582  uint8_t *top_border;
583  int top_idx = 1;
584  const int pixel_shift = h->pixel_shift;
585  int chroma444 = CHROMA444(h);
586  int chroma422 = CHROMA422(h);
587 
588  src_y -= linesize;
589  src_cb -= uvlinesize;
590  src_cr -= uvlinesize;
591 
592  if (!simple && FRAME_MBAFF(h)) {
593  if (sl->mb_y & 1) {
594  if (!MB_MBAFF(sl)) {
595  top_border = sl->top_borders[0][sl->mb_x];
596  AV_COPY128(top_border, src_y + 15 * linesize);
597  if (pixel_shift)
598  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
599  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
600  if (chroma444) {
601  if (pixel_shift) {
602  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
603  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
604  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
605  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
606  } else {
607  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
608  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
609  }
610  } else if (chroma422) {
611  if (pixel_shift) {
612  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
613  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
614  } else {
615  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
616  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
617  }
618  } else {
619  if (pixel_shift) {
620  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
621  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
622  } else {
623  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
624  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
625  }
626  }
627  }
628  }
629  } else if (MB_MBAFF(sl)) {
630  top_idx = 0;
631  } else
632  return;
633  }
634 
635  top_border = sl->top_borders[top_idx][sl->mb_x];
636  /* There are two lines saved, the line above the top macroblock
637  * of a pair, and the line above the bottom macroblock. */
638  AV_COPY128(top_border, src_y + 16 * linesize);
639  if (pixel_shift)
640  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
641 
642  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
643  if (chroma444) {
644  if (pixel_shift) {
645  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
646  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
647  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
648  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
649  } else {
650  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
651  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
652  }
653  } else if (chroma422) {
654  if (pixel_shift) {
655  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
656  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
657  } else {
658  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
659  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
660  }
661  } else {
662  if (pixel_shift) {
663  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
664  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
665  } else {
666  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
667  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
668  }
669  }
670  }
671 }
672 
673 /**
674  * Initialize implicit_weight table.
675  * @param field 0/1 initialize the weight for interlaced MBAFF
676  * -1 initializes the rest
677  */
679 {
680  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
681 
682  for (i = 0; i < 2; i++) {
683  sl->pwt.luma_weight_flag[i] = 0;
684  sl->pwt.chroma_weight_flag[i] = 0;
685  }
686 
687  if (field < 0) {
688  if (h->picture_structure == PICT_FRAME) {
689  cur_poc = h->cur_pic_ptr->poc;
690  } else {
691  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
692  }
693  if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
694  sl->ref_list[0][0].poc + (int64_t)sl->ref_list[1][0].poc == 2LL * cur_poc) {
695  sl->pwt.use_weight = 0;
696  sl->pwt.use_weight_chroma = 0;
697  return;
698  }
699  ref_start = 0;
700  ref_count0 = sl->ref_count[0];
701  ref_count1 = sl->ref_count[1];
702  } else {
703  cur_poc = h->cur_pic_ptr->field_poc[field];
704  ref_start = 16;
705  ref_count0 = 16 + 2 * sl->ref_count[0];
706  ref_count1 = 16 + 2 * sl->ref_count[1];
707  }
708 
709  sl->pwt.use_weight = 2;
710  sl->pwt.use_weight_chroma = 2;
711  sl->pwt.luma_log2_weight_denom = 5;
713 
714  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
715  int64_t poc0 = sl->ref_list[0][ref0].poc;
716  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
717  int w = 32;
718  if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
719  int poc1 = sl->ref_list[1][ref1].poc;
720  int td = av_clip_int8(poc1 - poc0);
721  if (td) {
722  int tb = av_clip_int8(cur_poc - poc0);
723  int tx = (16384 + (FFABS(td) >> 1)) / td;
724  int dist_scale_factor = (tb * tx + 32) >> 8;
725  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
726  w = 64 - dist_scale_factor;
727  }
728  }
729  if (field < 0) {
730  sl->pwt.implicit_weight[ref0][ref1][0] =
731  sl->pwt.implicit_weight[ref0][ref1][1] = w;
732  } else {
733  sl->pwt.implicit_weight[ref0][ref1][field] = w;
734  }
735  }
736  }
737 }
738 
739 /**
740  * initialize scan tables
741  */
743 {
744  int i;
745  for (i = 0; i < 16; i++) {
746 #define TRANSPOSE(x) ((x) >> 2) | (((x) << 2) & 0xF)
747  h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]);
748  h->field_scan[i] = TRANSPOSE(field_scan[i]);
749 #undef TRANSPOSE
750  }
751  for (i = 0; i < 64; i++) {
752 #define TRANSPOSE(x) ((x) >> 3) | (((x) & 7) << 3)
753  h->zigzag_scan8x8[i] = TRANSPOSE(ff_zigzag_direct[i]);
754  h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]);
755  h->field_scan8x8[i] = TRANSPOSE(field_scan8x8[i]);
756  h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]);
757 #undef TRANSPOSE
758  }
759  if (h->ps.sps->transform_bypass) { // FIXME same ugly
760  memcpy(h->zigzag_scan_q0 , ff_zigzag_scan , sizeof(h->zigzag_scan_q0 ));
761  memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 ));
762  memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
763  memcpy(h->field_scan_q0 , field_scan , sizeof(h->field_scan_q0 ));
764  memcpy(h->field_scan8x8_q0 , field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
765  memcpy(h->field_scan8x8_cavlc_q0 , field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
766  } else {
767  memcpy(h->zigzag_scan_q0 , h->zigzag_scan , sizeof(h->zigzag_scan_q0 ));
768  memcpy(h->zigzag_scan8x8_q0 , h->zigzag_scan8x8 , sizeof(h->zigzag_scan8x8_q0 ));
769  memcpy(h->zigzag_scan8x8_cavlc_q0 , h->zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
770  memcpy(h->field_scan_q0 , h->field_scan , sizeof(h->field_scan_q0 ));
771  memcpy(h->field_scan8x8_q0 , h->field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
772  memcpy(h->field_scan8x8_cavlc_q0 , h->field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
773  }
774 }
775 
776 static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
777 {
778 #define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
779  (CONFIG_H264_D3D11VA_HWACCEL * 2) + \
780  CONFIG_H264_NVDEC_HWACCEL + \
781  CONFIG_H264_VAAPI_HWACCEL + \
782  CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
783  CONFIG_H264_VDPAU_HWACCEL)
784  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
785  const enum AVPixelFormat *choices = pix_fmts;
786  int i;
787 
788  switch (h->ps.sps->bit_depth_luma) {
789  case 9:
790  if (CHROMA444(h)) {
791  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
792  *fmt++ = AV_PIX_FMT_GBRP9;
793  } else
794  *fmt++ = AV_PIX_FMT_YUV444P9;
795  } else if (CHROMA422(h))
796  *fmt++ = AV_PIX_FMT_YUV422P9;
797  else
798  *fmt++ = AV_PIX_FMT_YUV420P9;
799  break;
800  case 10:
801 #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
802  if (h->avctx->colorspace != AVCOL_SPC_RGB)
803  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
804 #endif
805  if (CHROMA444(h)) {
806  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
807  *fmt++ = AV_PIX_FMT_GBRP10;
808  } else
809  *fmt++ = AV_PIX_FMT_YUV444P10;
810  } else if (CHROMA422(h))
811  *fmt++ = AV_PIX_FMT_YUV422P10;
812  else
813  *fmt++ = AV_PIX_FMT_YUV420P10;
814  break;
815  case 12:
816  if (CHROMA444(h)) {
817  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
818  *fmt++ = AV_PIX_FMT_GBRP12;
819  } else
820  *fmt++ = AV_PIX_FMT_YUV444P12;
821  } else if (CHROMA422(h))
822  *fmt++ = AV_PIX_FMT_YUV422P12;
823  else
824  *fmt++ = AV_PIX_FMT_YUV420P12;
825  break;
826  case 14:
827  if (CHROMA444(h)) {
828  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
829  *fmt++ = AV_PIX_FMT_GBRP14;
830  } else
831  *fmt++ = AV_PIX_FMT_YUV444P14;
832  } else if (CHROMA422(h))
833  *fmt++ = AV_PIX_FMT_YUV422P14;
834  else
835  *fmt++ = AV_PIX_FMT_YUV420P14;
836  break;
837  case 8:
838 #if CONFIG_H264_VDPAU_HWACCEL
839  *fmt++ = AV_PIX_FMT_VDPAU;
840 #endif
841 #if CONFIG_H264_NVDEC_HWACCEL
842  *fmt++ = AV_PIX_FMT_CUDA;
843 #endif
844 #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
845  if (h->avctx->colorspace != AVCOL_SPC_RGB)
846  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
847 #endif
848  if (CHROMA444(h)) {
849  if (h->avctx->colorspace == AVCOL_SPC_RGB)
850  *fmt++ = AV_PIX_FMT_GBRP;
851  else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
852  *fmt++ = AV_PIX_FMT_YUVJ444P;
853  else
854  *fmt++ = AV_PIX_FMT_YUV444P;
855  } else if (CHROMA422(h)) {
856  if (h->avctx->color_range == AVCOL_RANGE_JPEG)
857  *fmt++ = AV_PIX_FMT_YUVJ422P;
858  else
859  *fmt++ = AV_PIX_FMT_YUV422P;
860  } else {
861 #if CONFIG_H264_DXVA2_HWACCEL
862  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
863 #endif
864 #if CONFIG_H264_D3D11VA_HWACCEL
865  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
866  *fmt++ = AV_PIX_FMT_D3D11;
867 #endif
868 #if CONFIG_H264_VAAPI_HWACCEL
869  *fmt++ = AV_PIX_FMT_VAAPI;
870 #endif
871  if (h->avctx->codec->pix_fmts)
872  choices = h->avctx->codec->pix_fmts;
873  else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
874  *fmt++ = AV_PIX_FMT_YUVJ420P;
875  else
876  *fmt++ = AV_PIX_FMT_YUV420P;
877  }
878  break;
879  default:
880  av_log(h->avctx, AV_LOG_ERROR,
881  "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
882  return AVERROR_INVALIDDATA;
883  }
884 
885  *fmt = AV_PIX_FMT_NONE;
886 
887  for (i=0; choices[i] != AV_PIX_FMT_NONE; i++)
888  if (choices[i] == h->avctx->pix_fmt && !force_callback)
889  return choices[i];
890  return ff_thread_get_format(h->avctx, choices);
891 }
892 
893 /* export coded and cropped frame dimensions to AVCodecContext */
895 {
896  const SPS *sps = (const SPS*)h->ps.sps;
897  int cr = sps->crop_right;
898  int cl = sps->crop_left;
899  int ct = sps->crop_top;
900  int cb = sps->crop_bottom;
901  int width = h->width - (cr + cl);
902  int height = h->height - (ct + cb);
903  av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
904  av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);
905 
906  /* handle container cropping */
907  if (h->width_from_caller > 0 && h->height_from_caller > 0 &&
908  !sps->crop_top && !sps->crop_left &&
909  FFALIGN(h->width_from_caller, 16) == FFALIGN(width, 16) &&
910  FFALIGN(h->height_from_caller, 16) == FFALIGN(height, 16) &&
911  h->width_from_caller <= width &&
912  h->height_from_caller <= height) {
913  width = h->width_from_caller;
914  height = h->height_from_caller;
915  cl = 0;
916  ct = 0;
917  cr = h->width - width;
918  cb = h->height - height;
919  } else {
920  h->width_from_caller = 0;
921  h->height_from_caller = 0;
922  }
923 
924  h->avctx->coded_width = h->width;
925  h->avctx->coded_height = h->height;
926  h->avctx->width = width;
927  h->avctx->height = height;
928  h->crop_right = cr;
929  h->crop_left = cl;
930  h->crop_top = ct;
931  h->crop_bottom = cb;
932 }
933 
/* h264_slice_header_init (signature on elided line 934):
 * per-SPS decoder (re)initialization — exports SAR/framerate, allocates
 * decoder tables, validates the luma bit depth and (re)builds all
 * bit-depth/chroma-format dependent DSP and prediction function tables.
 * NOTE(review): this doxygen rendering elides some lines (940, 956,
 * 961-962, 973, 1003, 1011); the code below is byte-identical to what is
 * visible, only comments were added. */
935 {
936  const SPS *sps = h->ps.sps;
937  int i, ret;
938 
     /* No active SPS: nothing can be initialized. The elided line 940
      * presumably sets ret to an error code before the jump — confirm
      * against the full source. */
939  if (!sps) {
941  goto fail;
942  }
943 
     /* Export the sample aspect ratio and derive the chroma subsampling
      * shifts from the currently negotiated pixel format. */
944  ff_set_sar(h->avctx, sps->vui.sar);
945  av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
946  &h->chroma_x_shift, &h->chroma_y_shift);
947 
948  if (sps->timing_info_present_flag) {
949  int64_t den = sps->time_scale;
     /* Old x264 (build < 44) wrote half the spec time_scale; compensate. */
950  if (h->x264_build < 44U)
951  den *= 2;
952  av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
953  sps->num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30);
954  }
955 
957 
958  h->first_field = 0;
959  h->prev_interlaced_frame = 1;
960 
     /* 'ret' here holds the status of the table allocation performed on
      * the elided lines 961-962. */
963  if (ret < 0) {
964  av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
965  goto fail;
966  }
967 
     /* Only 8..14-bit luma is supported, and the odd depths 11/13 are
      * explicitly rejected. Error code assignment on elided line 973. */
968  if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
969  sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
970  ) {
971  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
972  sps->bit_depth_luma);
974  goto fail;
975  }
976 
     /* Record the bit depth / chroma format this context was built for;
      * h264_init_ps() compares against these to detect needed reinits. */
977  h->cur_bit_depth_luma =
978  h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
979  h->cur_chroma_format_idc = sps->chroma_format_idc;
980  h->pixel_shift = sps->bit_depth_luma > 8;
981  h->chroma_format_idc = sps->chroma_format_idc;
982  h->bit_depth_luma = sps->bit_depth_luma;
983 
     /* (Re)initialize all bit-depth/chroma dependent DSP dispatch tables. */
984  ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
985  sps->chroma_format_idc);
986  ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
987  ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
988  ff_h264_pred_init(&h->hpc, AV_CODEC_ID_H264, sps->bit_depth_luma,
989  sps->chroma_format_idc);
990  ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);
991 
     /* Without slice threading only slice context 0 is used; with slice
      * threading each context gets its own slice of the shared tables
      * (per-context init presumably on the elided line 1003). */
992  if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
993  ff_h264_slice_context_init(h, &h->slice_ctx[0]);
994  } else {
995  for (i = 0; i < h->nb_slice_ctx; i++) {
996  H264SliceContext *sl = &h->slice_ctx[i];
997 
998  sl->h264 = h;
999  sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
1000  sl->mvd_table[0] = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
1001  sl->mvd_table[1] = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;
1002 
1004  }
1005  }
1006 
1007  h->context_initialized = 1;
1008 
1009  return 0;
1010 fail:
     /* Cleanup on the elided line 1011; the context is left uninitialized
      * so the next slice forces a full reinit. */
1012  h->context_initialized = 0;
1013  return ret;
1014 }
1015 
/* non_j_pixfmt (signature on elided line 1016): normalizes a pixel format
 * so that a J ("full range") vs non-J difference alone does not count as a
 * format change. NOTE(review): the case labels on elided lines 1019-1021
 * presumably map AV_PIX_FMT_YUVJ* formats to their AV_PIX_FMT_YUV*
 * equivalents — confirm against the full source. */
1017 {
1018  switch (a) {
     /* Any format without a J twin is returned unchanged. */
1022  default:
1023  return a;
1024  }
1025 }
1026 
/* Activates the PPS referenced by the slice (on the first slice of a
 * picture) and the SPS it points to, then decides whether the decoder
 * context must be rebuilt (dimension, bit depth, chroma format, pixel
 * format or SAR change) and performs the reinit via
 * h264_slice_header_init() if so.
 * Returns 0 on success or a negative AVERROR code. */
1027 static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
1028 {
1029  const SPS *sps;
1030  int needs_reinit = 0, must_reinit, ret;
1031 
     /* First slice of a picture: switch to the PPS this slice references,
      * holding our own reference to it. */
1032  if (first_slice) {
1033  av_buffer_unref(&h->ps.pps_ref);
1034  h->ps.pps = NULL;
1035  h->ps.pps_ref = av_buffer_ref(h->ps.pps_list[sl->pps_id]);
1036  if (!h->ps.pps_ref)
1037  return AVERROR(ENOMEM);
1038  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
1039  }
1040 
     /* SPS changed under us: compare against the parameters the current
      * context was initialized with to see if a rebuild is required. */
1041  if (h->ps.sps != h->ps.pps->sps) {
1042  h->ps.sps = (const SPS*)h->ps.pps->sps;
1043 
1044  if (h->mb_width != h->ps.sps->mb_width ||
1045  h->mb_height != h->ps.sps->mb_height ||
1046  h->cur_bit_depth_luma != h->ps.sps->bit_depth_luma ||
1047  h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc
1048  )
1049  needs_reinit = 1;
1050 
1051  if (h->bit_depth_luma != h->ps.sps->bit_depth_luma ||
1052  h->chroma_format_idc != h->ps.sps->chroma_format_idc)
1053  needs_reinit = 1;
1054  }
1055  sps = h->ps.sps;
1056 
     /* Independent check against the externally visible coded size /
      * formats; covers mid-stream parameter changes. */
1057  must_reinit = (h->context_initialized &&
1058  ( 16*sps->mb_width != h->avctx->coded_width
1059  || 16*sps->mb_height != h->avctx->coded_height
1060  || h->cur_bit_depth_luma != sps->bit_depth_luma
1061  || h->cur_chroma_format_idc != sps->chroma_format_idc
1062  || h->mb_width != sps->mb_width
1063  || h->mb_height != sps->mb_height
1064  ));
     /* A range-only (J vs non-J) pixel format difference is deliberately
      * ignored via non_j_pixfmt(). */
1065  if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
1066  || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0))))
1067  must_reinit = 1;
1068 
1069  if (first_slice && av_cmp_q(sps->vui.sar, h->avctx->sample_aspect_ratio))
1070  must_reinit = 1;
1071 
     /* Export stream properties to the AVCodecContext unless a frame
      * thread has already frozen them for this frame. */
1072  if (!h->setup_finished) {
1073  h->avctx->profile = ff_h264_get_profile(sps);
1074  h->avctx->level = sps->level_idc;
1075  h->avctx->refs = sps->ref_frame_count;
1076 
1077  h->mb_width = sps->mb_width;
1078  h->mb_height = sps->mb_height;
1079  h->mb_num = h->mb_width * h->mb_height;
1080  h->mb_stride = h->mb_width + 1;
1081 
1082  h->b_stride = h->mb_width * 4;
1083 
1084  h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p
1085 
1086  h->width = 16 * h->mb_width;
1087  h->height = 16 * h->mb_height;
1088 
1089  init_dimensions(h);
1090 
1091  if (sps->vui.video_signal_type_present_flag) {
1092  h->avctx->color_range = sps->vui.video_full_range_flag > 0 ? AVCOL_RANGE_JPEG
1093  : AVCOL_RANGE_MPEG;
1094  if (sps->vui.colour_description_present_flag) {
     /* A matrix-coefficients change invalidates the current context. */
1095  if (h->avctx->colorspace != sps->vui.matrix_coeffs)
1096  needs_reinit = 1;
1097  h->avctx->color_primaries = sps->vui.colour_primaries;
1098  h->avctx->color_trc = sps->vui.transfer_characteristics;
1099  h->avctx->colorspace = sps->vui.matrix_coeffs;
1100  }
1101  }
1102 
     /* Alternative transfer characteristics SEI overrides the VUI TRC
      * when it names a known, specified transfer function. */
1103  if (h->sei.common.alternative_transfer.present &&
1104  av_color_transfer_name(h->sei.common.alternative_transfer.preferred_transfer_characteristics) &&
1105  h->sei.common.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
1106  h->avctx->color_trc = h->sei.common.alternative_transfer.preferred_transfer_characteristics;
1107  }
1108  }
1109  h->avctx->chroma_sample_location = sps->vui.chroma_location;
1110 
1111  if (!h->context_initialized || must_reinit || needs_reinit) {
1112  int flush_changes = h->context_initialized;
1113  h->context_initialized = 0;
     /* Parameter changes are only legal on the first slice of a picture. */
1114  if (sl != h->slice_ctx) {
1115  av_log(h->avctx, AV_LOG_ERROR,
1116  "changing width %d -> %d / height %d -> %d on "
1117  "slice %d\n",
1118  h->width, h->avctx->coded_width,
1119  h->height, h->avctx->coded_height,
1120  h->current_slice + 1);
1121  return AVERROR_INVALIDDATA;
1122  }
1123 
1124  av_assert1(first_slice);
1125 
     /* Flush call on the elided line 1127 (presumably
      * ff_h264_flush_change(h) — confirm against the full source). */
1126  if (flush_changes)
1128 
1129  if ((ret = get_pixel_format(h, 1)) < 0)
1130  return ret;
1131  h->avctx->pix_fmt = ret;
1132 
1133  av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
1134  "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt));
1135 
1136  if ((ret = h264_slice_header_init(h)) < 0) {
1137  av_log(h->avctx, AV_LOG_ERROR,
1138  "h264_slice_header_init() failed\n");
1139  return ret;
1140  }
1141  }
1142 
1143  return 0;
1144 }
1145 
/* h264_export_frame_props (signature on elided line 1146): fills the
 * output AVFrame of the current picture with interlacing/field-order
 * flags, repeat_pict from the picture timing SEI, generic H.2645 SEI side
 * data and SMPTE timecodes. NOTE(review): the switch's case labels
 * (H264_SEI_PIC_STRUCT_*) are elided in this rendering; code lines below
 * are byte-identical, only comments were added. */
1147 {
1148  const SPS *sps = h->ps.sps;
1149  H264Picture *cur = h->cur_pic_ptr;
1150  AVFrame *out = cur->f;
1151  int ret;
1152 
1153  out->interlaced_frame = 0;
1154  out->repeat_pict = 0;
1155 
1156  /* Signal interlacing information externally. */
1157  /* Prioritize picture timing SEI information over used
1158  * decoding process if it exists. */
1159  if (h->sei.picture_timing.present) {
1160  int ret = ff_h264_sei_process_picture_timing(&h->sei.picture_timing, sps,
1161  h->avctx);
     /* A broken timing SEI is dropped rather than failing the frame,
      * unless explode-on-error is requested. */
1162  if (ret < 0) {
1163  av_log(h->avctx, AV_LOG_ERROR, "Error processing a picture timing SEI\n");
1164  if (h->avctx->err_recognition & AV_EF_EXPLODE)
1165  return ret;
1166  h->sei.picture_timing.present = 0;
1167  }
1168  }
1169 
1170  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1171  H264SEIPictureTiming *pt = &h->sei.picture_timing;
     /* Case labels elided (original lines 1173, 1175-1176, 1179-1181,
      * 1187-1188, 1194, 1197); the bodies below correspond to the
      * H264_SEI_PIC_STRUCT_* values in ascending order. */
1171  switch (pt->pic_struct) {
1174  break;
1177  out->interlaced_frame = 1;
1178  break;
1182  out->interlaced_frame = 1;
1183  else
1184  // try to flag soft telecine progressive
1185  out->interlaced_frame = h->prev_interlaced_frame;
1186  break;
1189  /* Signal the possibility of telecined film externally
1190  * (pic_struct 5,6). From these hints, let the applications
1191  * decide if they apply deinterlacing. */
1192  out->repeat_pict = 1;
1193  break;
1195  out->repeat_pict = 2;
1196  break;
1198  out->repeat_pict = 4;
1199  break;
1200  }
1201 
     /* ct_type, when valid for frame/field pic_structs, overrides the
      * derived interlacing flag. */
1202  if ((pt->ct_type & 3) &&
1203  pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
1204  out->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
1205  } else {
1206  /* Derive interlacing flag from used decoding process. */
1207  out->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
1208  }
1209  h->prev_interlaced_frame = out->interlaced_frame;
1210 
1211  if (cur->field_poc[0] != cur->field_poc[1]) {
1212  /* Derive top_field_first from field pocs. */
1213  out->top_field_first = cur->field_poc[0] < cur->field_poc[1];
1214  } else {
1215  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1216  /* Use picture timing SEI information. Even if it is a
1217  * information of a past frame, better than nothing. */
1218  if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
1219  h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
1220  out->top_field_first = 1;
1221  else
1222  out->top_field_first = 0;
1223  } else if (out->interlaced_frame) {
1224  /* Default to top field first when pic_struct_present_flag
1225  * is not set but interlaced frame detected */
1226  out->top_field_first = 1;
1227  } else {
1228  /* Most likely progressive */
1229  out->top_field_first = 0;
1230  }
1231  }
1232 
     /* Attach generic H.264/HEVC-shared SEI side data (A53 CC, HDR
      * metadata, etc.) to the output frame. */
1233  ret = ff_h2645_sei_to_frame(out, &h->sei.common, AV_CODEC_ID_H264, h->avctx,
1234  &sps->vui, sps->bit_depth_luma, sps->bit_depth_chroma,
1235  cur->poc + (unsigned)(h->poc_offset << 5));
1236  if (ret < 0)
1237  return ret;
1238 
     /* Export up to 3 SMPTE 12M timecodes as AV_FRAME_DATA_S12M_TIMECODE
      * side data; 'tcside' is declared/allocated on the elided lines
      * 1243-1244 (presumably av_frame_new_side_data — confirm). */
1239  if (h->sei.picture_timing.timecode_cnt > 0) {
1240  uint32_t *tc_sd;
1241  char tcbuf[AV_TIMECODE_STR_SIZE];
1242 
1245  sizeof(uint32_t)*4);
1246  if (!tcside)
1247  return AVERROR(ENOMEM);
1248 
1249  tc_sd = (uint32_t*)tcside->data;
1250  tc_sd[0] = h->sei.picture_timing.timecode_cnt;
1251 
1252  for (int i = 0; i < tc_sd[0]; i++) {
1253  int drop = h->sei.picture_timing.timecode[i].dropframe;
1254  int hh = h->sei.picture_timing.timecode[i].hours;
1255  int mm = h->sei.picture_timing.timecode[i].minutes;
1256  int ss = h->sei.picture_timing.timecode[i].seconds;
1257  int ff = h->sei.picture_timing.timecode[i].frame;
1258 
1259  tc_sd[i + 1] = av_timecode_get_smpte(h->avctx->framerate, drop, hh, mm, ss, ff);
1260  av_timecode_make_smpte_tc_string2(tcbuf, h->avctx->framerate, tc_sd[i + 1], 0, 0);
1261  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
1262  }
     /* Consume the SEI so the same timecodes are not exported twice. */
1263  h->sei.picture_timing.timecode_cnt = 0;
1264  }
1265 
1266  return 0;
1267 }
1268 
/* h264_select_output_frame (signature on elided line 1269): inserts the
 * current picture into the delayed-picture (reorder) buffer, estimates the
 * needed reorder depth from observed POCs, and picks the next picture to
 * output in display order (h->next_output_pic), handling recovery-point
 * and corrupt-frame gating. */
1270 {
1271  const SPS *sps = h->ps.sps;
1272  H264Picture *out = h->cur_pic_ptr;
1273  H264Picture *cur = h->cur_pic_ptr;
1274  int i, pics, out_of_order, out_idx;
1275 
1276  cur->mmco_reset = h->mmco_reset;
1277  h->mmco_reset = 0;
1278 
     /* Trust the stream's declared reorder depth when present (or when
      * strict compliance forbids guessing). */
1279  if (sps->bitstream_restriction_flag ||
1280  h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
1281  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
1282  }
1283 
     /* Insertion sort of cur->poc into the sliding last_pocs[] window;
      * the loop exit index i measures how far out of order this POC is. */
1284  for (i = 0; 1; i++) {
1285  if(i == H264_MAX_DPB_FRAMES || cur->poc < h->last_pocs[i]){
1286  if(i)
1287  h->last_pocs[i-1] = cur->poc;
1288  break;
1289  } else if(i) {
1290  h->last_pocs[i-1]= h->last_pocs[i];
1291  }
1292  }
1293  out_of_order = H264_MAX_DPB_FRAMES - i;
1294  if( cur->f->pict_type == AV_PICTURE_TYPE_B
1295  || (h->last_pocs[H264_MAX_DPB_FRAMES-2] > INT_MIN && h->last_pocs[H264_MAX_DPB_FRAMES-1] - (int64_t)h->last_pocs[H264_MAX_DPB_FRAMES-2] > 2))
1296  out_of_order = FFMAX(out_of_order, 1);
     /* Maximally out-of-order POC: treat as a POC discontinuity and reset
      * the tracking window rather than growing the reorder buffer. */
1297  if (out_of_order == H264_MAX_DPB_FRAMES) {
1298  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
1299  for (i = 1; i < H264_MAX_DPB_FRAMES; i++)
1300  h->last_pocs[i] = INT_MIN;
1301  h->last_pocs[0] = cur->poc;
1302  cur->mmco_reset = 1;
1303  } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
1304  int loglevel = h->avctx->frame_num > 1 ? AV_LOG_WARNING : AV_LOG_VERBOSE;
1305  av_log(h->avctx, loglevel, "Increasing reorder buffer to %d\n", out_of_order);
1306  h->avctx->has_b_frames = out_of_order;
1307  }
1308 
1309  pics = 0;
1310  while (h->delayed_pic[pics])
1311  pics++;
1312 
     /* Elided line 1313 is presumably a bound assertion on 'pics' —
      * confirm against the full source. */
1314 
1315  h->delayed_pic[pics++] = cur;
1316  if (cur->reference == 0)
1317  cur->reference = DELAYED_PIC_REF;
1318 
     /* Select the lowest-POC delayed picture, stopping the scan at the
      * first keyframe/MMCO-reset (those act as output barriers). */
1319  out = h->delayed_pic[0];
1320  out_idx = 0;
1321  for (i = 1; h->delayed_pic[i] &&
1322  !h->delayed_pic[i]->f->key_frame &&
1323  !h->delayed_pic[i]->mmco_reset;
1324  i++)
1325  if (h->delayed_pic[i]->poc < out->poc) {
1326  out = h->delayed_pic[i];
1327  out_idx = i;
1328  }
1329  if (h->avctx->has_b_frames == 0 &&
1330  (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
1331  h->next_outputed_poc = INT_MIN;
1332  out_of_order = out->poc < h->next_outputed_poc;
1333 
     /* Drop the candidate from the delayed list once it will be output
      * (or is hopelessly out of order). */
1334  if (out_of_order || pics > h->avctx->has_b_frames) {
1335  out->reference &= ~DELAYED_PIC_REF;
1336  for (i = out_idx; h->delayed_pic[i]; i++)
1337  h->delayed_pic[i] = h->delayed_pic[i + 1];
1338  }
1339  if (!out_of_order && pics > h->avctx->has_b_frames) {
1340  h->next_output_pic = out;
1341  if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
1342  h->next_outputed_poc = INT_MIN;
1343  } else
1344  h->next_outputed_poc = out->poc;
1345 
1346  if (out->recovered) {
1347  // We have reached an recovery point and all frames after it in
1348  // display order are "recovered".
1349  h->frame_recovered |= FRAME_RECOVERED_SEI;
1350  }
1351  out->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
1352 
     /* Unrecovered frames are suppressed unless the user asked to see
      * corrupt output; otherwise they are flagged as corrupt. */
1353  if (!out->recovered) {
1354  if (!(h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) &&
1355  !(h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)) {
1356  h->next_output_pic = NULL;
1357  } else {
1358  out->f->flags |= AV_FRAME_FLAG_CORRUPT;
1359  }
1360  }
1361  } else {
1362  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
1363  }
1364 
1365  return 0;
1366 }
1367 
1368 /* This function is called right after decoding the slice header for a first
1369  * slice in a field (or a frame). It decides whether we are decoding a new frame
1370  * or a second field in a pair and does the necessary setup.
1371  */
/* h264_field_start (first signature line elided; the visible continuation
 * below carries the nal/first_slice parameters): runs after the slice
 * header of the first slice of a field/frame is parsed. Activates
 * parameter sets, copies per-slice POC state into the context, conceals
 * frame_num gaps, decides new-frame vs second-field-of-a-pair, starts the
 * new frame, and exports frame properties. Comments only were added; code
 * lines are byte-identical to this rendering (elided lines noted inline). */
1372  const H2645NAL *nal, int first_slice)
1373 {
1374  int i;
1375  const SPS *sps;
1376 
1377  int last_pic_structure, last_pic_droppable, ret;
1378 
1379  ret = h264_init_ps(h, sl, first_slice);
1380  if (ret < 0)
1381  return ret;
1382 
1383  sps = h->ps.sps;
1384 
1385  if (sps && sps->bitstream_restriction_flag &&
1386  h->avctx->has_b_frames < sps->num_reorder_frames) {
1387  h->avctx->has_b_frames = sps->num_reorder_frames;
1388  }
1389 
     /* Remember the previous field's structure/droppability so we can
      * detect (and validate) a complementary field pair below. */
1390  last_pic_droppable = h->droppable;
1391  last_pic_structure = h->picture_structure;
1392  h->droppable = (nal->ref_idc == 0);
1393  h->picture_structure = sl->picture_structure;
1394 
     /* Promote the parsed slice-header POC fields to the context. */
1395  h->poc.frame_num = sl->frame_num;
1396  h->poc.poc_lsb = sl->poc_lsb;
1397  h->poc.delta_poc_bottom = sl->delta_poc_bottom;
1398  h->poc.delta_poc[0] = sl->delta_poc[0];
1399  h->poc.delta_poc[1] = sl->delta_poc[1];
1400 
1401  if (nal->type == H264_NAL_IDR_SLICE)
1402  h->poc_offset = sl->idr_pic_id;
1403  else if (h->picture_intra_only)
1404  h->poc_offset = 0;
1405 
1406  /* Shorten frame num gaps so we don't have to allocate reference
1407  * frames just to throw them away */
1408  if (h->poc.frame_num != h->poc.prev_frame_num) {
1409  int unwrap_prev_frame_num = h->poc.prev_frame_num;
1410  int max_frame_num = 1 << sps->log2_max_frame_num;
1411 
1412  if (unwrap_prev_frame_num > h->poc.frame_num)
1413  unwrap_prev_frame_num -= max_frame_num;
1414 
1415  if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
1416  unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
1417  if (unwrap_prev_frame_num < 0)
1418  unwrap_prev_frame_num += max_frame_num;
1419 
1420  h->poc.prev_frame_num = unwrap_prev_frame_num;
1421  }
1422  }
1423 
1424  /* See if we have a decoded first field looking for a pair...
1425  * Here, we're using that to see if we should mark previously
1426  * decode frames as "finished".
1427  * We have to do that before the "dummy" in-between frame allocation,
1428  * since that can modify h->cur_pic_ptr. */
1429  if (h->first_field) {
1430  int last_field = last_pic_structure == PICT_BOTTOM_FIELD;
1431  av_assert0(h->cur_pic_ptr);
1432  av_assert0(h->cur_pic_ptr->f->buf[0]);
1433  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1434 
1435  /* Mark old field/frame as completed */
1436  if (h->cur_pic_ptr->tf.owner[last_field] == h->avctx) {
1437  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_field);
1438  }
1439 
1440  /* figure out if we have a complementary field pair */
1441  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1442  /* Previous field is unmatched. Don't display it, but let it
1443  * remain for reference if marked as such. */
1444  if (last_pic_structure != PICT_FRAME) {
1445  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1446  last_pic_structure == PICT_TOP_FIELD);
1447  }
1448  } else {
1449  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1450  /* This and previous field were reference, but had
1451  * different frame_nums. Consider this field first in
1452  * pair. Throw away previous field except for reference
1453  * purposes. */
1454  if (last_pic_structure != PICT_FRAME) {
1455  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1456  last_pic_structure == PICT_TOP_FIELD);
1457  }
1458  } else {
1459  /* Second field in complementary pair */
1460  if (!((last_pic_structure == PICT_TOP_FIELD &&
1461  h->picture_structure == PICT_BOTTOM_FIELD) ||
1462  (last_pic_structure == PICT_BOTTOM_FIELD &&
1463  h->picture_structure == PICT_TOP_FIELD))) {
1464  av_log(h->avctx, AV_LOG_ERROR,
1465  "Invalid field mode combination %d/%d\n",
1466  last_pic_structure, h->picture_structure);
1467  h->picture_structure = last_pic_structure;
1468  h->droppable = last_pic_droppable;
1469  return AVERROR_INVALIDDATA;
1470  } else if (last_pic_droppable != h->droppable) {
1471  avpriv_request_sample(h->avctx,
1472  "Found reference and non-reference fields in the same frame, which");
1473  h->picture_structure = last_pic_structure;
1474  h->droppable = last_pic_droppable;
1475  return AVERROR_PATCHWELCOME;
1476  }
1477  }
1478  }
1479  }
1480 
     /* frame_num gap: synthesize "dummy" frames so reference indexing
      * stays consistent, concealing by copying the previous short ref. */
1481  while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
1482  h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
1483  const H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
1484  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
1485  h->poc.frame_num, h->poc.prev_frame_num);
1486  if (!sps->gaps_in_frame_num_allowed_flag)
1487  for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
1488  h->last_pocs[i] = INT_MIN;
1489  ret = h264_frame_start(h);
1490  if (ret < 0) {
1491  h->first_field = 0;
1492  return ret;
1493  }
1494 
1495  h->poc.prev_frame_num++;
1496  h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num;
1497  h->cur_pic_ptr->frame_num = h->poc.prev_frame_num;
1498  h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
1499  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
1500  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
1501 
     /* Reference marking for the dummy frame happens on the elided line
      * 1504; 'ret' below carries its status. */
1502  h->explicit_ref_marking = 0;
1505  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1506  return ret;
1507  /* Error concealment: If a ref is missing, copy the previous ref
1508  * in its place.
1509  * FIXME: Avoiding a memcpy would be nice, but ref handling makes
1510  * many assumptions about there being no actual duplicates.
1511  * FIXME: This does not copy padding for out-of-frame motion
1512  * vectors. Given we are concealing a lost frame, this probably
1513  * is not noticeable by comparison, but it should be fixed. */
1514  if (h->short_ref_count) {
     /* Mid-gray fill values per plane, used when no previous ref of a
      * matching geometry exists to copy from. */
1515  int c[4] = {
1516  1<<(h->ps.sps->bit_depth_luma-1),
1517  1<<(h->ps.sps->bit_depth_chroma-1),
1518  1<<(h->ps.sps->bit_depth_chroma-1),
1519  -1
1520  };
1521 
1522  if (prev &&
1523  h->short_ref[0]->f->width == prev->f->width &&
1524  h->short_ref[0]->f->height == prev->f->height &&
1525  h->short_ref[0]->f->format == prev->f->format) {
1526  ff_thread_await_progress(&prev->tf, INT_MAX, 0);
1527  if (prev->field_picture)
1528  ff_thread_await_progress(&prev->tf, INT_MAX, 1);
1529  ff_thread_release_ext_buffer(h->avctx, &h->short_ref[0]->tf);
1530  h->short_ref[0]->tf.f = h->short_ref[0]->f;
1531  ret = ff_thread_ref_frame(&h->short_ref[0]->tf, &prev->tf);
1532  if (ret < 0)
1533  return ret;
1534  h->short_ref[0]->poc = prev->poc + 2U;
1535  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 0);
1536  if (h->short_ref[0]->field_picture)
1537  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 1);
1538  } else if (!h->frame_recovered && !h->avctx->hwaccel)
1539  ff_color_frame(h->short_ref[0]->f, c);
1540  h->short_ref[0]->frame_num = h->poc.prev_frame_num;
1541  }
1542  }
1543 
1544  /* See if we have a decoded first field looking for a pair...
1545  * We're using that to see whether to continue decoding in that
1546  * frame, or to allocate a new one. */
1547  if (h->first_field) {
1548  av_assert0(h->cur_pic_ptr);
1549  av_assert0(h->cur_pic_ptr->f->buf[0]);
1550  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1551 
1552  /* figure out if we have a complementary field pair */
1553  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1554  /* Previous field is unmatched. Don't display it, but let it
1555  * remain for reference if marked as such. */
1556  h->missing_fields ++;
1557  h->cur_pic_ptr = NULL;
1558  h->first_field = FIELD_PICTURE(h);
1559  } else {
1560  h->missing_fields = 0;
1561  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1562  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1563  h->picture_structure==PICT_BOTTOM_FIELD);
1564  /* This and the previous field had different frame_nums.
1565  * Consider this field first in pair. Throw away previous
1566  * one except for reference purposes. */
1567  h->first_field = 1;
1568  h->cur_pic_ptr = NULL;
1569  } else if (h->cur_pic_ptr->reference & DELAYED_PIC_REF) {
1570  /* This frame was already output, we cannot draw into it
1571  * anymore.
1572  */
1573  h->first_field = 1;
1574  h->cur_pic_ptr = NULL;
1575  } else {
1576  /* Second field in complementary pair */
1577  h->first_field = 0;
1578  }
1579  }
1580  } else {
1581  /* Frame or first field in a potentially complementary pair */
1582  h->first_field = FIELD_PICTURE(h);
1583  }
1584 
1585  if (!FIELD_PICTURE(h) || h->first_field) {
1586  if (h264_frame_start(h) < 0) {
1587  h->first_field = 0;
1588  return AVERROR_INVALIDDATA;
1589  }
1590  } else {
     /* Second field reuses the current picture; the elided line 1592
      * presumably releases/reports the unneeded resources — confirm. */
1591  int field = h->picture_structure == PICT_BOTTOM_FIELD;
1593  h->cur_pic_ptr->tf.owner[field] = h->avctx;
1594  }
1595  /* Some macroblocks can be accessed before they're available in case
1596  * of lost slices, MBAFF or threading. */
1597  if (FIELD_PICTURE(h)) {
1598  for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
1599  memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
1600  } else {
1601  memset(h->slice_table, -1,
1602  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
1603  }
1604 
1605  ret = ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
1606  h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);
1607  if (ret < 0)
1608  return ret;
1609 
     /* Adopt the slice's memory-management control operations. */
1610  memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
1611  h->nb_mmco = sl->nb_mmco;
1612  h->explicit_ref_marking = sl->explicit_ref_marking;
1613 
1614  h->picture_idr = nal->type == H264_NAL_IDR_SLICE;
1615 
     /* Recovery point SEI: compute the frame_num at which output becomes
      * trustworthy again (modulo max_frame_num wraparound). */
1616  if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
1617  const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
1618 
1619  if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
1620  h->valid_recovery_point = 1;
1621 
1622  if ( h->recovery_frame < 0
1623  || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
1624  h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
1625 
1626  if (!h->valid_recovery_point)
1627  h->recovery_frame = h->poc.frame_num;
1628  }
1629  }
1630 
1631  h->cur_pic_ptr->f->key_frame |= (nal->type == H264_NAL_IDR_SLICE);
1632 
1633  if (nal->type == H264_NAL_IDR_SLICE ||
1634  (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
1635  h->recovery_frame = -1;
1636  h->cur_pic_ptr->recovered = 1;
1637  }
1638  // If we have an IDR, all frames after it in decoded order are
1639  // "recovered".
1640  if (nal->type == H264_NAL_IDR_SLICE)
1641  h->frame_recovered |= FRAME_RECOVERED_IDR;
1642 #if 1
1643  h->cur_pic_ptr->recovered |= h->frame_recovered;
1644 #else
1645  h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
1646 #endif
1647 
1648  /* Set the frame properties/side data. Only done for the second field in
1649  * field coded frames, since some SEI information is present for each field
1650  * and is merged by the SEI parsing code. */
1651  if (!FIELD_PICTURE(h) || !h->first_field || h->missing_fields > 1) {
     /* The calls on the elided lines 1652 and 1656 presumably assign
      * 'ret' from h264_export_frame_props() and
      * h264_select_output_frame() — confirm against the full source. */
1653  if (ret < 0)
1654  return ret;
1655 
1657  if (ret < 0)
1658  return ret;
1659  }
1660 
1661  return 0;
1662 }
1663 
/* h264_slice_header_parse (first signature line elided; the visible
 * continuation carries the nal parameter): parses a slice header from
 * sl->gb into the per-slice context only — no H264Context state is
 * modified here, so parsing can fail without corrupting the decoder.
 * Returns 0 on success or a negative AVERROR code. Comments only were
 * added; code lines are byte-identical (elided lines noted inline). */
1664  const H2645NAL *nal)
1665 {
1666  const SPS *sps;
1667  const PPS *pps;
1668  int ret;
1669  unsigned int slice_type, tmp, i;
1670  int field_pic_flag, bottom_field_flag;
1671  int first_slice = sl == h->slice_ctx && !h->current_slice;
1672  int picture_structure;
1673 
1674  if (first_slice)
1675  av_assert0(!h->setup_finished);
1676 
1677  sl->first_mb_addr = get_ue_golomb_long(&sl->gb);
1678 
     /* slice_type 5..9 means "fixed for the whole picture"; fold it back
      * to 0..4 and remember the fixed flag. */
1679  slice_type = get_ue_golomb_31(&sl->gb);
1680  if (slice_type > 9) {
1681  av_log(h->avctx, AV_LOG_ERROR,
1682  "slice type %d too large at %d\n",
1683  slice_type, sl->first_mb_addr);
1684  return AVERROR_INVALIDDATA;
1685  }
1686  if (slice_type > 4) {
1687  slice_type -= 5;
1688  sl->slice_type_fixed = 1;
1689  } else
1690  sl->slice_type_fixed = 0;
1691 
1692  slice_type = ff_h264_golomb_to_pict_type[slice_type];
1693  sl->slice_type = slice_type;
1694  sl->slice_type_nos = slice_type & 3;
1695 
     /* Condition continues on the elided line 1698 (presumably checking
      * slice_type_nos != AV_PICTURE_TYPE_I — confirm). */
1697  if (nal->type == H264_NAL_IDR_SLICE &&
1699  av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
1700  return AVERROR_INVALIDDATA;
1701  }
1702 
1703  sl->pps_id = get_ue_golomb(&sl->gb);
1704  if (sl->pps_id >= MAX_PPS_COUNT) {
1705  av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
1706  return AVERROR_INVALIDDATA;
1707  }
1708  if (!h->ps.pps_list[sl->pps_id]) {
1709  av_log(h->avctx, AV_LOG_ERROR,
1710  "non-existing PPS %u referenced\n",
1711  sl->pps_id);
1712  return AVERROR_INVALIDDATA;
1713  }
     /* Local PPS/SPS pointers only; activation happens later in
      * h264_init_ps() on the first slice. */
1714  pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
1715  sps = pps->sps;
1716 
1717  sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
     /* All slices of one picture must agree on frame_num. */
1718  if (!first_slice) {
1719  if (h->poc.frame_num != sl->frame_num) {
1720  av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
1721  h->poc.frame_num, sl->frame_num);
1722  return AVERROR_INVALIDDATA;
1723  }
1724  }
1725 
1726  sl->mb_mbaff = 0;
1727 
1728  if (sps->frame_mbs_only_flag) {
1729  picture_structure = PICT_FRAME;
1730  } else {
1731  if (!sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
1732  av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
1733  return -1;
1734  }
1735  field_pic_flag = get_bits1(&sl->gb);
1736  if (field_pic_flag) {
1737  bottom_field_flag = get_bits1(&sl->gb);
1738  picture_structure = PICT_TOP_FIELD + bottom_field_flag;
1739  } else {
1740  picture_structure = PICT_FRAME;
1741  }
1742  }
1743  sl->picture_structure = picture_structure;
1744  sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;
1745 
     /* Field pictures use 2*frame_num+1 numbering with one extra bit. */
1746  if (picture_structure == PICT_FRAME) {
1747  sl->curr_pic_num = sl->frame_num;
1748  sl->max_pic_num = 1 << sps->log2_max_frame_num;
1749  } else {
1750  sl->curr_pic_num = 2 * sl->frame_num + 1;
1751  sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
1752  }
1753 
1754  if (nal->type == H264_NAL_IDR_SLICE) {
1755  unsigned idr_pic_id = get_ue_golomb_long(&sl->gb);
1756  if (idr_pic_id < 65536) {
1757  sl->idr_pic_id = idr_pic_id;
1758  } else
1759  av_log(h->avctx, AV_LOG_WARNING, "idr_pic_id is invalid\n");
1760  }
1761 
     /* POC syntax elements depend on the SPS poc_type. */
1762  sl->poc_lsb = 0;
1763  sl->delta_poc_bottom = 0;
1764  if (sps->poc_type == 0) {
1765  sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
1766 
1767  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1768  sl->delta_poc_bottom = get_se_golomb(&sl->gb);
1769  }
1770 
1771  sl->delta_poc[0] = sl->delta_poc[1] = 0;
1772  if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
1773  sl->delta_poc[0] = get_se_golomb(&sl->gb);
1774 
1775  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1776  sl->delta_poc[1] = get_se_golomb(&sl->gb);
1777  }
1778 
1779  sl->redundant_pic_count = 0;
1780  if (pps->redundant_pic_cnt_present)
1781  sl->redundant_pic_count = get_ue_golomb(&sl->gb);
1782 
1783  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1784  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
1785 
     /* Reference count parsing on the elided line 1786 (presumably
      * ret = ff_h264_parse_ref_count(...) — confirm). */
1787  &sl->gb, pps, sl->slice_type_nos,
1788  picture_structure, h->avctx);
1789  if (ret < 0)
1790  return ret;
1791 
     /* Reference list reordering call on the elided line 1793. */
1792  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
1794  if (ret < 0) {
1795  sl->ref_count[1] = sl->ref_count[0] = 0;
1796  return ret;
1797  }
1798  }
1799 
1800  sl->pwt.use_weight = 0;
1801  for (i = 0; i < 2; i++) {
1802  sl->pwt.luma_weight_flag[i] = 0;
1803  sl->pwt.chroma_weight_flag[i] = 0;
1804  }
     /* Explicit weighted prediction table; the condition's tail and the
      * ff_h264_pred_weight_table call are on the elided lines 1807-1808. */
1805  if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
1806  (pps->weighted_bipred_idc == 1 &&
1809  sl->slice_type_nos, &sl->pwt,
1810  picture_structure, h->avctx);
1811  if (ret < 0)
1812  return ret;
1813  }
1814 
1815  sl->explicit_ref_marking = 0;
1816  if (nal->ref_idc) {
1817  ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx);
1818  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1819  return AVERROR_INVALIDDATA;
1820  }
1821 
1822  if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
1823  tmp = get_ue_golomb_31(&sl->gb);
1824  if (tmp > 2) {
1825  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
1826  return AVERROR_INVALIDDATA;
1827  }
1828  sl->cabac_init_idc = tmp;
1829  }
1830 
     /* slice_qp_delta; the valid QP range grows with bit depth. */
1831  sl->last_qscale_diff = 0;
1832  tmp = pps->init_qp + (unsigned)get_se_golomb(&sl->gb);
1833  if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
1834  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
1835  return AVERROR_INVALIDDATA;
1836  }
1837  sl->qscale = tmp;
1838  sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
1839  sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
1840  // FIXME qscale / qp ... stuff
1841  if (sl->slice_type == AV_PICTURE_TYPE_SP)
1842  get_bits1(&sl->gb); /* sp_for_switch_flag */
     /* Condition tail on the elided line 1844 (presumably the SI slice
      * type — confirm); the element is parsed and discarded either way. */
1843  if (sl->slice_type == AV_PICTURE_TYPE_SP ||
1845  get_se_golomb(&sl->gb); /* slice_qs_delta */
1846 
     /* Deblocking filter control: idc 0/1 are swapped into an internal
      * "enabled" convention (1<->0), idc 2 = no cross-slice deblock. */
1847  sl->deblocking_filter = 1;
1848  sl->slice_alpha_c0_offset = 0;
1849  sl->slice_beta_offset = 0;
1850  if (pps->deblocking_filter_parameters_present) {
1851  tmp = get_ue_golomb_31(&sl->gb);
1852  if (tmp > 2) {
1853  av_log(h->avctx, AV_LOG_ERROR,
1854  "deblocking_filter_idc %u out of range\n", tmp);
1855  return AVERROR_INVALIDDATA;
1856  }
1857  sl->deblocking_filter = tmp;
1858  if (sl->deblocking_filter < 2)
1859  sl->deblocking_filter ^= 1; // 1<->0
1860 
1861  if (sl->deblocking_filter) {
1862  int slice_alpha_c0_offset_div2 = get_se_golomb(&sl->gb);
1863  int slice_beta_offset_div2 = get_se_golomb(&sl->gb);
1864  if (slice_alpha_c0_offset_div2 > 6 ||
1865  slice_alpha_c0_offset_div2 < -6 ||
1866  slice_beta_offset_div2 > 6 ||
1867  slice_beta_offset_div2 < -6) {
1868  av_log(h->avctx, AV_LOG_ERROR,
1869  "deblocking filter parameters %d %d out of range\n",
1870  slice_alpha_c0_offset_div2, slice_beta_offset_div2);
1871  return AVERROR_INVALIDDATA;
1872  }
1873  sl->slice_alpha_c0_offset = slice_alpha_c0_offset_div2 * 2;
1874  sl->slice_beta_offset = slice_beta_offset_div2 * 2;
1875  }
1876  }
1877 
1878  return 0;
1879 }
1880 
1881 /* do all the per-slice initialization needed before we can start decoding the
1882  * actual MBs */
1884  const H2645NAL *nal)
1885 {
1886  int i, j, ret = 0;
1887 
1888  if (h->picture_idr && nal->type != H264_NAL_IDR_SLICE) {
1889  av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of IDR and non-IDR slices\n");
1890  return AVERROR_INVALIDDATA;
1891  }
1892 
1893  av_assert1(h->mb_num == h->mb_width * h->mb_height);
1894  if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
1895  sl->first_mb_addr >= h->mb_num) {
1896  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
1897  return AVERROR_INVALIDDATA;
1898  }
1899  sl->resync_mb_x = sl->mb_x = sl->first_mb_addr % h->mb_width;
1900  sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
1902  if (h->picture_structure == PICT_BOTTOM_FIELD)
1903  sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
1904  av_assert1(sl->mb_y < h->mb_height);
1905 
1906  ret = ff_h264_build_ref_list(h, sl);
1907  if (ret < 0)
1908  return ret;
1909 
1910  if (h->ps.pps->weighted_bipred_idc == 2 &&
1912  implicit_weight_table(h, sl, -1);
1913  if (FRAME_MBAFF(h)) {
1914  implicit_weight_table(h, sl, 0);
1915  implicit_weight_table(h, sl, 1);
1916  }
1917  }
1918 
1921  if (!h->setup_finished)
1923 
1924  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
1925  (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
1926  h->nal_unit_type != H264_NAL_IDR_SLICE) ||
1927  (h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
1929  (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
1931  (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
1932  nal->ref_idc == 0))
1933  sl->deblocking_filter = 0;
1934 
1935  if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
1936  if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
1937  /* Cheat slightly for speed:
1938  * Do not bother to deblock across slices. */
1939  sl->deblocking_filter = 2;
1940  } else {
1941  h->postpone_filter = 1;
1942  }
1943  }
1944  sl->qp_thresh = 15 -
1946  FFMAX3(0,
1947  h->ps.pps->chroma_qp_index_offset[0],
1948  h->ps.pps->chroma_qp_index_offset[1]) +
1949  6 * (h->ps.sps->bit_depth_luma - 8);
1950 
1951  sl->slice_num = ++h->current_slice;
1952 
1953  if (sl->slice_num)
1954  h->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= sl->resync_mb_y;
1955  if ( h->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= sl->resync_mb_y
1956  && h->slice_row[sl->slice_num&(MAX_SLICES-1)] <= sl->resync_mb_y
1957  && sl->slice_num >= MAX_SLICES) {
1958  //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
1959  av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
1960  }
1961 
1962  for (j = 0; j < 2; j++) {
1963  int id_list[16];
1964  int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
1965  for (i = 0; i < 16; i++) {
1966  id_list[i] = 60;
1967  if (j < sl->list_count && i < sl->ref_count[j] &&
1968  sl->ref_list[j][i].parent->f->buf[0]) {
1969  int k;
1970  AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
1971  for (k = 0; k < h->short_ref_count; k++)
1972  if (h->short_ref[k]->f->buf[0]->buffer == buf) {
1973  id_list[i] = k;
1974  break;
1975  }
1976  for (k = 0; k < h->long_ref_count; k++)
1977  if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
1978  id_list[i] = h->short_ref_count + k;
1979  break;
1980  }
1981  }
1982  }
1983 
1984  ref2frm[0] =
1985  ref2frm[1] = -1;
1986  for (i = 0; i < 16; i++)
1987  ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
1988  ref2frm[18 + 0] =
1989  ref2frm[18 + 1] = -1;
1990  for (i = 16; i < 48; i++)
1991  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
1992  (sl->ref_list[j][i].reference & 3);
1993  }
1994 
1995  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
1996  av_log(h->avctx, AV_LOG_DEBUG,
1997  "slice:%d %c mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
1998  sl->slice_num,
1999  (h->picture_structure == PICT_FRAME ? 'F' : h->picture_structure == PICT_TOP_FIELD ? 'T' : 'B'),
2000  sl->mb_y * h->mb_width + sl->mb_x,
2002  sl->slice_type_fixed ? " fix" : "",
2003  nal->type == H264_NAL_IDR_SLICE ? " IDR" : "",
2004  h->poc.frame_num,
2005  h->cur_pic_ptr->field_poc[0],
2006  h->cur_pic_ptr->field_poc[1],
2007  sl->ref_count[0], sl->ref_count[1],
2008  sl->qscale,
2009  sl->deblocking_filter,
2011  sl->pwt.use_weight,
2012  sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
2013  sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
2014  }
2015 
2016  return 0;
2017 }
2018 
2020 {
2021  H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
2022  int first_slice = sl == h->slice_ctx && !h->current_slice;
2023  int ret;
2024 
2025  sl->gb = nal->gb;
2026 
2027  ret = h264_slice_header_parse(h, sl, nal);
2028  if (ret < 0)
2029  return ret;
2030 
2031  // discard redundant pictures
2032  if (sl->redundant_pic_count > 0) {
2033  sl->ref_count[0] = sl->ref_count[1] = 0;
2034  return 0;
2035  }
2036 
2037  if (sl->first_mb_addr == 0 || !h->current_slice) {
2038  if (h->setup_finished) {
2039  av_log(h->avctx, AV_LOG_ERROR, "Too many fields\n");
2040  return AVERROR_INVALIDDATA;
2041  }
2042  }
2043 
2044  if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
2045  if (h->current_slice) {
2046  // this slice starts a new field
2047  // first decode any pending queued slices
2048  if (h->nb_slice_ctx_queued) {
2049  H264SliceContext tmp_ctx;
2050 
2052  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
2053  return ret;
2054 
2055  memcpy(&tmp_ctx, h->slice_ctx, sizeof(tmp_ctx));
2056  memcpy(h->slice_ctx, sl, sizeof(tmp_ctx));
2057  memcpy(sl, &tmp_ctx, sizeof(tmp_ctx));
2058  sl = h->slice_ctx;
2059  }
2060 
2061  if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
2062  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2063  if (ret < 0)
2064  return ret;
2065  } else if (h->cur_pic_ptr && !FIELD_PICTURE(h) && !h->first_field && h->nal_unit_type == H264_NAL_IDR_SLICE) {
2066  av_log(h, AV_LOG_WARNING, "Broken frame packetizing\n");
2067  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2068  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
2069  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
2070  h->cur_pic_ptr = NULL;
2071  if (ret < 0)
2072  return ret;
2073  } else
2074  return AVERROR_INVALIDDATA;
2075  }
2076 
2077  if (!h->first_field) {
2078  if (h->cur_pic_ptr && !h->droppable) {
2079  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
2080  h->picture_structure == PICT_BOTTOM_FIELD);
2081  }
2082  h->cur_pic_ptr = NULL;
2083  }
2084  }
2085 
2086  if (!h->current_slice)
2087  av_assert0(sl == h->slice_ctx);
2088 
2089  if (h->current_slice == 0 && !h->first_field) {
2090  if (
2091  (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
2092  (h->avctx->skip_frame >= AVDISCARD_BIDIR && sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
2093  (h->avctx->skip_frame >= AVDISCARD_NONINTRA && sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
2094  (h->avctx->skip_frame >= AVDISCARD_NONKEY && h->nal_unit_type != H264_NAL_IDR_SLICE && h->sei.recovery_point.recovery_frame_cnt < 0) ||
2095  h->avctx->skip_frame >= AVDISCARD_ALL) {
2096  return 0;
2097  }
2098  }
2099 
2100  if (!first_slice) {
2101  const PPS *pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
2102 
2103  if (h->ps.pps->sps_id != pps->sps_id ||
2104  h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
2105  (h->setup_finished && h->ps.pps != pps)*/) {
2106  av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
2107  return AVERROR_INVALIDDATA;
2108  }
2109  if (h->ps.sps != pps->sps) {
2110  av_log(h->avctx, AV_LOG_ERROR,
2111  "SPS changed in the middle of the frame\n");
2112  return AVERROR_INVALIDDATA;
2113  }
2114  }
2115 
2116  if (h->current_slice == 0) {
2117  ret = h264_field_start(h, sl, nal, first_slice);
2118  if (ret < 0)
2119  return ret;
2120  } else {
2121  if (h->picture_structure != sl->picture_structure ||
2122  h->droppable != (nal->ref_idc == 0)) {
2123  av_log(h->avctx, AV_LOG_ERROR,
2124  "Changing field mode (%d -> %d) between slices is not allowed\n",
2125  h->picture_structure, sl->picture_structure);
2126  return AVERROR_INVALIDDATA;
2127  } else if (!h->cur_pic_ptr) {
2128  av_log(h->avctx, AV_LOG_ERROR,
2129  "unset cur_pic_ptr on slice %d\n",
2130  h->current_slice + 1);
2131  return AVERROR_INVALIDDATA;
2132  }
2133  }
2134 
2135  ret = h264_slice_init(h, sl, nal);
2136  if (ret < 0)
2137  return ret;
2138 
2139  h->nb_slice_ctx_queued++;
2140 
2141  return 0;
2142 }
2143 
2145 {
2146  switch (sl->slice_type) {
2147  case AV_PICTURE_TYPE_P:
2148  return 0;
2149  case AV_PICTURE_TYPE_B:
2150  return 1;
2151  case AV_PICTURE_TYPE_I:
2152  return 2;
2153  case AV_PICTURE_TYPE_SP:
2154  return 3;
2155  case AV_PICTURE_TYPE_SI:
2156  return 4;
2157  default:
2158  return AVERROR_INVALIDDATA;
2159  }
2160 }
2161 
2163  H264SliceContext *sl,
2164  int mb_type, int top_xy,
2165  int left_xy[LEFT_MBS],
2166  int top_type,
2167  int left_type[LEFT_MBS],
2168  int mb_xy, int list)
2169 {
2170  int b_stride = h->b_stride;
2171  int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
2172  int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
2173  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
2174  if (USES_LIST(top_type, list)) {
2175  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
2176  const int b8_xy = 4 * top_xy + 2;
2177  const int *ref2frm = &h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2178  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
2179  ref_cache[0 - 1 * 8] =
2180  ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
2181  ref_cache[2 - 1 * 8] =
2182  ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
2183  } else {
2184  AV_ZERO128(mv_dst - 1 * 8);
2185  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2186  }
2187 
2188  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
2189  if (USES_LIST(left_type[LTOP], list)) {
2190  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
2191  const int b8_xy = 4 * left_xy[LTOP] + 1;
2192  const int *ref2frm = &h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2193  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
2194  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
2195  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
2196  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
2197  ref_cache[-1 + 0] =
2198  ref_cache[-1 + 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
2199  ref_cache[-1 + 16] =
2200  ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
2201  } else {
2202  AV_ZERO32(mv_dst - 1 + 0);
2203  AV_ZERO32(mv_dst - 1 + 8);
2204  AV_ZERO32(mv_dst - 1 + 16);
2205  AV_ZERO32(mv_dst - 1 + 24);
2206  ref_cache[-1 + 0] =
2207  ref_cache[-1 + 8] =
2208  ref_cache[-1 + 16] =
2209  ref_cache[-1 + 24] = LIST_NOT_USED;
2210  }
2211  }
2212  }
2213 
2214  if (!USES_LIST(mb_type, list)) {
2215  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
2216  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2217  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2218  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2219  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2220  return;
2221  }
2222 
2223  {
2224  int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
2225  const int *ref2frm = &h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2226  uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
2227  uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
2228  AV_WN32A(&ref_cache[0 * 8], ref01);
2229  AV_WN32A(&ref_cache[1 * 8], ref01);
2230  AV_WN32A(&ref_cache[2 * 8], ref23);
2231  AV_WN32A(&ref_cache[3 * 8], ref23);
2232  }
2233 
2234  {
2235  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
2236  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
2237  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
2238  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
2239  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
2240  }
2241 }
2242 
2243 /**
2244  * @return non-zero if the loop filter can be skipped
2245  */
/* Prepare the per-macroblock neighbour caches used by the deblocking filter
 * for the MB at sl->mb_xy: neighbour mb types, motion/ref caches (via
 * fill_filter_caches_inter()) and the non-zero-count cache.
 * Returns non-zero when filtering can be skipped entirely for this MB. */
2246 static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
2247 {
2248  const int mb_xy = sl->mb_xy;
2249  int top_xy, left_xy[LEFT_MBS];
2250  int top_type, left_type[LEFT_MBS];
2251  uint8_t *nnz;
2252  uint8_t *nnz_cache;
2253 
 /* MB above; the vertical stride doubles when decoding a field MB */
2254  top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));
2255 
2256  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
 /* In frame MBAFF the effective neighbour indices depend on the field
  * flags of the current and left MB pairs. */
2257  if (FRAME_MBAFF(h)) {
2258  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
2259  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
2260  if (sl->mb_y & 1) {
2261  if (left_mb_field_flag != curr_mb_field_flag)
2262  left_xy[LTOP] -= h->mb_stride;
2263  } else {
2264  if (curr_mb_field_flag)
2265  top_xy += h->mb_stride &
2266  (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
2267  if (left_mb_field_flag != curr_mb_field_flag)
2268  left_xy[LBOT] += h->mb_stride;
2269  }
2270  }
2271 
2272  sl->top_mb_xy = top_xy;
2273  sl->left_mb_xy[LTOP] = left_xy[LTOP];
2274  sl->left_mb_xy[LBOT] = left_xy[LBOT];
2275  {
2276  /* For sufficiently low qp, filtering wouldn't do anything.
2277  * This is a conservative estimate: could also check beta_offset
2278  * and more accurate chroma_qp. */
2279  int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
2280  int qp = h->cur_pic.qscale_table[mb_xy];
2281  if (qp <= qp_thresh &&
2282  (left_xy[LTOP] < 0 ||
2283  ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
2284  (top_xy < 0 ||
2285  ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
2286  if (!FRAME_MBAFF(h))
2287  return 1;
 /* MBAFF: also check the second left neighbour and the MB pair above */
2288  if ((left_xy[LTOP] < 0 ||
2289  ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
2290  (top_xy < h->mb_stride ||
2291  ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
2292  return 1;
2293  }
2294  }
2295 
2296  top_type = h->cur_pic.mb_type[top_xy];
2297  left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
2298  left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
 /* deblocking_filter == 2: neighbours belonging to a different slice are
  * treated as unavailable (no filtering across slice boundaries);
  * otherwise only neighbours outside any slice (slice_table == 0xFFFF)
  * are unavailable. */
2299  if (sl->deblocking_filter == 2) {
2300  if (h->slice_table[top_xy] != sl->slice_num)
2301  top_type = 0;
2302  if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
2303  left_type[LTOP] = left_type[LBOT] = 0;
2304  } else {
2305  if (h->slice_table[top_xy] == 0xFFFF)
2306  top_type = 0;
2307  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
2308  left_type[LTOP] = left_type[LBOT] = 0;
2309  }
2310  sl->top_type = top_type;
2311  sl->left_type[LTOP] = left_type[LTOP];
2312  sl->left_type[LBOT] = left_type[LBOT];
2313 
 /* intra MBs do not need the inter motion/ref and nnz caches below */
2314  if (IS_INTRA(mb_type))
2315  return 0;
2316 
2317  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2318  top_type, left_type, mb_xy, 0);
2319  if (sl->list_count == 2)
2320  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2321  top_type, left_type, mb_xy, 1);
2322 
 /* copy the current MB's non-zero-count values into the cache layout */
2323  nnz = h->non_zero_count[mb_xy];
2324  nnz_cache = sl->non_zero_count_cache;
2325  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
2326  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
2327  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
2328  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
2329  sl->cbp = h->cbp_table[mb_xy];
2330 
 /* bottom row of the MB above feeds the top edge of the cache */
2331  if (top_type) {
2332  nnz = h->non_zero_count[top_xy];
2333  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
2334  }
2335 
 /* right column of the left MB feeds the left edge of the cache */
2336  if (left_type[LTOP]) {
2337  nnz = h->non_zero_count[left_xy[LTOP]];
2338  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
2339  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
2340  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
2341  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
2342  }
2343 
2344  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
2345  * from what the loop filter needs */
2346  if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
2347  if (IS_8x8DCT(top_type)) {
2348  nnz_cache[4 + 8 * 0] =
2349  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
2350  nnz_cache[6 + 8 * 0] =
2351  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
2352  }
2353  if (IS_8x8DCT(left_type[LTOP])) {
2354  nnz_cache[3 + 8 * 1] =
2355  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
2356  }
2357  if (IS_8x8DCT(left_type[LBOT])) {
2358  nnz_cache[3 + 8 * 3] =
2359  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
2360  }
2361 
 /* rebuild the current MB's nnz cache from the cbp bits, one 8x8 block
  * at a time (each bit covers a whole 8x8 transform block) */
2362  if (IS_8x8DCT(mb_type)) {
2363  nnz_cache[scan8[0]] =
2364  nnz_cache[scan8[1]] =
2365  nnz_cache[scan8[2]] =
2366  nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;
2367 
2368  nnz_cache[scan8[0 + 4]] =
2369  nnz_cache[scan8[1 + 4]] =
2370  nnz_cache[scan8[2 + 4]] =
2371  nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;
2372 
2373  nnz_cache[scan8[0 + 8]] =
2374  nnz_cache[scan8[1 + 8]] =
2375  nnz_cache[scan8[2 + 8]] =
2376  nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;
2377 
2378  nnz_cache[scan8[0 + 12]] =
2379  nnz_cache[scan8[1 + 12]] =
2380  nnz_cache[scan8[2 + 12]] =
2381  nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
2382  }
2383  }
2384 
2385  return 0;
2386 }
2387 
/* Apply the deblocking filter to macroblock columns [start_x, end_x) of the
 * row(s) ending at sl->mb_y (two rows when frame MBAFF is active).
 * Does nothing when filtering has been postponed (h->postpone_filter) or the
 * slice has deblocking disabled. Restores sl's position, slice type and
 * chroma QP state on exit so that decoding can continue afterwards. */
2388 static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
2389 {
2390  uint8_t *dest_y, *dest_cb, *dest_cr;
2391  int linesize, uvlinesize, mb_x, mb_y;
2392  const int end_mb_y = sl->mb_y + FRAME_MBAFF(h);
2393  const int old_slice_type = sl->slice_type;
2394  const int pixel_shift = h->pixel_shift;
2395  const int block_h = 16 >> h->chroma_y_shift;
2396 
2397  if (h->postpone_filter)
2398  return;
2399 
2400  if (sl->deblocking_filter) {
2401  for (mb_x = start_x; mb_x < end_x; mb_x++)
 /* one row normally, the two rows of an MB pair when frame MBAFF */
2402  for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
2403  int mb_xy, mb_type;
2404  mb_xy = sl->mb_xy = mb_x + mb_y * h->mb_stride;
2405  mb_type = h->cur_pic.mb_type[mb_xy];
2406 
2407  if (FRAME_MBAFF(h))
2408  sl->mb_mbaff =
2409  sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
2410 
 /* compute destination pointers for luma and both chroma planes */
2411  sl->mb_x = mb_x;
2412  sl->mb_y = mb_y;
2413  dest_y = h->cur_pic.f->data[0] +
2414  ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
2415  dest_cb = h->cur_pic.f->data[1] +
2416  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2417  mb_y * sl->uvlinesize * block_h;
2418  dest_cr = h->cur_pic.f->data[2] +
2419  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2420  mb_y * sl->uvlinesize * block_h;
2421  // FIXME simplify above
2422 
 /* field macroblocks use doubled strides since the lines of the two
  * fields are interleaved in the frame buffer */
2423  if (MB_FIELD(sl)) {
2424  linesize = sl->mb_linesize = sl->linesize * 2;
2425  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
2426  if (mb_y & 1) { // FIXME move out of this function?
2427  dest_y -= sl->linesize * 15;
2428  dest_cb -= sl->uvlinesize * (block_h - 1);
2429  dest_cr -= sl->uvlinesize * (block_h - 1);
2430  }
2431  } else {
2432  linesize = sl->mb_linesize = sl->linesize;
2433  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
2434  }
2435  backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
2436  uvlinesize, 0);
 /* non-zero return means filtering is a no-op for this MB */
2437  if (fill_filter_caches(h, sl, mb_type))
2438  continue;
2439  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
2440  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);
2441 
 /* MBAFF needs the full (slower) filter; otherwise the fast path */
2442  if (FRAME_MBAFF(h)) {
2443  ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
2444  linesize, uvlinesize);
2445  } else {
2446  ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
2447  dest_cr, linesize, uvlinesize);
2448  }
2449  }
2450  }
 /* restore slice state clobbered by the per-MB loop above */
2451  sl->slice_type = old_slice_type;
2452  sl->mb_x = end_x;
2453  sl->mb_y = end_mb_y - FRAME_MBAFF(h);
2454  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
2455  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
2456 }
2457 
2459 {
2460  const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
2461  int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
2462  h->cur_pic.mb_type[mb_xy - 1] :
2463  (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
2464  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
2465  sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
2466 }
2467 
2468 /**
2469  * Draw edges and report progress for the last MB row.
2470  */
2472 {
2473  int top = 16 * (sl->mb_y >> FIELD_PICTURE(h));
2474  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
2475  int height = 16 << FRAME_MBAFF(h);
2476  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
2477 
2478  if (sl->deblocking_filter) {
2479  if ((top + height) >= pic_height)
2480  height += deblock_border;
2481  top -= deblock_border;
2482  }
2483 
2484  if (top >= pic_height || (top + height) < 0)
2485  return;
2486 
2487  height = FFMIN(height, pic_height - top);
2488  if (top < 0) {
2489  height = top + height;
2490  top = 0;
2491  }
2492 
2493  ff_h264_draw_horiz_band(h, sl, top, height);
2494 
2495  if (h->droppable || h->er.error_occurred)
2496  return;
2497 
2498  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
2499  h->picture_structure == PICT_BOTTOM_FIELD);
2500 }
2501 
2503  int startx, int starty,
2504  int endx, int endy, int status)
2505 {
2506  if (!sl->h264->enable_er)
2507  return;
2508 
2509  if (CONFIG_ERROR_RESILIENCE) {
2510  ff_er_add_slice(sl->er, startx, starty, endx, endy, status);
2511  }
2512 }
2513 
2514 static int decode_slice(struct AVCodecContext *avctx, void *arg)
2515 {
2516  H264SliceContext *sl = arg;
2517  const H264Context *h = sl->h264;
2518  int lf_x_start = sl->mb_x;
2519  int orig_deblock = sl->deblocking_filter;
2520  int ret;
2521 
2522  sl->linesize = h->cur_pic_ptr->f->linesize[0];
2523  sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];
2524 
2525  ret = alloc_scratch_buffers(sl, sl->linesize);
2526  if (ret < 0)
2527  return ret;
2528 
2529  sl->mb_skip_run = -1;
2530 
2531  av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * sl->linesize * ((scan8[15] - scan8[0]) >> 3));
2532 
2533  if (h->postpone_filter)
2534  sl->deblocking_filter = 0;
2535 
2536  sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
2537  (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
2538 
2539  if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && sl->er->error_status_table) {
2540  const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
2541  if (start_i) {
2542  int prev_status = sl->er->error_status_table[sl->er->mb_index2xy[start_i - 1]];
2543  prev_status &= ~ VP_START;
2544  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
2545  sl->er->error_occurred = 1;
2546  }
2547  }
2548 
2549  if (h->ps.pps->cabac) {
2550  /* realign */
2551  align_get_bits(&sl->gb);
2552 
2553  /* init cabac */
2555  sl->gb.buffer + get_bits_count(&sl->gb) / 8,
2556  (get_bits_left(&sl->gb) + 7) / 8);
2557  if (ret < 0)
2558  return ret;
2559 
2561 
2562  for (;;) {
2563  int ret, eos;
2564  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2565  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2566  sl->next_slice_idx);
2567  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2568  sl->mb_y, ER_MB_ERROR);
2569  return AVERROR_INVALIDDATA;
2570  }
2571 
2572  ret = ff_h264_decode_mb_cabac(h, sl);
2573 
2574  if (ret >= 0)
2575  ff_h264_hl_decode_mb(h, sl);
2576 
2577  // FIXME optimal? or let mb_decode decode 16x32 ?
2578  if (ret >= 0 && FRAME_MBAFF(h)) {
2579  sl->mb_y++;
2580 
2581  ret = ff_h264_decode_mb_cabac(h, sl);
2582 
2583  if (ret >= 0)
2584  ff_h264_hl_decode_mb(h, sl);
2585  sl->mb_y--;
2586  }
2587  eos = get_cabac_terminate(&sl->cabac);
2588 
2589  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
2590  sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
2591  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2592  sl->mb_y, ER_MB_END);
2593  if (sl->mb_x >= lf_x_start)
2594  loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
2595  goto finish;
2596  }
2597  if (sl->cabac.bytestream > sl->cabac.bytestream_end + 2 )
2598  av_log(h->avctx, AV_LOG_DEBUG, "bytestream overread %"PTRDIFF_SPECIFIER"\n", sl->cabac.bytestream_end - sl->cabac.bytestream);
2599  if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 4) {
2600  av_log(h->avctx, AV_LOG_ERROR,
2601  "error while decoding MB %d %d, bytestream %"PTRDIFF_SPECIFIER"\n",
2602  sl->mb_x, sl->mb_y,
2603  sl->cabac.bytestream_end - sl->cabac.bytestream);
2604  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2605  sl->mb_y, ER_MB_ERROR);
2606  return AVERROR_INVALIDDATA;
2607  }
2608 
2609  if (++sl->mb_x >= h->mb_width) {
2610  loop_filter(h, sl, lf_x_start, sl->mb_x);
2611  sl->mb_x = lf_x_start = 0;
2612  decode_finish_row(h, sl);
2613  ++sl->mb_y;
2614  if (FIELD_OR_MBAFF_PICTURE(h)) {
2615  ++sl->mb_y;
2616  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2618  }
2619  }
2620 
2621  if (eos || sl->mb_y >= h->mb_height) {
2622  ff_tlog(h->avctx, "slice end %d %d\n",
2623  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2624  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2625  sl->mb_y, ER_MB_END);
2626  if (sl->mb_x > lf_x_start)
2627  loop_filter(h, sl, lf_x_start, sl->mb_x);
2628  goto finish;
2629  }
2630  }
2631  } else {
2632  for (;;) {
2633  int ret;
2634 
2635  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2636  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2637  sl->next_slice_idx);
2638  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2639  sl->mb_y, ER_MB_ERROR);
2640  return AVERROR_INVALIDDATA;
2641  }
2642 
2643  ret = ff_h264_decode_mb_cavlc(h, sl);
2644 
2645  if (ret >= 0)
2646  ff_h264_hl_decode_mb(h, sl);
2647 
2648  // FIXME optimal? or let mb_decode decode 16x32 ?
2649  if (ret >= 0 && FRAME_MBAFF(h)) {
2650  sl->mb_y++;
2651  ret = ff_h264_decode_mb_cavlc(h, sl);
2652 
2653  if (ret >= 0)
2654  ff_h264_hl_decode_mb(h, sl);
2655  sl->mb_y--;
2656  }
2657 
2658  if (ret < 0) {
2659  av_log(h->avctx, AV_LOG_ERROR,
2660  "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
2661  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2662  sl->mb_y, ER_MB_ERROR);
2663  return ret;
2664  }
2665 
2666  if (++sl->mb_x >= h->mb_width) {
2667  loop_filter(h, sl, lf_x_start, sl->mb_x);
2668  sl->mb_x = lf_x_start = 0;
2669  decode_finish_row(h, sl);
2670  ++sl->mb_y;
2671  if (FIELD_OR_MBAFF_PICTURE(h)) {
2672  ++sl->mb_y;
2673  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2675  }
2676  if (sl->mb_y >= h->mb_height) {
2677  ff_tlog(h->avctx, "slice end %d %d\n",
2678  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2679 
2680  if ( get_bits_left(&sl->gb) == 0
2681  || get_bits_left(&sl->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
2682  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2683  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2684 
2685  goto finish;
2686  } else {
2687  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2688  sl->mb_x, sl->mb_y, ER_MB_END);
2689 
2690  return AVERROR_INVALIDDATA;
2691  }
2692  }
2693  }
2694 
2695  if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
2696  ff_tlog(h->avctx, "slice end %d %d\n",
2697  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2698 
2699  if (get_bits_left(&sl->gb) == 0) {
2700  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2701  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2702  if (sl->mb_x > lf_x_start)
2703  loop_filter(h, sl, lf_x_start, sl->mb_x);
2704 
2705  goto finish;
2706  } else {
2707  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2708  sl->mb_y, ER_MB_ERROR);
2709 
2710  return AVERROR_INVALIDDATA;
2711  }
2712  }
2713  }
2714  }
2715 
2716 finish:
2717  sl->deblocking_filter = orig_deblock;
2718  return 0;
2719 }
2720 
2721 /**
2722  * Call decode_slice() for each context.
2723  *
2724  * @param h h264 master context
2725  */
2727 {
2728  AVCodecContext *const avctx = h->avctx;
2729  H264SliceContext *sl;
2730  int context_count = h->nb_slice_ctx_queued;
2731  int ret = 0;
2732  int i, j;
2733 
2734  h->slice_ctx[0].next_slice_idx = INT_MAX;
2735 
2736  if (h->avctx->hwaccel || context_count < 1)
2737  return 0;
2738 
2739  av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
2740 
2741  if (context_count == 1) {
2742 
2743  h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
2744  h->postpone_filter = 0;
2745 
2746  ret = decode_slice(avctx, &h->slice_ctx[0]);
2747  h->mb_y = h->slice_ctx[0].mb_y;
2748  if (ret < 0)
2749  goto finish;
2750  } else {
2751  av_assert0(context_count > 0);
2752  for (i = 0; i < context_count; i++) {
2753  int next_slice_idx = h->mb_width * h->mb_height;
2754  int slice_idx;
2755 
2756  sl = &h->slice_ctx[i];
2757 
2758  /* make sure none of those slices overlap */
2759  slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
2760  for (j = 0; j < context_count; j++) {
2761  H264SliceContext *sl2 = &h->slice_ctx[j];
2762  int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;
2763 
2764  if (i == j || slice_idx2 < slice_idx)
2765  continue;
2766  next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
2767  }
2768  sl->next_slice_idx = next_slice_idx;
2769  }
2770 
2771  avctx->execute(avctx, decode_slice, h->slice_ctx,
2772  NULL, context_count, sizeof(h->slice_ctx[0]));
2773 
2774  /* pull back stuff from slices to master context */
2775  sl = &h->slice_ctx[context_count - 1];
2776  h->mb_y = sl->mb_y;
2777 
2778  if (h->postpone_filter) {
2779  h->postpone_filter = 0;
2780 
2781  for (i = 0; i < context_count; i++) {
2782  int y_end, x_end;
2783 
2784  sl = &h->slice_ctx[i];
2785  y_end = FFMIN(sl->mb_y + 1, h->mb_height);
2786  x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;
2787 
2788  for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
2789  sl->mb_y = j;
2790  loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
2791  j == y_end - 1 ? x_end : h->mb_width);
2792  }
2793  }
2794  }
2795  }
2796 
2797 finish:
2798  h->nb_slice_ctx_queued = 0;
2799  return ret;
2800 }
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
er_add_slice
static void er_add_slice(H264SliceContext *sl, int startx, int starty, int endx, int endy, int status)
Definition: h264_slice.c:2502
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:82
ff_h264_filter_mb_fast
void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:416
h264_slice_header_init
static int h264_slice_header_init(H264Context *h)
Definition: h264_slice.c:934
implicit_weight_table
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
Initialize implicit_weight table.
Definition: h264_slice.c:678
H264SliceContext::mb_xy
int mb_xy
Definition: h264dec.h:224
ff_h264_unref_picture
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:36
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
H264SliceContext::ref_cache
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:292
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
ff_h264_free_tables
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:134
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
av_clip
#define av_clip
Definition: common.h:95
h264_init_ps
static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
Definition: h264_slice.c:1027
H264SliceContext::max_pic_num
int max_pic_num
Definition: h264dec.h:324
H264SliceContext::nb_mmco
int nb_mmco
Definition: h264dec.h:315
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:664
CHROMA422
#define CHROMA422(h)
Definition: h264dec.h:92
FF_BUG_TRUNCATED
#define FF_BUG_TRUNCATED
Definition: avcodec.h:1338
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
cabac.h
H264Picture::poc
int poc
frame POC
Definition: h264dec.h:129
h264_export_frame_props
static int h264_export_frame_props(H264Context *h)
Definition: h264_slice.c:1146
ff_h264_sei_ctx_replace
static int ff_h264_sei_ctx_replace(H264SEIContext *dst, const H264SEIContext *src)
Definition: h264_sei.h:132
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:978
H264Picture::f
AVFrame * f
Definition: h264dec.h:108
out
FILE * out
Definition: movenc.c:54
ff_thread_get_format
#define ff_thread_get_format
Definition: thread.h:65
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:239
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:674
av_clip_int8
#define av_clip_int8
Definition: common.h:104
zigzag_scan8x8_cavlc
static const uint8_t zigzag_scan8x8_cavlc[64+1]
Definition: h264_slice.c:99
ff_h264_replace_picture
int ff_h264_replace_picture(H264Context *h, H264Picture *dst, const H264Picture *src)
Definition: h264_picture.c:145
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:893
ff_h2645_sei_to_frame
int ff_h2645_sei_to_frame(AVFrame *frame, H2645SEI *sei, enum AVCodecID codec_id, AVCodecContext *avctx, const H2645VUI *vui, unsigned bit_depth_luma, unsigned bit_depth_chroma, int seed)
Definition: h2645_sei.c:459
H264Picture::ref_index
int8_t * ref_index[2]
Definition: h264dec.h:126
HWACCEL_MAX
#define HWACCEL_MAX
MB_MBAFF
#define MB_MBAFF(h)
Definition: h264dec.h:65
H264SliceContext::mvd_table
uint8_t(*[2] mvd_table)[2]
Definition: h264dec.h:305
ff_h264_set_erpic
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
Definition: h264_picture.c:196
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:256
H264_SEI_PIC_STRUCT_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM
3: top field, bottom field, in that order
Definition: h264_sei.h:35
H264Picture::pps
const PPS * pps
Definition: h264dec.h:153
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:152
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:122
GetBitContext::size_in_bits
int size_in_bits
Definition: get_bits.h:110
H2645NAL::ref_idc
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:57
ff_h264_slice_context_init
void ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init slice context.
Definition: h264dec.c:258
ERContext::mb_index2xy
int * mb_index2xy
Definition: error_resilience.h:59
predict_field_decoding_flag
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
Definition: h264_slice.c:2458
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
AVFrame::width
int width
Definition: frame.h:402
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:661
get_ue_golomb
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to 8190.
Definition: golomb.h:53
internal.h
ff_h264_update_thread_context
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:308
alloc_scratch_buffers
static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
Definition: h264_slice.c:131
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:561
FRAME_RECOVERED_IDR
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264dec.h:516
decode_finish_row
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
Draw edges and report progress for the last MB row.
Definition: h264_slice.c:2471
H264SliceContext::ref_count
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:260
FF_COMPLIANCE_STRICT
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: defs.h:59
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:459
ff_er_frame_start
void ff_er_frame_start(ERContext *s)
Definition: error_resilience.c:787
H264Picture::qscale_table
int8_t * qscale_table
Definition: h264dec.h:114
H264SliceContext::left_mb_xy
int left_mb_xy[LEFT_MBS]
Definition: h264dec.h:204
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:247
H264PredWeightTable::use_weight_chroma
int use_weight_chroma
Definition: h264_parse.h:71
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:588
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
ff_h264_update_thread_context_for_user
int ff_h264_update_thread_context_for_user(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:458
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:821
H264Picture::ref_index_buf
AVBufferRef * ref_index_buf[2]
Definition: h264dec.h:125
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
ff_h264_pred_weight_table
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps, const int *ref_count, int slice_type_nos, H264PredWeightTable *pwt, int picture_structure, void *logctx)
Definition: h264_parse.c:29
FRAME_RECOVERED_SEI
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264dec.h:521
H264SliceContext::is_complex
int is_complex
Definition: h264dec.h:231
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:35
ff_h264_decode_ref_pic_list_reordering
int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx)
Definition: h264_refs.c:422
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:550
H264Picture::invalid_gap
int invalid_gap
Definition: h264dec.h:148
av_timecode_get_smpte
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert sei info to SMPTE 12M binary representation.
Definition: timecode.c:69
H264Picture::pps_buf
AVBufferRef * pps_buf
Definition: h264dec.h:152
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
ThreadFrame::f
AVFrame * f
Definition: threadframe.h:28
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1372
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:351
H264SliceContext::mb_x
int mb_x
Definition: h264dec.h:223
H264Picture::frame_num
int frame_num
frame_num (raw frame_num from slice header)
Definition: h264dec.h:130
H264SliceContext::next_slice_idx
int next_slice_idx
Definition: h264dec.h:229
H264SliceContext
Definition: h264dec.h:170
fill_filter_caches_inter
static av_always_inline void fill_filter_caches_inter(const H264Context *h, H264SliceContext *sl, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264_slice.c:2162
golomb.h
exp golomb vlc stuff
MB_FIELD
#define MB_FIELD(sl)
Definition: h264dec.h:66
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:325
ff_h264_filter_mb
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:716
H264SliceContext::mv_cache
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:291
AV_CODEC_FLAG_OUTPUT_CORRUPT
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:224
AVHWAccel
Definition: avcodec.h:2097
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:477
finish
static void finish(void)
Definition: movenc.c:342
get_chroma_qp
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
Get the chroma qp.
Definition: h264dec.h:647
H264Picture::mmco_reset
int mmco_reset
MMCO_RESET set this 1.
Definition: h264dec.h:131
fail
#define fail()
Definition: checkasm.h:135
copy_picture_range
static void copy_picture_range(H264Picture **to, H264Picture **from, int count, H264Context *new_base, H264Context *old_base)
Definition: h264_slice.c:292
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:475
timecode.h
h264_select_output_frame
static int h264_select_output_frame(H264Context *h)
Definition: h264_slice.c:1269
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:457
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:92
CABACContext::bytestream
const uint8_t * bytestream
Definition: cabac.h:45
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:422
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2916
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
H264Picture::mb_stride
int mb_stride
Definition: h264dec.h:156
IN_RANGE
#define IN_RANGE(a, b, size)
Definition: h264_slice.c:285
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264_parse.h:40
ff_h264_flush_change
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:439
ff_h264qpel_init
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:49
ff_h264_sei_process_picture_timing
int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps, void *logctx)
Parse the contents of a picture timing message given an active SPS.
Definition: h264_sei.c:65
h264_frame_start
static int h264_frame_start(H264Context *h)
Definition: h264_slice.c:470
H264SliceContext::deblocking_filter
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264dec.h:186
H264PredWeightTable::luma_log2_weight_denom
int luma_log2_weight_denom
Definition: h264_parse.h:72
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:260
H264Picture::f_grain
AVFrame * f_grain
Definition: h264dec.h:111
H264SliceContext::picture_structure
int picture_structure
Definition: h264dec.h:233
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
release_unused_pictures
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264_slice.c:118
H264PredWeightTable::use_weight
int use_weight
Definition: h264_parse.h:70
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
H264SliceContext::direct_spatial_mv_pred
int direct_spatial_mv_pred
Definition: h264dec.h:244
H264SliceContext::slice_num
int slice_num
Definition: h264dec.h:175
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264_parse.h:127
non_j_pixfmt
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
Definition: h264_slice.c:1016
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:462
ff_h264_init_cabac_states
void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
Definition: h264_cabac.c:1262
ff_h264_hl_decode_mb
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:799
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
film_grain_params.h
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
ff_color_frame
void ff_color_frame(AVFrame *frame, const int color[4])
Definition: utils.c:409
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:548
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:384
ff_h264_queue_decode_slice
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2019
width
#define width
H264Context::DPB
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264dec.h:339
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
H264PredWeightTable::chroma_log2_weight_denom
int chroma_log2_weight_denom
Definition: h264_parse.h:73
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:50
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:413
FIELD_PICTURE
#define FIELD_PICTURE(h)
Definition: h264dec.h:68
ff_h264_execute_ref_pic_marking
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:609
H264_MAX_DPB_FRAMES
@ H264_MAX_DPB_FRAMES
Definition: h264.h:76
ff_h264_decode_ref_pic_marking
int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb, const H2645NAL *nal, void *logctx)
Definition: h264_refs.c:833
from
const char * from
Definition: jacosubdec.c:66
to
const char * to
Definition: webvttdec.c:35
h264_slice_header_parse
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1664
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
H264PredWeightTable::chroma_weight_flag
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264_parse.h:75
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
h264data.h
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:456
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
H264Ref::parent
H264Picture * parent
Definition: h264dec.h:167
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
decode.h
field_scan8x8_cavlc
static const uint8_t field_scan8x8_cavlc[64+1]
Definition: h264_slice.c:79
H264SliceContext::slice_alpha_c0_offset
int slice_alpha_c0_offset
Definition: h264dec.h:187
IS_INTRA
#define IS_INTRA(x, y)
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AVFrame::crop_right
size_t crop_right
Definition: frame.h:728
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
H264SliceContext::slice_type
int slice_type
Definition: h264dec.h:176
H264SliceContext::resync_mb_x
int resync_mb_x
Definition: h264dec.h:225
H264Picture::sei_recovery_frame_cnt
int sei_recovery_frame_cnt
Definition: h264dec.h:149
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: defs.h:73
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:239
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
H264Context::enable_er
int enable_er
Definition: h264dec.h:544
ff_h264_draw_horiz_band
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:99
H264SliceContext::curr_pic_num
int curr_pic_num
Definition: h264dec.h:323
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:896
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:64
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
threadframe.h
GetBitContext::buffer
const uint8_t * buffer
Definition: get_bits.h:108
alloc_picture
static int alloc_picture(H264Context *h, H264Picture *pic)
Definition: h264_slice.c:190
H264Picture::motion_val_buf
AVBufferRef * motion_val_buf[2]
Definition: h264dec.h:116
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:149
NULL
#define NULL
Definition: coverity.c:32
AV_COPY128
#define AV_COPY128(d, s)
Definition: intreadwrite.h:609
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_COPY64
#define AV_COPY64(d, s)
Definition: intreadwrite.h:605
H264SliceContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: h264dec.h:276
SPS
Sequence parameter set.
Definition: h264_ps.h:45
TRANSPOSE
#define TRANSPOSE(x)
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:38
ff_h264_decode_mb_cabac
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cabac.c:1920
AV_PICTURE_TYPE_SI
@ AV_PICTURE_TYPE_SI
Switching Intra.
Definition: avutil.h:278
AVFrame::coded_picture_number
attribute_deprecated int coded_picture_number
picture number in bitstream order
Definition: frame.h:459
H264SliceContext::chroma_qp
int chroma_qp[2]
Definition: h264dec.h:181
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:345
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:378
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
PPS
Picture parameter set.
Definition: h264_ps.h:105
av_fast_mallocz
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
Definition: mem.c:560
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:106
mathops.h
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
IS_INTERLACED
#define IS_INTERLACED(a)
Definition: mpegutils.h:76
H264Picture::mb_height
int mb_height
Definition: h264dec.h:155
MAX_PPS_COUNT
#define MAX_PPS_COUNT
Definition: h264_ps.h:39
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:460
H264SliceContext::qscale
int qscale
Definition: h264dec.h:180
get_pixel_format
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
Definition: h264_slice.c:776
fill_filter_caches
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
Definition: h264_slice.c:2246
ERContext::error_occurred
int error_occurred
Definition: error_resilience.h:66
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
init_scan_tables
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264_slice.c:742
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:474
H264SliceContext::top_borders_allocated
int top_borders_allocated[2]
Definition: h264dec.h:280
AV_PICTURE_TYPE_SP
@ AV_PICTURE_TYPE_SP
Switching Predicted.
Definition: avutil.h:279
FIELD_OR_MBAFF_PICTURE
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264dec.h:85
H264SliceContext::mb_skip_run
int mb_skip_run
Definition: h264dec.h:230
h264_ps.h
init_dimensions
static void init_dimensions(H264Context *h)
Definition: h264_slice.c:894
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
H264SliceContext::top_type
int top_type
Definition: h264dec.h:207
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:726
H264SliceContext::resync_mb_y
int resync_mb_y
Definition: h264dec.h:226
H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
6: bottom field, top field, bottom field repeated, in that order
Definition: h264_sei.h:38
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:67
H264SEIPictureTiming
Definition: h264_sei.h:54
H264SliceContext::cabac
CABACContext cabac
Cabac.
Definition: h264dec.h:310
H264SliceContext::redundant_pic_count
int redundant_pic_count
Definition: h264dec.h:237
AVFrame::crop_left
size_t crop_left
Definition: frame.h:727
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:75
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:427
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
H264Picture::reference
int reference
Definition: h264dec.h:146
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:310
CABAC
#define CABAC(h)
Definition: h264_cabac.c:28
LEFT_MBS
#define LEFT_MBS
Definition: h264dec.h:69
pps
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
Definition: cbs_h264_syntax_template.c:404
rectangle.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
H264SliceContext::mb_uvlinesize
ptrdiff_t mb_uvlinesize
Definition: h264dec.h:221
VP_START
#define VP_START
< current MB is the first after a resync marker
Definition: error_resilience.h:30
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:464
H264SliceContext::pwt
H264PredWeightTable pwt
Definition: h264dec.h:190
H264Picture::tf
ThreadFrame tf
Definition: h264dec.h:109
H264Picture::mb_type
uint32_t * mb_type
Definition: h264dec.h:120
ff_h264_decode_mb_cavlc
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cavlc.c:695
H264_SEI_PIC_STRUCT_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP
4: bottom field, top field, in that order
Definition: h264_sei.h:36
H264Picture::recovered
int recovered
picture at IDR or recovery point + recovery count
Definition: h264dec.h:147
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
H264SliceContext::top_mb_xy
int top_mb_xy
Definition: h264dec.h:202
H264SliceContext::qp_thresh
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264dec.h:182
H2645NAL
Definition: h2645_parse.h:34
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:466
H264SliceContext::top_borders
uint8_t(*[2] top_borders)[(16 *3) *2]
Definition: h264dec.h:277
AVFrameSideData::data
uint8_t * data
Definition: frame.h:238
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1527
H264SliceContext::cbp
int cbp
Definition: h264dec.h:248
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:417
H264SliceContext::left_type
int left_type[LEFT_MBS]
Definition: h264dec.h:209
ff_h264_direct_ref_list_init
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:120
H264SliceContext::mb_y
int mb_y
Definition: h264dec.h:223
H264PredWeightTable::implicit_weight
int implicit_weight[48][48][2]
Definition: h264_parse.h:79
height
#define height
decode_slice
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264_slice.c:2514
H264SliceContext::explicit_ref_marking
int explicit_ref_marking
Definition: h264dec.h:316
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
pt
int pt
Definition: rtp.c:35
H264SliceContext::uvlinesize
ptrdiff_t uvlinesize
Definition: h264dec.h:219
AVBufferRef::buffer
AVBuffer * buffer
Definition: buffer.h:83
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:333
H264SliceContext::slice_type_nos
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264dec.h:177
H264SliceContext::delta_poc_bottom
int delta_poc_bottom
Definition: h264dec.h:321
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
FRAME_MBAFF
#define FRAME_MBAFF(h)
Definition: h264dec.h:67
IS_DIRECT
#define IS_DIRECT(a)
Definition: mpegutils.h:77
H264_SEI_PIC_STRUCT_FRAME
@ H264_SEI_PIC_STRUCT_FRAME
0: frame
Definition: h264_sei.h:32
get_cabac_terminate
static int av_unused get_cabac_terminate(CABACContext *c)
Definition: cabac_functions.h:187
H264_SEI_PIC_STRUCT_FRAME_TRIPLING
@ H264_SEI_PIC_STRUCT_FRAME_TRIPLING
8: frame tripling
Definition: h264_sei.h:40
field_scan
static const uint8_t field_scan[16+1]
Definition: h264_slice.c:53
loop_filter
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
Definition: h264_slice.c:2388
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:162
H264SliceContext::mb_mbaff
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:235
field_scan8x8
static const uint8_t field_scan8x8[64+1]
Definition: h264_slice.c:60
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:302
LIST_NOT_USED
#define LIST_NOT_USED
Definition: h264dec.h:389
H264Picture::field_picture
int field_picture
whether or not picture was encoded in separate fields
Definition: h264dec.h:139
h264dec.h
H264SliceContext::poc_lsb
int poc_lsb
Definition: h264dec.h:320
H264SliceContext::first_mb_addr
unsigned int first_mb_addr
Definition: h264dec.h:227
ff_h264_direct_dist_scale_factor
void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:61
H264Picture::needs_fg
int needs_fg
whether picture needs film grain synthesis (see f_grain)
Definition: h264dec.h:150
AVBuffer
A reference counted buffer type.
Definition: buffer_internal.h:38
H264Context
H264Context.
Definition: h264dec.h:330
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: defs.h:74
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
av_timecode_make_smpte_tc_string2
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
Definition: timecode.c:138
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:368
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:584
H264_SEI_PIC_STRUCT_FRAME_DOUBLING
@ H264_SEI_PIC_STRUCT_FRAME_DOUBLING
7: frame doubling
Definition: h264_sei.h:39
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
H264SliceContext::frame_num
int frame_num
Definition: h264dec.h:318
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:476
display.h
ff_h264_execute_decode_slices
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2726
H264SliceContext::mb_linesize
ptrdiff_t mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264dec.h:220
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
cabac_functions.h
H264Picture::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: h264dec.h:122
tb
#define tb
Definition: regdef.h:68
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
ff_h264_parse_ref_count
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2], GetBitContext *gb, const PPS *pps, int slice_type_nos, int picture_structure, void *logctx)
Definition: h264_parse.c:221
ff_h264_alloc_tables
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:179
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:936
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:644
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:458
H264SliceContext::list_count
unsigned int list_count
Definition: h264dec.h:261
avcodec.h
H264SliceContext::h264
const struct H264Context * h264
Definition: h264dec.h:171
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:66
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
ff_h264_ref_picture
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
Definition: h264_picture.c:92
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
ff_h264_init_poc
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc, const SPS *sps, H264POCContext *pc, int picture_structure, int nal_ref_idc)
Definition: h264_parse.c:279
ff_h264_get_profile
int ff_h264_get_profile(const SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264_parse.c:531
h264_field_start
static int h264_field_start(H264Context *h, const H264SliceContext *sl, const H2645NAL *nal, int first_slice)
Definition: h264_slice.c:1372
H264SliceContext::last_qscale_diff
int last_qscale_diff
Definition: h264dec.h:183
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:540
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:463
U
#define U(x)
Definition: vpx_arith.h:37
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:468
H264SliceContext::pps_id
unsigned int pps_id
Definition: h264dec.h:271
H264SliceContext::linesize
ptrdiff_t linesize
Definition: h264dec.h:219
H264SliceContext::slice_beta_offset
int slice_beta_offset
Definition: h264dec.h:188
AVCodecContext
main external API structure.
Definition: avcodec.h:435
AVFrame::height
int height
Definition: frame.h:402
get_ue_golomb_31
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
Definition: golomb.h:120
MAX_SLICES
#define MAX_SLICES
Definition: dxva2_hevc.c:31
backup_mb_border
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264_slice.c:576
ff_h264_build_ref_list
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl)
Definition: h264_refs.c:298
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1545
H264SliceContext::bipred_scratchpad
uint8_t * bipred_scratchpad
Definition: h264dec.h:275
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
H264Picture::field_poc
int field_poc[2]
top/bottom POC
Definition: h264dec.h:128
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
H264SliceContext::mmco
MMCO mmco[H264_MAX_MMCO_COUNT]
Definition: h264dec.h:314
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVHWAccel::frame_priv_data_size
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2206
H264Picture::mb_width
int mb_width
Definition: h264dec.h:155
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:815
H264Picture
Definition: h264dec.h:107
ERContext::error_status_table
uint8_t * error_status_table
Definition: error_resilience.h:67
find_unused_picture
static int find_unused_picture(H264Context *h)
Definition: h264_slice.c:273
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
h264_slice_init
static int h264_slice_init(H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1883
ff_h264_field_end
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:219
CABACContext::bytestream_end
const uint8_t * bytestream_end
Definition: cabac.h:46
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
init_table_pools
static int init_table_pools(H264Context *h)
Definition: h264_slice.c:163
H264Picture::mb_type_buf
AVBufferRef * mb_type_buf
Definition: h264dec.h:119
H264SliceContext::ref_list
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:262
LBOT
#define LBOT
Definition: h264dec.h:71
H264SliceContext::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:286
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:81
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:72
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:104
H264Context::nal_length_size
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264dec.h:449
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:39
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:236
H264SliceContext::er
ERContext * er
Definition: h264dec.h:173
H264_SEI_PIC_STRUCT_BOTTOM_FIELD
@ H264_SEI_PIC_STRUCT_BOTTOM_FIELD
2: bottom field
Definition: h264_sei.h:34
H264Picture::hwaccel_picture_private
void * hwaccel_picture_private
hardware accelerator private data
Definition: h264dec.h:123
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:36
H264SliceContext::idr_pic_id
int idr_pic_id
Definition: h264dec.h:319
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:162
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:462
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:240
AVFrame::crop_top
size_t crop_top
Definition: frame.h:725
H264SliceContext::gb
GetBitContext gb
Definition: h264dec.h:172
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:555
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
H264SliceContext::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: h264dec.h:199
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
LTOP
#define LTOP
Definition: h264dec.h:70
h264.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:375
H264SliceContext::edge_emu_buffer_allocated
int edge_emu_buffer_allocated
Definition: h264dec.h:279
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264_slice.c:287
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
CHROMA444
#define CHROMA444(h)
Definition: h264dec.h:93
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
ff_h264_get_slice_type
int ff_h264_get_slice_type(const H264SliceContext *sl)
Reconstruct bitstream slice_type.
Definition: h264_slice.c:2144
h
h
Definition: vp9dsp_template.c:2038
H264SliceContext::cabac_init_idc
int cabac_init_idc
Definition: h264dec.h:312
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:469
H264PredWeightTable::luma_weight_flag
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264_parse.h:74
H264_MAX_PICTURE_COUNT
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:50
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:34
H264SliceContext::bipred_scratchpad_allocated
int bipred_scratchpad_allocated
Definition: h264dec.h:278
H264_NAL_IDR_SLICE
@ H264_NAL_IDR_SLICE
Definition: h264.h:39
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:72
H264SliceContext::slice_type_fixed
int slice_type_fixed
Definition: h264dec.h:178
H264Ref::poc
int poc
Definition: h264dec.h:164
IS_8x8DCT
#define IS_8x8DCT(a)
Definition: h264dec.h:96
H264Picture::qscale_table_buf
AVBufferRef * qscale_table_buf
Definition: h264dec.h:113
H264_SEI_PIC_STRUCT_TOP_FIELD
@ H264_SEI_PIC_STRUCT_TOP_FIELD
1: top field
Definition: h264_sei.h:33
H264SliceContext::delta_poc
int delta_poc[2]
Definition: h264dec.h:322
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3243
H264Picture::long_ref
int long_ref
1->long term reference 0->short term reference
Definition: h264dec.h:135
H264Ref::reference
int reference
Definition: h264dec.h:163
H264Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: h264dec.h:117
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:408
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:467
H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
5: top field, bottom field, top field repeated, in that order
Definition: h264_sei.h:37
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2808
H264SliceContext::mb_field_decoding_flag
int mb_field_decoding_flag
Definition: h264dec.h:234
H264Context::is_avc
int is_avc
Used to parse AVC variant of H.264.
Definition: h264dec.h:448