FFmpeg
h264_parse.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "bytestream.h"
#include "get_bits.h"
#include "golomb.h"
#include "h264.h"
#include "h264dec.h"
#include "h264_parse.h"
#include "h264_ps.h"

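/*
 * Parse the pred_weight_table() slice-header syntax (see the
 * luma/chroma_weight_lX_flag semantics in H.264 section 7.4.3.2) into pwt:
 * explicit weights and offsets for every reference in list 0, and in list 1
 * for B slices. Weights that do not fit in an int8_t are rejected as invalid
 * data; references without an explicit weight get the defaults
 * (1 << log2_weight_denom, offset 0).
 */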
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps,
                              const int *ref_count, int slice_type_nos,
                              H264PredWeightTable *pwt,
                              int picture_structure, void *logctx)
{
    int list, i, j;
    int luma_def, chroma_def;

    pwt->use_weight        = 0;
    pwt->use_weight_chroma = 0;

    pwt->luma_log2_weight_denom = get_ue_golomb(gb);
    if (pwt->luma_log2_weight_denom > 7U) {
        av_log(logctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is out of range\n", pwt->luma_log2_weight_denom);
        pwt->luma_log2_weight_denom = 0;
    }
    luma_def = 1 << pwt->luma_log2_weight_denom;

    if (sps->chroma_format_idc) {
        pwt->chroma_log2_weight_denom = get_ue_golomb(gb);
        if (pwt->chroma_log2_weight_denom > 7U) {
            av_log(logctx, AV_LOG_ERROR, "chroma_log2_weight_denom %d is out of range\n", pwt->chroma_log2_weight_denom);
            pwt->chroma_log2_weight_denom = 0;
        }
        chroma_def = 1 << pwt->chroma_log2_weight_denom;
    }

    for (list = 0; list < 2; list++) {
        pwt->luma_weight_flag[list]   = 0;
        pwt->chroma_weight_flag[list] = 0;
        for (i = 0; i < ref_count[list]; i++) {
            int luma_weight_flag, chroma_weight_flag;

            luma_weight_flag = get_bits1(gb);
            if (luma_weight_flag) {
                pwt->luma_weight[i][list][0] = get_se_golomb(gb);
                pwt->luma_weight[i][list][1] = get_se_golomb(gb);
                if ((int8_t)pwt->luma_weight[i][list][0] != pwt->luma_weight[i][list][0] ||
                    (int8_t)pwt->luma_weight[i][list][1] != pwt->luma_weight[i][list][1])
                    goto out_range_weight;
                if (pwt->luma_weight[i][list][0] != luma_def ||
                    pwt->luma_weight[i][list][1] != 0) {
                    pwt->use_weight             = 1;
                    pwt->luma_weight_flag[list] = 1;
                }
            } else {
                pwt->luma_weight[i][list][0] = luma_def;
                pwt->luma_weight[i][list][1] = 0;
            }

            if (sps->chroma_format_idc) {
                chroma_weight_flag = get_bits1(gb);
                if (chroma_weight_flag) {
                    int j;
                    for (j = 0; j < 2; j++) {
                        pwt->chroma_weight[i][list][j][0] = get_se_golomb(gb);
                        pwt->chroma_weight[i][list][j][1] = get_se_golomb(gb);
                        if ((int8_t)pwt->chroma_weight[i][list][j][0] != pwt->chroma_weight[i][list][j][0] ||
                            (int8_t)pwt->chroma_weight[i][list][j][1] != pwt->chroma_weight[i][list][j][1]) {
                            pwt->chroma_weight[i][list][j][0] = chroma_def;
                            pwt->chroma_weight[i][list][j][1] = 0;
                            goto out_range_weight;
                        }
                        if (pwt->chroma_weight[i][list][j][0] != chroma_def ||
                            pwt->chroma_weight[i][list][j][1] != 0) {
                            pwt->use_weight_chroma        = 1;
                            pwt->chroma_weight_flag[list] = 1;
                        }
                    }
                } else {
                    int j;
                    for (j = 0; j < 2; j++) {
                        pwt->chroma_weight[i][list][j][0] = chroma_def;
                        pwt->chroma_weight[i][list][j][1] = 0;
                    }
                }
            }

            // for MBAFF: mirror the frame weights into the field entries
            // (16 + 2 * i and 16 + 2 * i + 1) so field macroblocks of an
            // MBAFF frame can index them directly
            if (picture_structure == PICT_FRAME) {
                pwt->luma_weight[16 + 2 * i][list][0] = pwt->luma_weight[16 + 2 * i + 1][list][0] = pwt->luma_weight[i][list][0];
                pwt->luma_weight[16 + 2 * i][list][1] = pwt->luma_weight[16 + 2 * i + 1][list][1] = pwt->luma_weight[i][list][1];
                if (sps->chroma_format_idc) {
                    for (j = 0; j < 2; j++) {
                        pwt->chroma_weight[16 + 2 * i][list][j][0] = pwt->chroma_weight[16 + 2 * i + 1][list][j][0] = pwt->chroma_weight[i][list][j][0];
                        pwt->chroma_weight[16 + 2 * i][list][j][1] = pwt->chroma_weight[16 + 2 * i + 1][list][j][1] = pwt->chroma_weight[i][list][j][1];
                    }
                }
            }
        }
        if (slice_type_nos != AV_PICTURE_TYPE_B)
            break;
    }
    pwt->use_weight = pwt->use_weight || pwt->use_weight_chroma;
    return 0;
out_range_weight:
    avpriv_request_sample(logctx, "Out of range weight");
    return AVERROR_INVALIDDATA;
}

/**
 * Check whether the top and left blocks are available (if needed) and
 * change the DC mode so that it only uses the available blocks.
 */
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx,
                                     int top_samples_available, int left_samples_available)
{
    static const int8_t top[12] = {
        -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
    };
    static const int8_t left[12] = {
        0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
    };
    int i;

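    /* top_samples_available / left_samples_available are bitmasks in which a
     * cleared bit marks a neighbouring block whose samples cannot be used.
     * The top[]/left[] tables above map each cached 4x4 prediction mode to
     * -1 (the mode needs that neighbour: error), 0 (mode unaffected) or a
     * replacement DC mode that only uses the samples that are available. */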
    if (!(top_samples_available & 0x8000)) {
        for (i = 0; i < 4; i++) {
            int status = top[pred_mode_cache[scan8[0] + i]];
            if (status < 0) {
                av_log(logctx, AV_LOG_ERROR,
                       "top block unavailable for requested intra mode %d\n",
                       status);
                return AVERROR_INVALIDDATA;
            } else if (status) {
                pred_mode_cache[scan8[0] + i] = status;
            }
        }
    }

    if ((left_samples_available & 0x8888) != 0x8888) {
        static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
        for (i = 0; i < 4; i++)
            if (!(left_samples_available & mask[i])) {
                int status = left[pred_mode_cache[scan8[0] + 8 * i]];
                if (status < 0) {
                    av_log(logctx, AV_LOG_ERROR,
                           "left block unavailable for requested intra4x4 mode %d\n",
                           status);
                    return AVERROR_INVALIDDATA;
                } else if (status) {
                    pred_mode_cache[scan8[0] + 8 * i] = status;
                }
            }
    }

    return 0;
}

/**
 * Check whether the top and left blocks are available (if needed) and
 * change the DC mode so that it only uses the available blocks.
 */
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available,
                                  int left_samples_available,
                                  int mode, int is_chroma)
{
    static const int8_t top[4]  = { LEFT_DC_PRED8x8, 1, -1, -1 };
    static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };

    if (mode > 3U) {
        av_log(logctx, AV_LOG_ERROR,
               "out of range intra chroma pred mode\n");
        return AVERROR_INVALIDDATA;
    }

    if (!(top_samples_available & 0x8000)) {
        mode = top[mode];
        if (mode < 0) {
            av_log(logctx, AV_LOG_ERROR,
                   "top block unavailable for requested intra mode\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if ((left_samples_available & 0x8080) != 0x8080) {
        mode = left[mode];
        if (mode < 0) {
            av_log(logctx, AV_LOG_ERROR,
                   "left block unavailable for requested intra mode\n");
            return AVERROR_INVALIDDATA;
        }
        if (is_chroma && (left_samples_available & 0x8080)) {
            // mad cow disease mode, aka MBAFF + constrained_intra_pred
            mode = ALZHEIMER_DC_L0T_PRED8x8 +
                   (!(left_samples_available & 0x8000)) +
                   2 * (mode == DC_128_PRED8x8);
        }
    }

    return mode;
}

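/*
 * Parse num_ref_idx_active_override_flag and, if set, the per-list
 * num_ref_idx_lX_active_minus1 values from the slice header, and derive how
 * many reference lists are in use (0 for I, 1 for P, 2 for B slices).
 * Counts beyond the allowed maximum (15 references for frames, 31 for
 * fields) are rejected or reset.
 */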
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2],
                            GetBitContext *gb, const PPS *pps,
                            int slice_type_nos, int picture_structure, void *logctx)
{
    int list_count;
    int num_ref_idx_active_override_flag;

    // set defaults, might be overridden a few lines later
    ref_count[0] = pps->ref_count[0];
    ref_count[1] = pps->ref_count[1];

    if (slice_type_nos != AV_PICTURE_TYPE_I) {
        unsigned max[2];
        max[0] = max[1] = picture_structure == PICT_FRAME ? 15 : 31;

        num_ref_idx_active_override_flag = get_bits1(gb);

        if (num_ref_idx_active_override_flag) {
            ref_count[0] = get_ue_golomb(gb) + 1;
            if (slice_type_nos == AV_PICTURE_TYPE_B) {
                ref_count[1] = get_ue_golomb(gb) + 1;
            } else
                // full range is spec-ok in this case, even for frames
                ref_count[1] = 1;
        }

        if (slice_type_nos == AV_PICTURE_TYPE_B)
            list_count = 2;
        else
            list_count = 1;

        if (ref_count[0] - 1 > max[0] || (list_count == 2 && (ref_count[1] - 1 > max[1]))) {
            av_log(logctx, AV_LOG_ERROR, "reference overflow %u > %u or %u > %u\n",
                   ref_count[0] - 1, max[0], ref_count[1] - 1, max[1]);
            ref_count[0] = ref_count[1] = 0;
            *plist_count = 0;
            goto fail;
        } else if (ref_count[1] - 1 > max[1]) {
            av_log(logctx, AV_LOG_DEBUG, "reference overflow %u > %u \n",
                   ref_count[1] - 1, max[1]);
            ref_count[1] = 0;
        }

    } else {
        list_count   = 0;
        ref_count[0] = ref_count[1] = 0;
    }

    *plist_count = list_count;

    return 0;
fail:
    *plist_count = 0;
    ref_count[0] = 0;
    ref_count[1] = 0;
    return AVERROR_INVALIDDATA;
}

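/*
 * Derive the picture order count (POC) of the current picture following the
 * three pic_order_cnt_type modes of H.264 section 8.2.1:
 *   type 0: poc_lsb from the slice header plus an MSB tracked across pictures
 *           that wraps at 1 << log2_max_poc_lsb,
 *   type 1: POC derived from frame_num and the SPS offset_for_ref_frame[] cycle,
 *   type 2: POC proportional to frame_num (output order equals decoding order).
 * Field POCs that do not fit in an int are rejected as invalid data.
 */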
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc,
                     const SPS *sps, H264POCContext *pc,
                     int picture_structure, int nal_ref_idc)
{
    const int max_frame_num = 1 << sps->log2_max_frame_num;
    int64_t field_poc[2];

    pc->frame_num_offset = pc->prev_frame_num_offset;
    if (pc->frame_num < pc->prev_frame_num)
        pc->frame_num_offset += max_frame_num;

    if (sps->poc_type == 0) {
        const int max_poc_lsb = 1 << sps->log2_max_poc_lsb;

        if (pc->poc_lsb < pc->prev_poc_lsb &&
            pc->prev_poc_lsb - pc->poc_lsb >= max_poc_lsb / 2)
            pc->poc_msb = pc->prev_poc_msb + max_poc_lsb;
        else if (pc->poc_lsb > pc->prev_poc_lsb &&
                 pc->prev_poc_lsb - pc->poc_lsb < -max_poc_lsb / 2)
            pc->poc_msb = pc->prev_poc_msb - max_poc_lsb;
        else
            pc->poc_msb = pc->prev_poc_msb;
        field_poc[0] =
        field_poc[1] = pc->poc_msb + pc->poc_lsb;
        if (picture_structure == PICT_FRAME)
            field_poc[1] += pc->delta_poc_bottom;
    } else if (sps->poc_type == 1) {
        int abs_frame_num;
        int64_t expected_delta_per_poc_cycle, expectedpoc;
        int i;

        if (sps->poc_cycle_length != 0)
            abs_frame_num = pc->frame_num_offset + pc->frame_num;
        else
            abs_frame_num = 0;

        if (nal_ref_idc == 0 && abs_frame_num > 0)
            abs_frame_num--;

        expected_delta_per_poc_cycle = 0;
        for (i = 0; i < sps->poc_cycle_length; i++)
            // FIXME integrate during sps parse
            expected_delta_per_poc_cycle += sps->offset_for_ref_frame[i];

        if (abs_frame_num > 0) {
            int poc_cycle_cnt          = (abs_frame_num - 1) / sps->poc_cycle_length;
            int frame_num_in_poc_cycle = (abs_frame_num - 1) % sps->poc_cycle_length;

            expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
            for (i = 0; i <= frame_num_in_poc_cycle; i++)
                expectedpoc = expectedpoc + sps->offset_for_ref_frame[i];
        } else
            expectedpoc = 0;

        if (nal_ref_idc == 0)
            expectedpoc = expectedpoc + sps->offset_for_non_ref_pic;

        field_poc[0] = expectedpoc + pc->delta_poc[0];
        field_poc[1] = field_poc[0] + sps->offset_for_top_to_bottom_field;

        if (picture_structure == PICT_FRAME)
            field_poc[1] += pc->delta_poc[1];
    } else {
        int poc = 2 * (pc->frame_num_offset + pc->frame_num);

        if (!nal_ref_idc)
            poc--;

        field_poc[0] = poc;
        field_poc[1] = poc;
    }

    if (   field_poc[0] != (int)field_poc[0]
        || field_poc[1] != (int)field_poc[1])
        return AVERROR_INVALIDDATA;

    if (picture_structure != PICT_BOTTOM_FIELD)
        pic_field_poc[0] = field_poc[0];
    if (picture_structure != PICT_TOP_FIELD)
        pic_field_poc[1] = field_poc[1];
    *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]);

    return 0;
}

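/*
 * Split the extradata into NAL units and decode any SPS/PPS NALs found;
 * other NAL types are ignored. A failure to split the packet is not treated
 * as an error here, so callers can still fall back to in-band parameter sets.
 */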
static int decode_extradata_ps(const uint8_t *data, int size, H264ParamSets *ps,
                               int is_avc, void *logctx)
{
    H2645Packet pkt = { 0 };
    int i, ret = 0;

    ret = ff_h2645_packet_split(&pkt, data, size, logctx, is_avc, 2, AV_CODEC_ID_H264, 1, 0);
    if (ret < 0) {
        ret = 0;
        goto fail;
    }

    for (i = 0; i < pkt.nb_nals; i++) {
        H2645NAL *nal = &pkt.nals[i];
        switch (nal->type) {
        case H264_NAL_SPS:
            ret = ff_h264_decode_seq_parameter_set(&nal->gb, logctx, ps, 0);
            if (ret < 0)
                goto fail;
            break;
        case H264_NAL_PPS:
            ret = ff_h264_decode_picture_parameter_set(&nal->gb, logctx, ps,
                                                       nal->size_bits);
            if (ret < 0)
                goto fail;
            break;
        default:
            av_log(logctx, AV_LOG_VERBOSE, "Ignoring NAL type %d in extradata\n",
                   nal->type);
            break;
        }
    }

fail:
    ff_h2645_packet_uninit(&pkt);
    return ret;
}

/* There are (invalid) samples in the wild with mp4-style extradata, where the
 * parameter sets are stored unescaped (i.e. as RBSP).
 * This function catches the parameter set decoding failure and tries again
 * after escaping it */
static int decode_extradata_ps_mp4(const uint8_t *buf, int buf_size, H264ParamSets *ps,
                                   int err_recognition, void *logctx)
{
    int ret;

    ret = decode_extradata_ps(buf, buf_size, ps, 1, logctx);
    if (ret < 0 && !(err_recognition & AV_EF_EXPLODE)) {
        GetByteContext gbc;
        PutByteContext pbc;
        uint8_t *escaped_buf;
        int escaped_buf_size;

        av_log(logctx, AV_LOG_WARNING,
               "SPS decoding failure, trying again after escaping the NAL\n");

        if (buf_size / 2 >= (INT16_MAX - AV_INPUT_BUFFER_PADDING_SIZE) / 3)
            return AVERROR(ERANGE);
        escaped_buf_size = buf_size * 3 / 2 + AV_INPUT_BUFFER_PADDING_SIZE;
        escaped_buf = av_mallocz(escaped_buf_size);
        if (!escaped_buf)
            return AVERROR(ENOMEM);

        bytestream2_init(&gbc, buf, buf_size);
        bytestream2_init_writer(&pbc, escaped_buf, escaped_buf_size);

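        /* Re-insert the emulation prevention bytes missing from the unescaped
         * (RBSP) payload: whenever the next three bytes would read as a value
         * in 0x000000..0x000003, write 00 00 03 and leave the third byte for
         * the next iteration, producing a legally escaped NAL unit. */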
        while (bytestream2_get_bytes_left(&gbc)) {
            if (bytestream2_get_bytes_left(&gbc) >= 3 &&
                bytestream2_peek_be24(&gbc) <= 3) {
                bytestream2_put_be24(&pbc, 3);
                bytestream2_skip(&gbc, 2);
            } else
                bytestream2_put_byte(&pbc, bytestream2_get_byte(&gbc));
        }

        escaped_buf_size = bytestream2_tell_p(&pbc);
        AV_WB16(escaped_buf, escaped_buf_size - 2);

        (void)decode_extradata_ps(escaped_buf, escaped_buf_size, ps, 1, logctx);
        // lorex.mp4 decodes ok even with extradata decoding failing
        av_freep(&escaped_buf);
    }

    return 0;
}

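/*
 * Decode SPS/PPS from codec extradata. If the extradata starts with 1 it is
 * treated as an ISO/IEC 14496-15 AVCDecoderConfigurationRecord (avcC):
 * byte 5 holds the SPS count in its low 5 bits, followed by length-prefixed
 * SPS NALs, then a PPS count byte and length-prefixed PPS NALs; the NAL
 * length size used for subsequent packets comes from the low 2 bits of
 * byte 4. Otherwise the extradata is parsed as raw Annex B data.
 */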
int ff_h264_decode_extradata(const uint8_t *data, int size, H264ParamSets *ps,
                             int *is_avc, int *nal_length_size,
                             int err_recognition, void *logctx)
{
    int ret;

    if (!data || size <= 0)
        return -1;

    if (data[0] == 1) {
        int i, cnt, nalsize;
        const uint8_t *p = data;

        *is_avc = 1;

        if (size < 7) {
            av_log(logctx, AV_LOG_ERROR, "avcC %d too short\n", size);
            return AVERROR_INVALIDDATA;
        }

        // Decode sps from avcC
        cnt = *(p + 5) & 0x1f; // Number of sps
        p  += 6;
        for (i = 0; i < cnt; i++) {
            nalsize = AV_RB16(p) + 2;
            if (nalsize > size - (p - data))
                return AVERROR_INVALIDDATA;
            ret = decode_extradata_ps_mp4(p, nalsize, ps, err_recognition, logctx);
            if (ret < 0) {
                av_log(logctx, AV_LOG_ERROR,
                       "Decoding sps %d from avcC failed\n", i);
                return ret;
            }
            p += nalsize;
        }
        // Decode pps from avcC
        cnt = *(p++); // Number of pps
        for (i = 0; i < cnt; i++) {
            nalsize = AV_RB16(p) + 2;
            if (nalsize > size - (p - data))
                return AVERROR_INVALIDDATA;
            ret = decode_extradata_ps_mp4(p, nalsize, ps, err_recognition, logctx);
            if (ret < 0) {
                av_log(logctx, AV_LOG_ERROR,
                       "Decoding pps %d from avcC failed\n", i);
                return ret;
            }
            p += nalsize;
        }
        // Store the NAL length size that will be used to parse all other NALs
        *nal_length_size = (data[4] & 0x03) + 1;
    } else {
        *is_avc = 0;
        ret = decode_extradata_ps(data, size, ps, 0, logctx);
        if (ret < 0)
            return ret;
    }
    return size;
}

/**
 * Compute profile from profile_idc and constraint_set?_flags.
 *
 * @param sps SPS
 *
 * @return profile as defined by FF_PROFILE_H264_*
 */
int ff_h264_get_profile(const SPS *sps)
{
    int profile = sps->profile_idc;

    switch (sps->profile_idc) {
    case FF_PROFILE_H264_BASELINE:
        // constraint_set1_flag set to 1
        profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
        break;
    case FF_PROFILE_H264_HIGH_10:
    case FF_PROFILE_H264_HIGH_422:
    case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
        // constraint_set3_flag set to 1
        profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
        break;
    }

    return profile;
}