mpeg12dec.c
1 /*
2  * MPEG-1/2 decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * MPEG-1/2 decoder
26  */
27 
28 #include "config_components.h"
29 
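/* The bitstream reader is used unchecked here: individual get_bits() calls
 * are not bounds-checked, which is measurably faster for slice parsing.
 * Any small overread is absorbed by the padding FFmpeg guarantees at the
 * end of input buffers. */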
30 #define UNCHECKED_BITSTREAM_READER 1
31 #include <inttypes.h>
32 
33 #include "libavutil/attributes.h"
34 #include "libavutil/emms.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/internal.h"
37 #include "libavutil/mem_internal.h"
38 #include "libavutil/reverse.h"
39 #include "libavutil/stereo3d.h"
40 #include "libavutil/timecode.h"
41 
42 #include "avcodec.h"
43 #include "codec_internal.h"
44 #include "decode.h"
45 #include "error_resilience.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "mpeg_er.h"
51 #include "mpeg12.h"
52 #include "mpeg12codecs.h"
53 #include "mpeg12data.h"
54 #include "mpeg12dec.h"
55 #include "mpegutils.h"
56 #include "mpegvideo.h"
57 #include "mpegvideodata.h"
58 #include "mpegvideodec.h"
59 #include "profiles.h"
60 #include "startcode.h"
61 #include "thread.h"
62 
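/* Upper bound on closed-caption constructs (3 bytes each) accumulated in
 * a53_buf_ref between output frames; larger payloads are rejected with
 * AVERROR(EINVAL). */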
63 #define A53_MAX_CC_COUNT 2000
64 
65 typedef struct Mpeg1Context {
66  MpegEncContext mpeg_enc_ctx;
67  int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
68  int repeat_field; /* true if we must repeat the field */
69  AVPanScan pan_scan; /* some temporary storage for the panscan */
70  AVStereo3D stereo3d;
71  int has_stereo3d;
72  AVBufferRef *a53_buf_ref;
73  uint8_t afd;
74  int has_afd;
75  int slice_count;
76  unsigned aspect_ratio_info;
77  AVRational save_aspect;
78  int save_width, save_height, save_progressive_seq;
79  AVRational frame_rate_ext; /* MPEG-2 specific framerate modificator */
80  unsigned frame_rate_index;
81  int sync; /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
82  int closed_gop;
83  int tmpgexs;
84  int first_slice;
85  int extradata_decoded;
86  int64_t timecode_frame_start; /*< GOP timecode frame start number, in non drop frame format */
87 } Mpeg1Context;
88 
89 #define MB_TYPE_ZERO_MV 0x20000000
90 
91 static const uint32_t ptype2mb_type[7] = {
92  MB_TYPE_INTRA,
93  MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
94  MB_TYPE_L0,
95  MB_TYPE_L0 | MB_TYPE_CBP,
96  MB_TYPE_QUANT | MB_TYPE_INTRA,
97  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
98  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
99 };
100 
101 static const uint32_t btype2mb_type[11] = {
102  MB_TYPE_INTRA,
103  MB_TYPE_L1,
104  MB_TYPE_L1 | MB_TYPE_CBP,
105  MB_TYPE_L0,
106  MB_TYPE_L0 | MB_TYPE_CBP,
107  MB_TYPE_L0L1,
108  MB_TYPE_L0L1 | MB_TYPE_CBP,
109  MB_TYPE_QUANT | MB_TYPE_INTRA,
110  MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP,
111  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
112  MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP,
113 };
114 
115 /* as H.263, but only 17 codes */
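/* The VLC yields the motion_code magnitude; fcode-1 extra bits (the motion
 * residual) refine it. After adding the prediction the result is wrapped
 * into [-16 << shift, (16 << shift) - 1] by sign_extend(), i.e. the modulo
 * decoding required by MPEG-1/2. */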
116 static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
117 {
118  int code, sign, val, shift;
119 
120  code = get_vlc2(&s->gb, ff_mv_vlc, MV_VLC_BITS, 2);
121  if (code == 0)
122  return pred;
123  if (code < 0)
124  return 0xffff;
125 
126  sign = get_bits1(&s->gb);
127  shift = fcode - 1;
128  val = code;
129  if (shift) {
130  val = (val - 1) << shift;
131  val |= get_bits(&s->gb, shift);
132  val++;
133  }
134  if (sign)
135  val = -val;
136  val += pred;
137 
138  /* modulo decoding */
139  return sign_extend(val, 5 + shift);
140 }
141 
142 #define MAX_INDEX (64 - 1)
143 #define check_scantable_index(ctx, x) \
144  do { \
145  if ((x) > MAX_INDEX) { \
146  av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
147  ctx->mb_x, ctx->mb_y); \
148  return AVERROR_INVALIDDATA; \
149  } \
150  } while (0)
151 
152 static inline int mpeg1_decode_block_inter(MpegEncContext *s,
153  int16_t *block, int n)
154 {
155  int level, i, j, run;
156  uint8_t *const scantable = s->intra_scantable.permutated;
157  const uint16_t *quant_matrix = s->inter_matrix;
158  const int qscale = s->qscale;
159 
160  {
161  OPEN_READER(re, &s->gb);
162  i = -1;
163  // special case for first coefficient, no need to add second VLC table
164  UPDATE_CACHE(re, &s->gb);
165  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
166  level = (3 * qscale * quant_matrix[0]) >> 5;
167  level = (level - 1) | 1;
168  if (GET_CACHE(re, &s->gb) & 0x40000000)
169  level = -level;
170  block[0] = level;
171  i++;
172  SKIP_BITS(re, &s->gb, 2);
173  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
174  goto end;
175  }
176  /* now dequantize & decode AC coefficients */
177  for (;;) {
178  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
179  TEX_VLC_BITS, 2, 0);
180 
181  if (level != 0) {
182  i += run;
183  if (i > MAX_INDEX)
184  break;
185  j = scantable[i];
186  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
187  level = (level - 1) | 1;
188  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
189  SHOW_SBITS(re, &s->gb, 1);
190  SKIP_BITS(re, &s->gb, 1);
191  } else {
192  /* escape */
193  run = SHOW_UBITS(re, &s->gb, 6) + 1;
194  LAST_SKIP_BITS(re, &s->gb, 6);
195  UPDATE_CACHE(re, &s->gb);
196  level = SHOW_SBITS(re, &s->gb, 8);
197  SKIP_BITS(re, &s->gb, 8);
198  if (level == -128) {
199  level = SHOW_UBITS(re, &s->gb, 8) - 256;
200  SKIP_BITS(re, &s->gb, 8);
201  } else if (level == 0) {
202  level = SHOW_UBITS(re, &s->gb, 8);
203  SKIP_BITS(re, &s->gb, 8);
204  }
205  i += run;
206  if (i > MAX_INDEX)
207  break;
208  j = scantable[i];
209  if (level < 0) {
210  level = -level;
211  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
212  level = (level - 1) | 1;
213  level = -level;
214  } else {
215  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
216  level = (level - 1) | 1;
217  }
218  }
219 
220  block[j] = level;
221  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
222  break;
223  UPDATE_CACHE(re, &s->gb);
224  }
225 end:
226  LAST_SKIP_BITS(re, &s->gb, 2);
227  CLOSE_READER(re, &s->gb);
228  }
229 
230  check_scantable_index(s, i);
231 
232  s->block_last_index[n] = i;
233  return 0;
234 }
235 
236 static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
237  int16_t *block, int n)
238 {
239  int level, i, j, run;
240  uint8_t *const scantable = s->intra_scantable.permutated;
241  const uint16_t *quant_matrix;
242  const int qscale = s->qscale;
243  int mismatch;
244 
245  mismatch = 1;
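 /* MPEG-2 mismatch control: the parity of the sum of all dequantized
  * coefficients is tracked in 'mismatch', and the LSB of the last
  * coefficient (block[63]) is toggled at the end so the sum ends up odd. */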
246 
247  {
248  OPEN_READER(re, &s->gb);
249  i = -1;
250  if (n < 4)
251  quant_matrix = s->inter_matrix;
252  else
253  quant_matrix = s->chroma_inter_matrix;
254 
255  // Special case for first coefficient, no need to add second VLC table.
256  UPDATE_CACHE(re, &s->gb);
257  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
258  level = (3 * qscale * quant_matrix[0]) >> 5;
259  if (GET_CACHE(re, &s->gb) & 0x40000000)
260  level = -level;
261  block[0] = level;
262  mismatch ^= level;
263  i++;
264  SKIP_BITS(re, &s->gb, 2);
265  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
266  goto end;
267  }
268 
269  /* now dequantize & decode AC coefficients */
270  for (;;) {
271  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
272  TEX_VLC_BITS, 2, 0);
273 
274  if (level != 0) {
275  i += run;
276  if (i > MAX_INDEX)
277  break;
278  j = scantable[i];
279  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
280  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
281  SHOW_SBITS(re, &s->gb, 1);
282  SKIP_BITS(re, &s->gb, 1);
283  } else {
284  /* escape */
285  run = SHOW_UBITS(re, &s->gb, 6) + 1;
286  LAST_SKIP_BITS(re, &s->gb, 6);
287  UPDATE_CACHE(re, &s->gb);
288  level = SHOW_SBITS(re, &s->gb, 12);
289  SKIP_BITS(re, &s->gb, 12);
290 
291  i += run;
292  if (i > MAX_INDEX)
293  break;
294  j = scantable[i];
295  if (level < 0) {
296  level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
297  level = -level;
298  } else {
299  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
300  }
301  }
302 
303  mismatch ^= level;
304  block[j] = level;
305  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
306  break;
307  UPDATE_CACHE(re, &s->gb);
308  }
309 end:
310  LAST_SKIP_BITS(re, &s->gb, 2);
311  CLOSE_READER(re, &s->gb);
312  }
313  block[63] ^= (mismatch & 1);
314 
315  check_scantable_index(s, i);
316 
317  s->block_last_index[n] = i;
318  return 0;
319 }
320 
321 static inline int mpeg2_decode_block_intra(MpegEncContext *s,
322  int16_t *block, int n)
323 {
324  int level, dc, diff, i, j, run;
325  int component;
326  const RL_VLC_ELEM *rl_vlc;
327  uint8_t *const scantable = s->intra_scantable.permutated;
328  const uint16_t *quant_matrix;
329  const int qscale = s->qscale;
330  int mismatch;
331 
332  /* DC coefficient */
333  if (n < 4) {
334  quant_matrix = s->intra_matrix;
335  component = 0;
336  } else {
337  quant_matrix = s->chroma_intra_matrix;
338  component = (n & 1) + 1;
339  }
340  diff = decode_dc(&s->gb, component);
341  dc = s->last_dc[component];
342  dc += diff;
343  s->last_dc[component] = dc;
344  block[0] = dc * (1 << (3 - s->intra_dc_precision));
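 /* intra_dc_precision selects 8..11 bit DC; scaling by 8 >> precision
  * (i.e. 1 << (3 - precision)) puts block[0] on a common scale for the IDCT. */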
345  ff_tlog(s->avctx, "dc=%d\n", block[0]);
346  mismatch = block[0] ^ 1;
347  i = 0;
348  if (s->intra_vlc_format)
349  rl_vlc = ff_mpeg2_rl_vlc;
350  else
351  rl_vlc = ff_mpeg1_rl_vlc;
352 
353  {
354  OPEN_READER(re, &s->gb);
355  /* now dequantize & decode AC coefficients */
356  for (;;) {
357  UPDATE_CACHE(re, &s->gb);
358  GET_RL_VLC(level, run, re, &s->gb, rl_vlc,
359  TEX_VLC_BITS, 2, 0);
360 
361  if (level == 127) {
362  break;
363  } else if (level != 0) {
364  i += run;
365  if (i > MAX_INDEX)
366  break;
367  j = scantable[i];
368  level = (level * qscale * quant_matrix[j]) >> 4;
369  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
370  SHOW_SBITS(re, &s->gb, 1);
371  LAST_SKIP_BITS(re, &s->gb, 1);
372  } else {
373  /* escape */
374  run = SHOW_UBITS(re, &s->gb, 6) + 1;
375  SKIP_BITS(re, &s->gb, 6);
376  level = SHOW_SBITS(re, &s->gb, 12);
377  LAST_SKIP_BITS(re, &s->gb, 12);
378  i += run;
379  if (i > MAX_INDEX)
380  break;
381  j = scantable[i];
382  if (level < 0) {
383  level = (-level * qscale * quant_matrix[j]) >> 4;
384  level = -level;
385  } else {
386  level = (level * qscale * quant_matrix[j]) >> 4;
387  }
388  }
389 
390  mismatch ^= level;
391  block[j] = level;
392  }
393  CLOSE_READER(re, &s->gb);
394  }
395  block[63] ^= mismatch & 1;
396 
397  check_scantable_index(s, i);
398 
399  s->block_last_index[n] = i;
400  return 0;
401 }
402 
403 /******************************************/
404 /* decoding */
405 
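/* Differential motion vector for dual-prime prediction: one '1' bit plus a
 * sign bit yields +1 or -1, a single '0' bit yields 0. */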
406 static inline int get_dmv(MpegEncContext *s)
407 {
408  if (get_bits1(&s->gb))
409  return 1 - (get_bits1(&s->gb) << 1);
410  else
411  return 0;
412 }
413 
414 /* motion type (for MPEG-2) */
415 #define MT_FIELD 1
416 #define MT_FRAME 2
417 #define MT_16X8 2
418 #define MT_DMV 3
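/* Motion-type code 2 means frame-based prediction in frame pictures but
 * 16x8 prediction in field pictures, which is why MT_FRAME and MT_16X8
 * share the same value. */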
419 
420 static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
421 {
422  int i, j, k, cbp, val, mb_type, motion_type;
423  const int mb_block_count = 4 + (1 << s->chroma_format);
424  int ret;
425 
426  ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
427 
428  av_assert2(s->mb_skipped == 0);
429 
430  if (s->mb_skip_run-- != 0) {
431  if (s->pict_type == AV_PICTURE_TYPE_P) {
432  s->mb_skipped = 1;
433  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
434  MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
435  } else {
436  int mb_type;
437 
438  if (s->mb_x)
439  mb_type = s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
440  else
441  // FIXME not sure if this is allowed in MPEG at all
442  mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
443  if (IS_INTRA(mb_type)) {
444  av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
445  return AVERROR_INVALIDDATA;
446  }
447  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
448  mb_type | MB_TYPE_SKIP;
449 
450  if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
451  s->mb_skipped = 1;
452  }
453 
454  return 0;
455  }
456 
457  switch (s->pict_type) {
458  default:
459  case AV_PICTURE_TYPE_I:
460  if (get_bits1(&s->gb) == 0) {
461  if (get_bits1(&s->gb) == 0) {
462  av_log(s->avctx, AV_LOG_ERROR,
463  "Invalid mb type in I-frame at %d %d\n",
464  s->mb_x, s->mb_y);
465  return AVERROR_INVALIDDATA;
466  }
467  mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
468  } else {
469  mb_type = MB_TYPE_INTRA;
470  }
471  break;
472  case AV_PICTURE_TYPE_P:
473  mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc, MB_PTYPE_VLC_BITS, 1);
474  if (mb_type < 0) {
475  av_log(s->avctx, AV_LOG_ERROR,
476  "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
477  return AVERROR_INVALIDDATA;
478  }
479  mb_type = ptype2mb_type[mb_type];
480  break;
481  case AV_PICTURE_TYPE_B:
482  mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc, MB_BTYPE_VLC_BITS, 1);
483  if (mb_type < 0) {
484  av_log(s->avctx, AV_LOG_ERROR,
485  "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);
486  return AVERROR_INVALIDDATA;
487  }
488  mb_type = btype2mb_type[mb_type];
489  break;
490  }
491  ff_tlog(s->avctx, "mb_type=%x\n", mb_type);
492 // motion_type = 0; /* avoid warning */
493  if (IS_INTRA(mb_type)) {
494  s->bdsp.clear_blocks(s->block[0]);
495 
496  if (!s->chroma_y_shift)
497  s->bdsp.clear_blocks(s->block[6]);
498 
499  /* compute DCT type */
500  // FIXME: add an interlaced_dct coded var?
501  if (s->picture_structure == PICT_FRAME &&
502  !s->frame_pred_frame_dct)
503  s->interlaced_dct = get_bits1(&s->gb);
504 
505  if (IS_QUANT(mb_type))
506  s->qscale = mpeg_get_qscale(s);
507 
508  if (s->concealment_motion_vectors) {
509  /* just parse them */
510  if (s->picture_structure != PICT_FRAME)
511  skip_bits1(&s->gb); /* field select */
512 
513  s->mv[0][0][0] =
514  s->last_mv[0][0][0] =
515  s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0],
516  s->last_mv[0][0][0]);
517  s->mv[0][0][1] =
518  s->last_mv[0][0][1] =
519  s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1],
520  s->last_mv[0][0][1]);
521 
522  check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
523  } else {
524  /* reset mv prediction */
525  memset(s->last_mv, 0, sizeof(s->last_mv));
526  }
527  s->mb_intra = 1;
528 
529  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
530  for (i = 0; i < mb_block_count; i++)
531  if ((ret = mpeg2_decode_block_intra(s, *s->pblocks[i], i)) < 0)
532  return ret;
533  } else {
534  for (i = 0; i < 6; i++) {
535  ret = ff_mpeg1_decode_block_intra(&s->gb,
536  s->intra_matrix,
537  s->intra_scantable.permutated,
538  s->last_dc, *s->pblocks[i],
539  i, s->qscale);
540  if (ret < 0) {
541  av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
542  s->mb_x, s->mb_y);
543  return ret;
544  }
545 
546  s->block_last_index[i] = ret;
547  }
548  }
549  } else {
550  if (mb_type & MB_TYPE_ZERO_MV) {
551  av_assert2(mb_type & MB_TYPE_CBP);
552 
553  s->mv_dir = MV_DIR_FORWARD;
554  if (s->picture_structure == PICT_FRAME) {
555  if (s->picture_structure == PICT_FRAME
556  && !s->frame_pred_frame_dct)
557  s->interlaced_dct = get_bits1(&s->gb);
558  s->mv_type = MV_TYPE_16X16;
559  } else {
560  s->mv_type = MV_TYPE_FIELD;
561  mb_type |= MB_TYPE_INTERLACED;
562  s->field_select[0][0] = s->picture_structure - 1;
563  }
564 
565  if (IS_QUANT(mb_type))
566  s->qscale = mpeg_get_qscale(s);
567 
568  s->last_mv[0][0][0] = 0;
569  s->last_mv[0][0][1] = 0;
570  s->last_mv[0][1][0] = 0;
571  s->last_mv[0][1][1] = 0;
572  s->mv[0][0][0] = 0;
573  s->mv[0][0][1] = 0;
574  } else {
575  av_assert2(mb_type & MB_TYPE_L0L1);
576  // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
577  /* get additional motion vector type */
578  if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
579  motion_type = MT_FRAME;
580  } else {
581  motion_type = get_bits(&s->gb, 2);
582  if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
583  s->interlaced_dct = get_bits1(&s->gb);
584  }
585 
586  if (IS_QUANT(mb_type))
587  s->qscale = mpeg_get_qscale(s);
588 
589  /* motion vectors */
590  s->mv_dir = (mb_type >> 13) & 3;
591  ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
592  switch (motion_type) {
593  case MT_FRAME: /* or MT_16X8 */
594  if (s->picture_structure == PICT_FRAME) {
595  mb_type |= MB_TYPE_16x16;
596  s->mv_type = MV_TYPE_16X16;
597  for (i = 0; i < 2; i++) {
598  if (USES_LIST(mb_type, i)) {
599  /* MT_FRAME */
600  s->mv[i][0][0] =
601  s->last_mv[i][0][0] =
602  s->last_mv[i][1][0] =
603  mpeg_decode_motion(s, s->mpeg_f_code[i][0],
604  s->last_mv[i][0][0]);
605  s->mv[i][0][1] =
606  s->last_mv[i][0][1] =
607  s->last_mv[i][1][1] =
608  mpeg_decode_motion(s, s->mpeg_f_code[i][1],
609  s->last_mv[i][0][1]);
610  /* full_pel: only for MPEG-1 */
611  if (s->full_pel[i]) {
612  s->mv[i][0][0] *= 2;
613  s->mv[i][0][1] *= 2;
614  }
615  }
616  }
617  } else {
618  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
619  s->mv_type = MV_TYPE_16X8;
620  for (i = 0; i < 2; i++) {
621  if (USES_LIST(mb_type, i)) {
622  /* MT_16X8 */
623  for (j = 0; j < 2; j++) {
624  s->field_select[i][j] = get_bits1(&s->gb);
625  for (k = 0; k < 2; k++) {
626  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
627  s->last_mv[i][j][k]);
628  s->last_mv[i][j][k] = val;
629  s->mv[i][j][k] = val;
630  }
631  }
632  }
633  }
634  }
635  break;
636  case MT_FIELD:
637  s->mv_type = MV_TYPE_FIELD;
638  if (s->picture_structure == PICT_FRAME) {
639  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
640  for (i = 0; i < 2; i++) {
641  if (USES_LIST(mb_type, i)) {
642  for (j = 0; j < 2; j++) {
643  s->field_select[i][j] = get_bits1(&s->gb);
644  val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
645  s->last_mv[i][j][0]);
646  s->last_mv[i][j][0] = val;
647  s->mv[i][j][0] = val;
648  ff_tlog(s->avctx, "fmx=%d\n", val);
649  val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
650  s->last_mv[i][j][1] >> 1);
651  s->last_mv[i][j][1] = 2 * val;
652  s->mv[i][j][1] = val;
653  ff_tlog(s->avctx, "fmy=%d\n", val);
654  }
655  }
656  }
657  } else {
658  av_assert0(!s->progressive_sequence);
659  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
660  for (i = 0; i < 2; i++) {
661  if (USES_LIST(mb_type, i)) {
662  s->field_select[i][0] = get_bits1(&s->gb);
663  for (k = 0; k < 2; k++) {
664  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
665  s->last_mv[i][0][k]);
666  s->last_mv[i][0][k] = val;
667  s->last_mv[i][1][k] = val;
668  s->mv[i][0][k] = val;
669  }
670  }
671  }
672  }
673  break;
674  case MT_DMV:
675  if (s->progressive_sequence){
676  av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
677  return AVERROR_INVALIDDATA;
678  }
679  s->mv_type = MV_TYPE_DMV;
680  for (i = 0; i < 2; i++) {
681  if (USES_LIST(mb_type, i)) {
682  int dmx, dmy, mx, my, m;
683  const int my_shift = s->picture_structure == PICT_FRAME;
684 
685  mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
686  s->last_mv[i][0][0]);
687  s->last_mv[i][0][0] = mx;
688  s->last_mv[i][1][0] = mx;
689  dmx = get_dmv(s);
690  my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
691  s->last_mv[i][0][1] >> my_shift);
692  dmy = get_dmv(s);
693 
694 
695  s->last_mv[i][0][1] = my * (1 << my_shift);
696  s->last_mv[i][1][1] = my * (1 << my_shift);
697 
698  s->mv[i][0][0] = mx;
699  s->mv[i][0][1] = my;
700  s->mv[i][1][0] = mx; // not used
701  s->mv[i][1][1] = my; // not used
702 
703  if (s->picture_structure == PICT_FRAME) {
704  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
705 
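 /* Dual prime in a frame picture: the transmitted vector is scaled by the
  * field distance (1 or 3 field periods depending on top_field_first), the
  * small differential (dmx, dmy) is added, and the vertical component is
  * corrected by -1/+1 line for the opposite-parity field. */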
706  // m = 1 + 2 * s->top_field_first;
707  m = s->top_field_first ? 1 : 3;
708 
709  /* top -> top pred */
710  s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
711  s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
712  m = 4 - m;
713  s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
714  s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
715  } else {
716  mb_type |= MB_TYPE_16x16;
717 
718  s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
719  s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
720  if (s->picture_structure == PICT_TOP_FIELD)
721  s->mv[i][2][1]--;
722  else
723  s->mv[i][2][1]++;
724  }
725  }
726  }
727  break;
728  default:
729  av_log(s->avctx, AV_LOG_ERROR,
730  "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
731  return AVERROR_INVALIDDATA;
732  }
733  }
734 
735  s->mb_intra = 0;
736  if (HAS_CBP(mb_type)) {
737  s->bdsp.clear_blocks(s->block[0]);
738 
739  cbp = get_vlc2(&s->gb, ff_mb_pat_vlc, MB_PAT_VLC_BITS, 1);
740  if (mb_block_count > 6) {
741  cbp *= 1 << mb_block_count - 6;
742  cbp |= get_bits(&s->gb, mb_block_count - 6);
743  s->bdsp.clear_blocks(s->block[6]);
744  }
745  if (cbp <= 0) {
746  av_log(s->avctx, AV_LOG_ERROR,
747  "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
748  return AVERROR_INVALIDDATA;
749  }
750 
751  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
752  cbp <<= 12 - mb_block_count;
753 
754  for (i = 0; i < mb_block_count; i++) {
755  if (cbp & (1 << 11)) {
756  if ((ret = mpeg2_decode_block_non_intra(s, *s->pblocks[i], i)) < 0)
757  return ret;
758  } else {
759  s->block_last_index[i] = -1;
760  }
761  cbp += cbp;
762  }
763  } else {
764  for (i = 0; i < 6; i++) {
765  if (cbp & 32) {
766  if ((ret = mpeg1_decode_block_inter(s, *s->pblocks[i], i)) < 0)
767  return ret;
768  } else {
769  s->block_last_index[i] = -1;
770  }
771  cbp += cbp;
772  }
773  }
774  } else {
775  for (i = 0; i < 12; i++)
776  s->block_last_index[i] = -1;
777  }
778  }
779 
780  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
781 
782  return 0;
783 }
784 
785 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
786 {
787  Mpeg1Context *s = avctx->priv_data;
788  MpegEncContext *s2 = &s->mpeg_enc_ctx;
789 
790  if ( avctx->codec_tag != AV_RL32("VCR2")
791  && avctx->codec_tag != AV_RL32("BW10"))
792  avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
793  ff_mpv_decode_init(s2, avctx);
794 
795  ff_mpeg12_init_vlcs();
796 
797  s2->chroma_format = 1;
798  s->mpeg_enc_ctx_allocated = 0;
799  s->repeat_field = 0;
800  avctx->color_range = AVCOL_RANGE_MPEG;
801  return 0;
802 }
803 
804 #if HAVE_THREADS
805 static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
806  const AVCodecContext *avctx_from)
807 {
808  Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
809  MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
810  int err;
811 
812  if (avctx == avctx_from ||
813  !ctx_from->mpeg_enc_ctx_allocated ||
814  !s1->context_initialized)
815  return 0;
816 
817  err = ff_mpeg_update_thread_context(avctx, avctx_from);
818  if (err)
819  return err;
820 
821  if (!ctx->mpeg_enc_ctx_allocated)
822  memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
823 
824  return 0;
825 }
826 #endif
827 
828 static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = {
829 #if CONFIG_MPEG1_NVDEC_HWACCEL
830  AV_PIX_FMT_CUDA,
831 #endif
832 #if CONFIG_MPEG1_VDPAU_HWACCEL
833  AV_PIX_FMT_VDPAU,
834 #endif
835  AV_PIX_FMT_YUV420P,
836  AV_PIX_FMT_NONE
837 };
838 
839 static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
840 #if CONFIG_MPEG2_NVDEC_HWACCEL
841  AV_PIX_FMT_CUDA,
842 #endif
843 #if CONFIG_MPEG2_VDPAU_HWACCEL
844  AV_PIX_FMT_VDPAU,
845 #endif
846 #if CONFIG_MPEG2_DXVA2_HWACCEL
847  AV_PIX_FMT_DXVA2_VLD,
848 #endif
849 #if CONFIG_MPEG2_D3D11VA_HWACCEL
850  AV_PIX_FMT_D3D11VA_VLD,
851  AV_PIX_FMT_D3D11,
852 #endif
853 #if CONFIG_MPEG2_D3D12VA_HWACCEL
854  AV_PIX_FMT_D3D12,
855 #endif
856 #if CONFIG_MPEG2_VAAPI_HWACCEL
857  AV_PIX_FMT_VAAPI,
858 #endif
859 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
860  AV_PIX_FMT_VIDEOTOOLBOX,
861 #endif
862  AV_PIX_FMT_YUV420P,
863  AV_PIX_FMT_NONE
864 };
865 
866 static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
867  AV_PIX_FMT_YUV422P,
868  AV_PIX_FMT_NONE
869 };
870 
871 static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
872  AV_PIX_FMT_YUV444P,
873  AV_PIX_FMT_NONE
874 };
875 
876 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
877 {
878  Mpeg1Context *s1 = avctx->priv_data;
879  MpegEncContext *s = &s1->mpeg_enc_ctx;
880  const enum AVPixelFormat *pix_fmts;
881 
882  if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
883  return AV_PIX_FMT_GRAY8;
884 
885  if (s->chroma_format < 2)
886  pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
887  mpeg1_hwaccel_pixfmt_list_420 :
888  mpeg2_hwaccel_pixfmt_list_420;
889  else if (s->chroma_format == 2)
890  pix_fmts = mpeg12_pixfmt_list_422;
891  else
892  pix_fmts = mpeg12_pixfmt_list_444;
893 
894  return ff_get_format(avctx, pix_fmts);
895 }
896 
897 /* Call this function when we know all parameters.
898  * It may be called in different places for MPEG-1 and MPEG-2. */
899 static int mpeg_decode_postinit(AVCodecContext *avctx)
900 {
901  Mpeg1Context *s1 = avctx->priv_data;
902  MpegEncContext *s = &s1->mpeg_enc_ctx;
903  int ret;
904 
905  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
906  // MPEG-1 aspect
907  AVRational aspect_inv = av_d2q(ff_mpeg1_aspect[s1->aspect_ratio_info], 255);
908  avctx->sample_aspect_ratio = (AVRational) { aspect_inv.den, aspect_inv.num };
909  } else { // MPEG-2
910  // MPEG-2 aspect
911  if (s1->aspect_ratio_info > 1) {
912  AVRational dar =
913  av_mul_q(av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
914  (AVRational) { s1->pan_scan.width,
915  s1->pan_scan.height }),
916  (AVRational) { s->width, s->height });
917 
918  /* We ignore the spec here and guess a bit as reality does not
919  * match the spec, see for example res_change_ffmpeg_aspect.ts
920  * and sequence-display-aspect.mpg.
921  * issue1613, 621, 562 */
922  if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
923  (av_cmp_q(dar, (AVRational) { 4, 3 }) &&
924  av_cmp_q(dar, (AVRational) { 16, 9 }))) {
925  s->avctx->sample_aspect_ratio =
926  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
927  (AVRational) { s->width, s->height });
928  } else {
929  s->avctx->sample_aspect_ratio =
930  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
931  (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
932 // issue1613 4/3 16/9 -> 16/9
933 // res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
934 // widescreen-issue562.mpg 4/3 16/9 -> 16/9
935 // s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height});
936  ff_dlog(avctx, "aspect A %d/%d\n",
937  ff_mpeg2_aspect[s1->aspect_ratio_info].num,
938  ff_mpeg2_aspect[s1->aspect_ratio_info].den);
939  ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
940  s->avctx->sample_aspect_ratio.den);
941  }
942  } else {
943  s->avctx->sample_aspect_ratio =
944  ff_mpeg2_aspect[s1->aspect_ratio_info];
945  }
946  } // MPEG-2
947 
948  if (av_image_check_sar(s->width, s->height,
949  avctx->sample_aspect_ratio) < 0) {
950  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
951  avctx->sample_aspect_ratio.num,
952  avctx->sample_aspect_ratio.den);
953  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
954  }
955 
956  if ((s1->mpeg_enc_ctx_allocated == 0) ||
957  avctx->coded_width != s->width ||
958  avctx->coded_height != s->height ||
959  s1->save_width != s->width ||
960  s1->save_height != s->height ||
961  av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) ||
962  (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
963  0) {
964  if (s1->mpeg_enc_ctx_allocated) {
965  ff_mpv_common_end(s);
966  s1->mpeg_enc_ctx_allocated = 0;
967  }
968 
969  ret = ff_set_dimensions(avctx, s->width, s->height);
970  if (ret < 0)
971  return ret;
972 
973  if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->bit_rate &&
974  (s->bit_rate != 0x3FFFF*400)) {
975  avctx->rc_max_rate = s->bit_rate;
976  } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s->bit_rate &&
977  (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) {
978  avctx->bit_rate = s->bit_rate;
979  }
980  s1->save_aspect = s->avctx->sample_aspect_ratio;
981  s1->save_width = s->width;
982  s1->save_height = s->height;
983  s1->save_progressive_seq = s->progressive_sequence;
984 
985  /* low_delay may be forced, in this case we will have B-frames
986  * that behave like P-frames. */
987  avctx->has_b_frames = !s->low_delay;
988 
989  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
990  // MPEG-1 fps
991  avctx->framerate = ff_mpeg12_frame_rate_tab[s1->frame_rate_index];
992 #if FF_API_TICKS_PER_FRAME
993 FF_DISABLE_DEPRECATION_WARNINGS
994  avctx->ticks_per_frame = 1;
995 FF_ENABLE_DEPRECATION_WARNINGS
996 #endif
997 
998  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
999  } else { // MPEG-2
1000  // MPEG-2 fps
1001  av_reduce(&s->avctx->framerate.num,
1002  &s->avctx->framerate.den,
1003  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].num * s1->frame_rate_ext.num,
1004  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].den * s1->frame_rate_ext.den,
1005  1 << 30);
1006 #if FF_API_TICKS_PER_FRAME
1007 FF_DISABLE_DEPRECATION_WARNINGS
1008  avctx->ticks_per_frame = 2;
1009 FF_ENABLE_DEPRECATION_WARNINGS
1010 #endif
1011 
1012  switch (s->chroma_format) {
1013  case 1: avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break;
1014  case 2:
1015  case 3: avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; break;
1016  default: av_assert0(0);
1017  }
1018  } // MPEG-2
1019 
1020  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1021 
1022  if ((ret = ff_mpv_common_init(s)) < 0)
1023  return ret;
1024 
1025  s1->mpeg_enc_ctx_allocated = 1;
1026  }
1027  return 0;
1028 }
1029 
1030 static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
1031  int buf_size)
1032 {
1033  Mpeg1Context *s1 = avctx->priv_data;
1034  MpegEncContext *s = &s1->mpeg_enc_ctx;
1035  int ref, f_code, vbv_delay, ret;
1036 
1037  ret = init_get_bits8(&s->gb, buf, buf_size);
1038  if (ret < 0)
1039  return ret;
1040 
1041  ref = get_bits(&s->gb, 10); /* temporal ref */
1042  s->pict_type = get_bits(&s->gb, 3);
1043  if (s->pict_type == 0 || s->pict_type > 3)
1044  return AVERROR_INVALIDDATA;
1045 
1046  vbv_delay = get_bits(&s->gb, 16);
1047  s->vbv_delay = vbv_delay;
1048  if (s->pict_type == AV_PICTURE_TYPE_P ||
1049  s->pict_type == AV_PICTURE_TYPE_B) {
1050  s->full_pel[0] = get_bits1(&s->gb);
1051  f_code = get_bits(&s->gb, 3);
1052  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1053  return AVERROR_INVALIDDATA;
1054  f_code += !f_code;
1055  s->mpeg_f_code[0][0] = f_code;
1056  s->mpeg_f_code[0][1] = f_code;
1057  }
1058  if (s->pict_type == AV_PICTURE_TYPE_B) {
1059  s->full_pel[1] = get_bits1(&s->gb);
1060  f_code = get_bits(&s->gb, 3);
1061  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1062  return AVERROR_INVALIDDATA;
1063  f_code += !f_code;
1064  s->mpeg_f_code[1][0] = f_code;
1065  s->mpeg_f_code[1][1] = f_code;
1066  }
1067 
1068  if (avctx->debug & FF_DEBUG_PICT_INFO)
1069  av_log(avctx, AV_LOG_DEBUG,
1070  "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
1071 
1072  s->y_dc_scale = 8;
1073  s->c_dc_scale = 8;
1074  return 0;
1075 }
1076 
1077 static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
1078 {
1079  MpegEncContext *s = &s1->mpeg_enc_ctx;
1080  int horiz_size_ext, vert_size_ext;
1081  int bit_rate_ext;
1082 
1083  skip_bits(&s->gb, 1); /* profile and level esc*/
1084  s->avctx->profile = get_bits(&s->gb, 3);
1085  s->avctx->level = get_bits(&s->gb, 4);
1086  s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
1087  s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
1088 
1089  if (!s->chroma_format) {
1090  s->chroma_format = 1;
1091  av_log(s->avctx, AV_LOG_WARNING, "Chroma format invalid\n");
1092  }
1093 
1094  horiz_size_ext = get_bits(&s->gb, 2);
1095  vert_size_ext = get_bits(&s->gb, 2);
1096  s->width |= (horiz_size_ext << 12);
1097  s->height |= (vert_size_ext << 12);
1098  bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */
1099  s->bit_rate += (bit_rate_ext << 18) * 400LL;
1100  check_marker(s->avctx, &s->gb, "after bit rate extension");
1101  s->avctx->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;
1102 
1103  s->low_delay = get_bits1(&s->gb);
1104  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1105  s->low_delay = 1;
1106 
1107  s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1;
1108  s1->frame_rate_ext.den = get_bits(&s->gb, 5) + 1;
1109 
1110  ff_dlog(s->avctx, "sequence extension\n");
1111  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1112 
1113  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1114  av_log(s->avctx, AV_LOG_DEBUG,
1115  "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
1116  s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
1117  s->avctx->rc_buffer_size, s->bit_rate);
1118 }
1119 
1120 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
1121 {
1122  MpegEncContext *s = &s1->mpeg_enc_ctx;
1123  int color_description, w, h;
1124 
1125  skip_bits(&s->gb, 3); /* video format */
1126  color_description = get_bits1(&s->gb);
1127  if (color_description) {
1128  s->avctx->color_primaries = get_bits(&s->gb, 8);
1129  s->avctx->color_trc = get_bits(&s->gb, 8);
1130  s->avctx->colorspace = get_bits(&s->gb, 8);
1131  }
1132  w = get_bits(&s->gb, 14);
1133  skip_bits(&s->gb, 1); // marker
1134  h = get_bits(&s->gb, 14);
1135  // remaining 3 bits are zero padding
1136 
1137  s1->pan_scan.width = 16 * w;
1138  s1->pan_scan.height = 16 * h;
1139 
1140  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1141  av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
1142 }
1143 
1144 static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
1145 {
1146  MpegEncContext *s = &s1->mpeg_enc_ctx;
1147  int i, nofco;
1148 
1149  nofco = 1;
1150  if (s->progressive_sequence) {
1151  if (s->repeat_first_field) {
1152  nofco++;
1153  if (s->top_field_first)
1154  nofco++;
1155  }
1156  } else {
1157  if (s->picture_structure == PICT_FRAME) {
1158  nofco++;
1159  if (s->repeat_first_field)
1160  nofco++;
1161  }
1162  }
1163  for (i = 0; i < nofco; i++) {
1164  s1->pan_scan.position[i][0] = get_sbits(&s->gb, 16);
1165  skip_bits(&s->gb, 1); // marker
1166  s1->pan_scan.position[i][1] = get_sbits(&s->gb, 16);
1167  skip_bits(&s->gb, 1); // marker
1168  }
1169 
1170  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1171  av_log(s->avctx, AV_LOG_DEBUG,
1172  "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
1173  s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
1174  s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
1175  s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
1176 }
1177 
1178 static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
1179  uint16_t matrix1[64], int intra)
1180 {
1181  int i;
1182 
1183  for (i = 0; i < 64; i++) {
1184  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
1185  int v = get_bits(&s->gb, 8);
1186  if (v == 0) {
1187  av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
1188  return AVERROR_INVALIDDATA;
1189  }
1190  if (intra && i == 0 && v != 8) {
1191  av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
1192  v = 8; // needed by pink.mpg / issue1046
1193  }
1194  matrix0[j] = v;
1195  if (matrix1)
1196  matrix1[j] = v;
1197  }
1198  return 0;
1199 }
1200 
1201 static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
1202 {
1203  ff_dlog(s->avctx, "matrix extension\n");
1204 
1205  if (get_bits1(&s->gb))
1206  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1207  if (get_bits1(&s->gb))
1208  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1209  if (get_bits1(&s->gb))
1210  load_matrix(s, s->chroma_intra_matrix, NULL, 1);
1211  if (get_bits1(&s->gb))
1212  load_matrix(s, s->chroma_inter_matrix, NULL, 0);
1213 }
1214 
1215 static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
1216 {
1217  MpegEncContext *s = &s1->mpeg_enc_ctx;
1218 
1219  s->full_pel[0] = s->full_pel[1] = 0;
1220  s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
1221  s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
1222  s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
1223  s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
1224  s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
1225  s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
1226  s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
1227  s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
1228  if (!s->pict_type && s1->mpeg_enc_ctx_allocated) {
1229  av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code\n");
1230  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1231  return AVERROR_INVALIDDATA;
1232  av_log(s->avctx, AV_LOG_WARNING, "Guessing pict_type from mpeg_f_code\n");
1233  if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
1234  if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
1235  s->pict_type = AV_PICTURE_TYPE_I;
1236  else
1237  s->pict_type = AV_PICTURE_TYPE_P;
1238  } else
1239  s->pict_type = AV_PICTURE_TYPE_B;
1240  }
1241 
1242  s->intra_dc_precision = get_bits(&s->gb, 2);
1243  s->picture_structure = get_bits(&s->gb, 2);
1244  s->top_field_first = get_bits1(&s->gb);
1245  s->frame_pred_frame_dct = get_bits1(&s->gb);
1246  s->concealment_motion_vectors = get_bits1(&s->gb);
1247  s->q_scale_type = get_bits1(&s->gb);
1248  s->intra_vlc_format = get_bits1(&s->gb);
1249  s->alternate_scan = get_bits1(&s->gb);
1250  s->repeat_first_field = get_bits1(&s->gb);
1251  s->chroma_420_type = get_bits1(&s->gb);
1252  s->progressive_frame = get_bits1(&s->gb);
1253 
1254  if (s->alternate_scan) {
1255  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
1256  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
1257  } else {
1258  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
1259  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
1260  }
1261 
1262  /* composite display not parsed */
1263  ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
1264  ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
1265  ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
1266  ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
1267  ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
1268  ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
1269  ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
1270  ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
1271  ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
1272 
1273  return 0;
1274 }
1275 
1276 static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
1277 {
1278  AVCodecContext *avctx = s->avctx;
1279  Mpeg1Context *s1 = (Mpeg1Context *) s;
1280  int ret;
1281 
1282  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
1283  if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
1284  return AVERROR_INVALIDDATA;
1285  }
1286 
1287  /* start frame decoding */
1288  if (s->first_field || s->picture_structure == PICT_FRAME) {
1289  AVFrameSideData *pan_scan;
1290 
1291  if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
1292  return ret;
1293 
1294  ff_mpeg_er_frame_start(s);
1295 
1296  /* first check if we must repeat the frame */
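 /* repeat_pict counts extra fields: 1 = repeat_first_field (3 fields),
  * 2 = frame doubling, 4 = frame tripling (progressive sequences only). */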
1297  s->current_picture_ptr->f->repeat_pict = 0;
1298  if (s->repeat_first_field) {
1299  if (s->progressive_sequence) {
1300  if (s->top_field_first)
1301  s->current_picture_ptr->f->repeat_pict = 4;
1302  else
1303  s->current_picture_ptr->f->repeat_pict = 2;
1304  } else if (s->progressive_frame) {
1305  s->current_picture_ptr->f->repeat_pict = 1;
1306  }
1307  }
1308 
1309  ret = ff_frame_new_side_data(s->avctx, s->current_picture_ptr->f,
1310  AV_FRAME_DATA_PANSCAN, sizeof(s1->pan_scan),
1311  &pan_scan);
1312  if (ret < 0)
1313  return ret;
1314  if (pan_scan)
1315  memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
1316 
1317  if (s1->a53_buf_ref) {
1318  ret = ff_frame_new_side_data_from_buf(
1319  s->avctx, s->current_picture_ptr->f, AV_FRAME_DATA_A53_CC,
1320  &s1->a53_buf_ref, NULL);
1321  if (ret < 0)
1322  return ret;
1323  }
1324 
1325  if (s1->has_stereo3d) {
1326  AVStereo3D *stereo = av_stereo3d_create_side_data(s->current_picture_ptr->f);
1327  if (!stereo)
1328  return AVERROR(ENOMEM);
1329 
1330  *stereo = s1->stereo3d;
1331  s1->has_stereo3d = 0;
1332  }
1333 
1334  if (s1->has_afd) {
1335  AVFrameSideData *sd;
1336  ret = ff_frame_new_side_data(s->avctx, s->current_picture_ptr->f,
1337  AV_FRAME_DATA_AFD, 1, &sd);
1338  if (ret < 0)
1339  return ret;
1340  if (sd)
1341  *sd->data = s1->afd;
1342  s1->has_afd = 0;
1343  }
1344 
1345  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
1346  ff_thread_finish_setup(avctx);
1347  } else { // second field
1348  int i;
1349 
1350  if (!s->current_picture_ptr) {
1351  av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
1352  return AVERROR_INVALIDDATA;
1353  }
1354 
1355  if (s->avctx->hwaccel) {
1356  if ((ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame)) < 0) {
1357  av_log(avctx, AV_LOG_ERROR,
1358  "hardware accelerator failed to decode first field\n");
1359  return ret;
1360  }
1361  }
1362 
1363  for (i = 0; i < 4; i++) {
1364  s->current_picture.f->data[i] = s->current_picture_ptr->f->data[i];
1365  if (s->picture_structure == PICT_BOTTOM_FIELD)
1366  s->current_picture.f->data[i] +=
1367  s->current_picture_ptr->f->linesize[i];
1368  }
1369  }
1370 
1371  if (avctx->hwaccel) {
1372  if ((ret = FF_HW_CALL(avctx, start_frame, buf, buf_size)) < 0)
1373  return ret;
1374  }
1375 
1376  return 0;
1377 }
1378 
1379 #define DECODE_SLICE_ERROR -1
1380 #define DECODE_SLICE_OK 0
1381 
1382 /**
1383  * Decode a slice.
1384  * MpegEncContext.mb_y must be set to the MB row from the startcode.
1385  * @return DECODE_SLICE_ERROR if the slice is damaged,
1386  * DECODE_SLICE_OK if this slice is OK
1387  */
1388 static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
1389  const uint8_t **buf, int buf_size)
1390 {
1391  AVCodecContext *avctx = s->avctx;
1392  const int lowres = s->avctx->lowres;
1393  const int field_pic = s->picture_structure != PICT_FRAME;
1394  int ret;
1395 
1396  s->resync_mb_x =
1397  s->resync_mb_y = -1;
1398 
1399  av_assert0(mb_y < s->mb_height);
1400 
1401  ret = init_get_bits8(&s->gb, *buf, buf_size);
1402  if (ret < 0)
1403  return ret;
1404 
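 /* Pictures taller than 2800 lines carry a 3-bit
  * slice_vertical_position_extension after the slice start code; the caller
  * already folded it into mb_y, so it is only skipped here. */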
1405  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1406  skip_bits(&s->gb, 3);
1407 
1408  ff_mpeg1_clean_buffers(s);
1409  s->interlaced_dct = 0;
1410 
1411  s->qscale = mpeg_get_qscale(s);
1412 
1413  if (s->qscale == 0) {
1414  av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
1415  return AVERROR_INVALIDDATA;
1416  }
1417 
1418  /* extra slice info */
1419  if (skip_1stop_8data_bits(&s->gb) < 0)
1420  return AVERROR_INVALIDDATA;
1421 
1422  s->mb_x = 0;
1423 
1424  if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {
1425  skip_bits1(&s->gb);
1426  } else {
1427  while (get_bits_left(&s->gb) > 0) {
1428  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1429  MBINCR_VLC_BITS, 2);
1430  if (code < 0) {
1431  av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
1432  return AVERROR_INVALIDDATA;
1433  }
1434  if (code >= 33) {
1435  if (code == 33)
1436  s->mb_x += 33;
1437  /* otherwise, stuffing, nothing to do */
1438  } else {
1439  s->mb_x += code;
1440  break;
1441  }
1442  }
1443  }
1444 
1445  if (s->mb_x >= (unsigned) s->mb_width) {
1446  av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
1447  return AVERROR_INVALIDDATA;
1448  }
1449 
1450  if (avctx->hwaccel) {
1451  const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
1452  int start_code = -1;
1453  buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
1454  if (buf_end < *buf + buf_size)
1455  buf_end -= 4;
1456  s->mb_y = mb_y;
1457  if (FF_HW_CALL(avctx, decode_slice, buf_start, buf_end - buf_start) < 0)
1458  return DECODE_SLICE_ERROR;
1459  *buf = buf_end;
1460  return DECODE_SLICE_OK;
1461  }
1462 
1463  s->resync_mb_x = s->mb_x;
1464  s->resync_mb_y = s->mb_y = mb_y;
1465  s->mb_skip_run = 0;
1466  ff_init_block_index(s);
1467 
1468  if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field || s->picture_structure == PICT_FRAME)) {
1469  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
1470  av_log(s->avctx, AV_LOG_DEBUG,
1471  "qp:%d fc:%2d%2d%2d%2d %c %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
1472  s->qscale,
1473  s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
1474  s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
1475  s->pict_type == AV_PICTURE_TYPE_I ? 'I' :
1476  (s->pict_type == AV_PICTURE_TYPE_P ? 'P' :
1477  (s->pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
1478  s->progressive_sequence ? "ps" : "",
1479  s->progressive_frame ? "pf" : "",
1480  s->alternate_scan ? "alt" : "",
1481  s->top_field_first ? "top" : "",
1482  s->intra_dc_precision, s->picture_structure,
1483  s->frame_pred_frame_dct, s->concealment_motion_vectors,
1484  s->q_scale_type, s->intra_vlc_format,
1485  s->repeat_first_field, s->chroma_420_type ? "420" : "");
1486  }
1487  }
1488 
1489  for (;;) {
1490  if ((ret = mpeg_decode_mb(s, s->block)) < 0)
1491  return ret;
1492 
1493  // Note motion_val is normally NULL unless we want to extract the MVs.
1494  if (s->current_picture.motion_val[0]) {
1495  const int wrap = s->b8_stride;
1496  int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
1497  int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
1498  int motion_x, motion_y, dir, i;
1499 
1500  for (i = 0; i < 2; i++) {
1501  for (dir = 0; dir < 2; dir++) {
1502  if (s->mb_intra ||
1503  (dir == 1 && s->pict_type != AV_PICTURE_TYPE_B)) {
1504  motion_x = motion_y = 0;
1505  } else if (s->mv_type == MV_TYPE_16X16 ||
1506  (s->mv_type == MV_TYPE_FIELD && field_pic)) {
1507  motion_x = s->mv[dir][0][0];
1508  motion_y = s->mv[dir][0][1];
1509  } else { /* if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8)) */
1510  motion_x = s->mv[dir][i][0];
1511  motion_y = s->mv[dir][i][1];
1512  }
1513 
1514  s->current_picture.motion_val[dir][xy][0] = motion_x;
1515  s->current_picture.motion_val[dir][xy][1] = motion_y;
1516  s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
1517  s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
1518  s->current_picture.ref_index [dir][b8_xy] =
1519  s->current_picture.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
1520  av_assert2(s->field_select[dir][i] == 0 ||
1521  s->field_select[dir][i] == 1);
1522  }
1523  xy += wrap;
1524  b8_xy += 2;
1525  }
1526  }
1527 
1528  s->dest[0] += 16 >> lowres;
1529  s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
1530  s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
1531 
1532  ff_mpv_reconstruct_mb(s, s->block);
1533 
1534  if (++s->mb_x >= s->mb_width) {
1535  const int mb_size = 16 >> s->avctx->lowres;
1536  int left;
1537 
1538  ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
1539  ff_mpv_report_decode_progress(s);
1540 
1541  s->mb_x = 0;
1542  s->mb_y += 1 << field_pic;
1543 
1544  if (s->mb_y >= s->mb_height) {
1545  int left = get_bits_left(&s->gb);
1546  int is_d10 = s->chroma_format == 2 &&
1547  s->pict_type == AV_PICTURE_TYPE_I &&
1548  avctx->profile == 0 && avctx->level == 5 &&
1549  s->intra_dc_precision == 2 &&
1550  s->q_scale_type == 1 && s->alternate_scan == 0 &&
1551  s->progressive_frame == 0
1552  /* vbv_delay == 0xBBB || 0xE10 */;
1553 
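 /* D-10 (Sony IMX) pads every coded frame to a constant size, so trailing
  * zero bits after the last slice are expected there and must not be
  * reported as an error. */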
1554  if (left >= 32 && !is_d10) {
1555  GetBitContext gb = s->gb;
1556  align_get_bits(&gb);
1557  if (show_bits(&gb, 24) == 0x060E2B) {
1558  av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
1559  is_d10 = 1;
1560  }
1561  if (left > 32 && show_bits_long(&gb, 32) == 0x201) {
1562  av_log(avctx, AV_LOG_DEBUG, "skipping m704 alpha (unsupported)\n");
1563  goto eos;
1564  }
1565  }
1566 
1567  if (left < 0 ||
1568  (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
1569  ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
1570  av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n",
1571  left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0, s->mb_x, s->mb_y);
1572  return AVERROR_INVALIDDATA;
1573  } else
1574  goto eos;
1575  }
1576  // There are some files out there which are missing the last slice
1577  // in cases where the slice is completely outside the visible
1578  // area; we detect this here instead of running into the end expecting
1579  // more data.
1580  left = get_bits_left(&s->gb);
1581  if (s->mb_y >= ((s->height + 15) >> 4) &&
1582  !s->progressive_sequence &&
1583  left <= 25 &&
1584  left >= 0 &&
1585  s->mb_skip_run == -1 &&
1586  (!left || show_bits(&s->gb, left) == 0))
1587  goto eos;
1588 
1588 
1589  ff_init_block_index(s);
1590  }
1591 
1592  /* skip mb handling */
1593  if (s->mb_skip_run == -1) {
1594  /* read increment again */
1595  s->mb_skip_run = 0;
1596  for (;;) {
1597  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1598  MBINCR_VLC_BITS, 2);
1599  if (code < 0) {
1600  av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
1601  return AVERROR_INVALIDDATA;
1602  }
1603  if (code >= 33) {
1604  if (code == 33) {
1605  s->mb_skip_run += 33;
1606  } else if (code == 35) {
1607  if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
1608  av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
1609  return AVERROR_INVALIDDATA;
1610  }
1611  goto eos; /* end of slice */
1612  }
1613  /* otherwise, stuffing, nothing to do */
1614  } else {
1615  s->mb_skip_run += code;
1616  break;
1617  }
1618  }
1619  if (s->mb_skip_run) {
1620  int i;
1621  if (s->pict_type == AV_PICTURE_TYPE_I) {
1622  av_log(s->avctx, AV_LOG_ERROR,
1623  "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);
1624  return AVERROR_INVALIDDATA;
1625  }
1626 
1627  /* skip mb */
1628  s->mb_intra = 0;
1629  for (i = 0; i < 12; i++)
1630  s->block_last_index[i] = -1;
1631  if (s->picture_structure == PICT_FRAME)
1632  s->mv_type = MV_TYPE_16X16;
1633  else
1634  s->mv_type = MV_TYPE_FIELD;
1635  if (s->pict_type == AV_PICTURE_TYPE_P) {
1636  /* if P type, zero motion vector is implied */
1637  s->mv_dir = MV_DIR_FORWARD;
1638  s->mv[0][0][0] = s->mv[0][0][1] = 0;
1639  s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
1640  s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
1641  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1642  } else {
1643  /* if B type, reuse previous vectors and directions */
1644  s->mv[0][0][0] = s->last_mv[0][0][0];
1645  s->mv[0][0][1] = s->last_mv[0][0][1];
1646  s->mv[1][0][0] = s->last_mv[1][0][0];
1647  s->mv[1][0][1] = s->last_mv[1][0][1];
1648  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1649  s->field_select[1][0] = (s->picture_structure - 1) & 1;
1650  }
1651  }
1652  }
1653  }
1654 eos: // end of slice
1655  if (get_bits_left(&s->gb) < 0) {
1656  av_log(s, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb));
1657  return AVERROR_INVALIDDATA;
1658  }
1659  *buf += (get_bits_count(&s->gb) - 1) / 8;
1660  ff_dlog(s, "Slice start:%d %d end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
1661  return 0;
1662 }
1663 
1664 static int slice_decode_thread(AVCodecContext *c, void *arg)
1665 {
1666  MpegEncContext *s = *(void **) arg;
1667  const uint8_t *buf = s->gb.buffer;
1668  int mb_y = s->start_mb_y;
1669  const int field_pic = s->picture_structure != PICT_FRAME;
1670 
1671  s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
1672 
1673  for (;;) {
1674  uint32_t start_code;
1675  int ret;
1676 
1677  ret = mpeg_decode_slice(s, mb_y, &buf, s->gb.buffer_end - buf);
1678  emms_c();
1679  ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
1680  ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
1681  s->start_mb_y, s->end_mb_y, s->er.error_count);
1682  if (ret < 0) {
1683  if (c->err_recognition & AV_EF_EXPLODE)
1684  return ret;
1685  if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
1686  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1687  s->mb_x, s->mb_y,
1688  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
1689  } else {
1690  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1691  s->mb_x - 1, s->mb_y,
1692  ER_AC_END | ER_DC_END | ER_MV_END);
1693  }
1694 
1695  if (s->mb_y == s->end_mb_y)
1696  return 0;
1697 
1698  start_code = -1;
1699  buf = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code);
1700  if (start_code < SLICE_MIN_START_CODE || start_code > SLICE_MAX_START_CODE)
1701  return AVERROR_INVALIDDATA;
1702  mb_y = start_code - SLICE_MIN_START_CODE;
1703  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1704  mb_y += (*buf&0xE0)<<2;
1705  mb_y <<= field_pic;
1706  if (s->picture_structure == PICT_BOTTOM_FIELD)
1707  mb_y++;
1708  if (mb_y >= s->end_mb_y)
1709  return AVERROR_INVALIDDATA;
1710  }
1711 }
1712 
1713 /**
1714  * Handle slice ends.
1715  * @return 1 if it seems to be the last slice
1716  */
1717 static int slice_end(AVCodecContext *avctx, AVFrame *pict)
1718 {
1719  Mpeg1Context *s1 = avctx->priv_data;
1720  MpegEncContext *s = &s1->mpeg_enc_ctx;
1721 
1722  if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr)
1723  return 0;
1724 
1725  if (s->avctx->hwaccel) {
1726  int ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame);
1727  if (ret < 0) {
1728  av_log(avctx, AV_LOG_ERROR,
1729  "hardware accelerator failed to decode picture\n");
1730  return ret;
1731  }
1732  }
1733 
1734  /* end of slice reached */
1735  if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field && !s1->first_slice) {
1736  /* end of image */
1737 
1738  ff_er_frame_end(&s->er, NULL);
1739 
1739 
1740  ff_mpv_frame_end(s);
1741 
1742  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1743  int ret = av_frame_ref(pict, s->current_picture_ptr->f);
1744  if (ret < 0)
1745  return ret;
1746  ff_print_debug_info(s, s->current_picture_ptr, pict);
1747  ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1748  } else {
1749  /* latency of 1 frame for I- and P-frames */
1750  if (s->last_picture_ptr) {
1751  int ret = av_frame_ref(pict, s->last_picture_ptr->f);
1752  if (ret < 0)
1753  return ret;
1754  ff_print_debug_info(s, s->last_picture_ptr, pict);
1755  ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1756  }
1757  }
1758 
1759  return 1;
1760  } else {
1761  return 0;
1762  }
1763 }
1764 
1765 static int mpeg1_decode_sequence(AVCodecContext *avctx,
1766  const uint8_t *buf, int buf_size)
1767 {
1768  Mpeg1Context *s1 = avctx->priv_data;
1769  MpegEncContext *s = &s1->mpeg_enc_ctx;
1770  int width, height;
1771  int i, v, j;
1772 
1773  int ret = init_get_bits8(&s->gb, buf, buf_size);
1774  if (ret < 0)
1775  return ret;
1776 
1777  width = get_bits(&s->gb, 12);
1778  height = get_bits(&s->gb, 12);
1779  if (width == 0 || height == 0) {
1780  av_log(avctx, AV_LOG_WARNING,
1781  "Invalid horizontal or vertical size value.\n");
1782  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1783  return AVERROR_INVALIDDATA;
1784  }
1785  s1->aspect_ratio_info = get_bits(&s->gb, 4);
1786  if (s1->aspect_ratio_info == 0) {
1787  av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
1788  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1789  return AVERROR_INVALIDDATA;
1790  }
1791  s1->frame_rate_index = get_bits(&s->gb, 4);
1792  if (s1->frame_rate_index == 0 || s1->frame_rate_index > 13) {
1793  av_log(avctx, AV_LOG_WARNING,
1794  "frame_rate_index %d is invalid\n", s1->frame_rate_index);
1795  s1->frame_rate_index = 1;
1796  }
1797  s->bit_rate = get_bits(&s->gb, 18) * 400LL;
1798  if (check_marker(s->avctx, &s->gb, "in sequence header") == 0) {
1799  return AVERROR_INVALIDDATA;
1800  }
1801 
1802  s->avctx->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;
1803  skip_bits(&s->gb, 1);
1804 
1805  /* get matrix */
1806  if (get_bits1(&s->gb)) {
1807  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1808  } else {
1809  for (i = 0; i < 64; i++) {
1810  j = s->idsp.idct_permutation[i];
1811  v = ff_mpeg1_default_intra_matrix[i];
1812  s->intra_matrix[j] = v;
1813  s->chroma_intra_matrix[j] = v;
1814  }
1815  }
1816  if (get_bits1(&s->gb)) {
1817  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1818  } else {
1819  for (i = 0; i < 64; i++) {
1820  int j = s->idsp.idct_permutation[i];
1821  v = ff_mpeg1_default_non_intra_matrix[i];
1822  s->inter_matrix[j] = v;
1823  s->chroma_inter_matrix[j] = v;
1824  }
1825  }
1826 
1827  if (show_bits(&s->gb, 23) != 0) {
1828  av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
1829  return AVERROR_INVALIDDATA;
1830  }
1831 
1832  s->width = width;
1833  s->height = height;
1834 
1835  /* We set MPEG-2 parameters so that it emulates MPEG-1. */
1836  s->progressive_sequence = 1;
1837  s->progressive_frame = 1;
1838  s->picture_structure = PICT_FRAME;
1839  s->first_field = 0;
1840  s->frame_pred_frame_dct = 1;
1841  s->chroma_format = 1;
1842  s->codec_id =
1843  s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1844  s->out_format = FMT_MPEG1;
1845  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1846  s->low_delay = 1;
1847 
1848  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1849  av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n",
1850  s->avctx->rc_buffer_size, s->bit_rate, s1->aspect_ratio_info);
1851 
1852  return 0;
1853 }
1854 
1855 static int vcr2_init_sequence(AVCodecContext *avctx)
1856 {
1857  Mpeg1Context *s1 = avctx->priv_data;
1858  MpegEncContext *s = &s1->mpeg_enc_ctx;
1859  int i, v, ret;
1860 
1861  /* start new MPEG-1 context decoding */
1862  s->out_format = FMT_MPEG1;
1863  if (s1->mpeg_enc_ctx_allocated) {
1864  ff_mpv_common_end(s);
1865  s1->mpeg_enc_ctx_allocated = 0;
1866  }
1867  s->width = avctx->coded_width;
1868  s->height = avctx->coded_height;
1869  avctx->has_b_frames = 0; // true?
1870  s->low_delay = 1;
1871 
1872  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1873 
1874  if ((ret = ff_mpv_common_init(s)) < 0)
1875  return ret;
1876  s1->mpeg_enc_ctx_allocated = 1;
1877 
1878  for (i = 0; i < 64; i++) {
1879  int j = s->idsp.idct_permutation[i];
1880  v = ff_mpeg1_default_intra_matrix[i];
1881  s->intra_matrix[j] = v;
1882  s->chroma_intra_matrix[j] = v;
1883 
1884  v = ff_mpeg1_default_non_intra_matrix[i];
1885  s->inter_matrix[j] = v;
1886  s->chroma_inter_matrix[j] = v;
1887  }
1888 
1889  s->progressive_sequence = 1;
1890  s->progressive_frame = 1;
1891  s->picture_structure = PICT_FRAME;
1892  s->first_field = 0;
1893  s->frame_pred_frame_dct = 1;
1894  s->chroma_format = 1;
1895  if (s->codec_tag == AV_RL32("BW10")) {
1896  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1897  } else {
1898  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1899  }
1900  s1->save_width = s->width;
1901  s1->save_height = s->height;
1902  s1->save_progressive_seq = s->progressive_sequence;
1903  return 0;
1904 }
1905 
1906 static int mpeg_decode_a53_cc(AVCodecContext *avctx,
1907  const uint8_t *p, int buf_size)
1908 {
1909  Mpeg1Context *s1 = avctx->priv_data;
1910 
1911  if (buf_size >= 6 &&
1912  p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
1913  p[4] == 3 && (p[5] & 0x40)) {
1914  /* extract A53 Part 4 CC data */
1915  int cc_count = p[5] & 0x1f;
1916  if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
1917  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1918  const uint64_t new_size = (old_size + cc_count
1919  * UINT64_C(3));
1920  int ret;
1921 
1922  if (new_size > 3*A53_MAX_CC_COUNT)
1923  return AVERROR(EINVAL);
1924 
1925  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1926  if (ret >= 0)
1927  memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
1928 
1929  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
1930  }
1931  return 1;
1932  } else if (buf_size >= 2 &&
1933  p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
1934  /* extract SCTE-20 CC data */
1935  GetBitContext gb;
1936  int cc_count = 0;
1937  int i, ret;
1938 
1939  ret = init_get_bits8(&gb, p + 2, buf_size - 2);
1940  if (ret < 0)
1941  return ret;
1942  cc_count = get_bits(&gb, 5);
1943  if (cc_count > 0) {
1944  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1945  const uint64_t new_size = (old_size + cc_count
1946  * UINT64_C(3));
1947  if (new_size > 3*A53_MAX_CC_COUNT)
1948  return AVERROR(EINVAL);
1949 
1950  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1951  if (ret >= 0) {
1952  uint8_t field, cc1, cc2;
1953  uint8_t *cap = s1->a53_buf_ref->data;
1954 
1955  memset(s1->a53_buf_ref->data + old_size, 0, cc_count * 3);
1956  for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
1957  skip_bits(&gb, 2); // priority
1958  field = get_bits(&gb, 2);
1959  skip_bits(&gb, 5); // line_offset
1960  cc1 = get_bits(&gb, 8);
1961  cc2 = get_bits(&gb, 8);
1962  skip_bits(&gb, 1); // marker
1963 
1964  if (!field) { // forbidden
1965  cap[0] = cap[1] = cap[2] = 0x00;
1966  } else {
1967  field = (field == 2 ? 1 : 0);
1968  if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
1969  cap[0] = 0x04 | field;
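 /* SCTE-20 carries the two caption bytes bit-reversed relative to
  * A53 cc_data, so ff_reverse[] restores the normal bit order. */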
1970  cap[1] = ff_reverse[cc1];
1971  cap[2] = ff_reverse[cc2];
1972  }
1973  cap += 3;
1974  }
1975  }
1976  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
1977  }
1978  return 1;
1979  } else if (buf_size >= 11 &&
1980  p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
1981  /* extract DVD CC data
1982  *
1983  * uint32_t user_data_start_code 0x000001B2 (big endian)
1984  * uint16_t user_identifier 0x4343 "CC"
1985  * uint8_t user_data_type_code 0x01
1986  * uint8_t caption_block_size 0xF8
1987  * uint8_t
1988  * bit 7 caption_odd_field_first 1=odd field (CC1/CC2) first 0=even field (CC3/CC4) first
1989  * bit 6 caption_filler 0
1990  * bit 5:1 caption_block_count number of caption blocks (pairs of caption words = frames). Most DVDs use 15 per start of GOP.
1991  * bit 0 caption_extra_field_added 1=one additional caption word
1992  *
1993  * struct caption_field_block {
1994  * uint8_t
1995  * bit 7:1 caption_filler 0x7F (all 1s)
1996  * bit 0 caption_field_odd 1=odd field (this is CC1/CC2) 0=even field (this is CC3/CC4)
1997  * uint8_t caption_first_byte
1998  * uint8_t caption_second_byte
1999  * } caption_block[(caption_block_count * 2) + caption_extra_field_added];
2000  *
2001  * Some DVDs encode caption data for both fields with caption_field_odd=1. The only way to decode the fields
2002  * correctly is to start on the field indicated by caption_odd_field_first and count between odd/even fields.
2003  * Don't assume that the first caption word is the odd field. There do exist MPEG files in the wild that start
2004  * on the even field. There also exist DVDs in the wild that encode an odd field count and the
2005  * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */
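 /* Illustrative example (hypothetical, not from any real disc):
  *   43 43 01 F8  8F  FF c1 c2  FE c3 c4 ...
  * p[4] = 0x8F means odd-field-first, 7 caption blocks plus one extra field;
  * each 3-byte field block starts with 0xFF (odd) or 0xFE (even), which is
  * what the (p[i] & 0xfe) == 0xfe counting loop below relies on. */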
2006  int cc_count = 0;
2007  int i, ret;
2008  // There is a caption count field in the data, but it is often
2009  // incorrect. So count the number of captions present.
2010  for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
2011  cc_count++;
2012  // Transform the DVD format into A53 Part 4 format
2013  if (cc_count > 0) {
2014  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2015  const uint64_t new_size = (old_size + cc_count
2016  * UINT64_C(6));
2017  if (new_size > 3*A53_MAX_CC_COUNT)
2018  return AVERROR(EINVAL);
2019 
2020  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2021  if (ret >= 0) {
2022  uint8_t field1 = !!(p[4] & 0x80);
2023  uint8_t *cap = s1->a53_buf_ref->data;
2024  p += 5;
2025  for (i = 0; i < cc_count; i++) {
2026  cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
2027  cap[1] = p[1];
2028  cap[2] = p[2];
2029  cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
2030  cap[4] = p[4];
2031  cap[5] = p[5];
2032  cap += 6;
2033  p += 6;
2034  }
2035  }
2036  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2037  }
2038  return 1;
2039  }
2040  return 0;
2041 }
2042 
2043 static void mpeg_decode_user_data(AVCodecContext *avctx,
2044  const uint8_t *p, int buf_size)
2045 {
2046  Mpeg1Context *s = avctx->priv_data;
2047  const uint8_t *buf_end = p + buf_size;
2048  Mpeg1Context *s1 = avctx->priv_data;
2049 
2050 #if 0
2051  int i;
2052  for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){
2053  av_log(avctx, AV_LOG_ERROR, "%c", p[i]);
2054  }
2055  av_log(avctx, AV_LOG_ERROR, "\n");
2056 #endif
2057 
2058  if (buf_size > 29){
2059  int i;
2060  for(i=0; i<20; i++)
2061  if (!memcmp(p+i, "\0TMPGEXS\0", 9)){
2062  s->tmpgexs= 1;
2063  }
2064  }
2065  /* we parse the DTG active format information */
2066  if (buf_end - p >= 5 &&
2067  p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
2068  int flags = p[4];
2069  p += 5;
2070  if (flags & 0x80) {
2071  /* skip event id */
2072  p += 2;
2073  }
2074  if (flags & 0x40) {
2075  if (buf_end - p < 1)
2076  return;
2077  s1->has_afd = 1;
2078  s1->afd = p[0] & 0x0f;
2079  }
2080  } else if (buf_end - p >= 6 &&
2081  p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
2082  p[4] == 0x03) { // S3D_video_format_length
2083  // the 0x7F mask ignores the reserved_bit value
2084  const uint8_t S3D_video_format_type = p[5] & 0x7F;
2085 
2086  if (S3D_video_format_type == 0x03 ||
2087  S3D_video_format_type == 0x04 ||
2088  S3D_video_format_type == 0x08 ||
2089  S3D_video_format_type == 0x23) {
2090 
2091  s1->has_stereo3d = 1;
2092 
2093  switch (S3D_video_format_type) {
2094  case 0x03:
2095  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE;
2096  break;
2097  case 0x04:
2098  s1->stereo3d.type = AV_STEREO3D_TOPBOTTOM;
2099  break;
2100  case 0x08:
2101  s1->stereo3d.type = AV_STEREO3D_2D;
2102  break;
2103  case 0x23:
2104  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2105  break;
2106  }
2107  }
2108  } else if (mpeg_decode_a53_cc(avctx, p, buf_size)) {
2109  return;
2110  }
2111 }
2112 
2113 static int mpeg_decode_gop(AVCodecContext *avctx,
2114  const uint8_t *buf, int buf_size)
2115 {
2116  Mpeg1Context *s1 = avctx->priv_data;
2117  MpegEncContext *s = &s1->mpeg_enc_ctx;
2118  int broken_link;
2119  int64_t tc;
2120 
2121  int ret = init_get_bits8(&s->gb, buf, buf_size);
2122  if (ret < 0)
2123  return ret;
2124 
2125  tc = s1->timecode_frame_start = get_bits(&s->gb, 25);
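 /* The 25 bits are the SMPTE time code from the GOP header:
  * drop_frame_flag (1), hours (5), minutes (6), marker_bit (1),
  * seconds (6), pictures (6). */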
2126 
2127  s1->closed_gop = get_bits1(&s->gb);
2128  /* broken_link indicates that, after editing, the reference
2129  * frames needed by the first B-frames following the GOP's I-frame
2130  * are missing (open GOP) */
2131  broken_link = get_bits1(&s->gb);
2132 
2133  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
2134  char tcbuf[AV_TIMECODE_STR_SIZE];
2135  av_timecode_make_mpeg_tc_string(tcbuf, tc);
2136  av_log(s->avctx, AV_LOG_DEBUG,
2137  "GOP (%s) closed_gop=%d broken_link=%d\n",
2138  tcbuf, s1->closed_gop, broken_link);
2139  }
2140 
2141  return 0;
2142 }
2143 
2144 static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
2145  int *got_output, const uint8_t *buf, int buf_size)
2146 {
2147  Mpeg1Context *s = avctx->priv_data;
2148  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2149  const uint8_t *buf_ptr = buf;
2150  const uint8_t *buf_end = buf + buf_size;
2151  int ret, input_size;
2152  int last_code = 0, skip_frame = 0;
2153  int picture_start_code_seen = 0;
2154 
2155  for (;;) {
2156  /* find next start code */
2157  uint32_t start_code = -1;
2158  buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code);
2159  if (start_code > 0x1ff) {
2160  if (!skip_frame) {
2161  if (HAVE_THREADS &&
2162  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2163  !avctx->hwaccel) {
2164  int i;
2165  av_assert0(avctx->thread_count > 1);
2166 
2167  avctx->execute(avctx, slice_decode_thread,
2168  &s2->thread_context[0], NULL,
2169  s->slice_count, sizeof(void *));
2170  for (i = 0; i < s->slice_count; i++)
2171  s2->er.error_count += s2->thread_context[i]->er.error_count;
2172  }
2173 
2174  ret = slice_end(avctx, picture);
2175  if (ret < 0)
2176  return ret;
2177  else if (ret) {
2178  // FIXME: merge with the stuff in mpeg_decode_slice
2179  if (s2->last_picture_ptr || s2->low_delay || s2->pict_type == AV_PICTURE_TYPE_B)
2180  *got_output = 1;
2181  }
2182  }
2183  s2->pict_type = 0;
2184 
2185  if (avctx->err_recognition & AV_EF_EXPLODE && s2->er.error_count)
2186  return AVERROR_INVALIDDATA;
2187 
2188  return FFMAX(0, buf_ptr - buf);
2189  }
2190 
2191  input_size = buf_end - buf_ptr;
2192 
2193  if (avctx->debug & FF_DEBUG_STARTCODE)
2194  av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %"PTRDIFF_SPECIFIER" left %d\n",
2195  start_code, buf_ptr - buf, input_size);
2196 
2197  /* prepare data for next start code */
2198  switch (start_code) {
2199  case SEQ_START_CODE:
2200  if (last_code == 0) {
2201  mpeg1_decode_sequence(avctx, buf_ptr, input_size);
2202  if (buf != avctx->extradata)
2203  s->sync = 1;
2204  } else {
2205  av_log(avctx, AV_LOG_ERROR,
2206  "ignoring SEQ_START_CODE after %X\n", last_code);
2207  if (avctx->err_recognition & AV_EF_EXPLODE)
2208  return AVERROR_INVALIDDATA;
2209  }
2210  break;
2211 
2212  case PICTURE_START_CODE:
2213  if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
2214  /* If it's a frame picture, there can't be more than one picture header.
2215  Yet, it does happen and we need to handle it. */
2216  av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n");
2217  break;
2218  }
2219  picture_start_code_seen = 1;
2220 
2221  if (buf == avctx->extradata && avctx->codec_tag == AV_RL32("AVmp")) {
2222  av_log(avctx, AV_LOG_WARNING, "ignoring picture start code in AVmp extradata\n");
2223  break;
2224  }
2225 
2226  if (s2->width <= 0 || s2->height <= 0) {
2227  av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n",
2228  s2->width, s2->height);
2229  return AVERROR_INVALIDDATA;
2230  }
2231 
2232  if (s->tmpgexs){
2233  s2->intra_dc_precision= 3;
2234  s2->intra_matrix[0]= 1;
2235  }
2236  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
2237  !avctx->hwaccel && s->slice_count) {
2238  int i;
2239 
2240  avctx->execute(avctx, slice_decode_thread,
2241  s2->thread_context, NULL,
2242  s->slice_count, sizeof(void *));
2243  for (i = 0; i < s->slice_count; i++)
2244  s2->er.error_count += s2->thread_context[i]->er.error_count;
2245  s->slice_count = 0;
2246  }
2247  if (last_code == 0 || last_code == SLICE_MIN_START_CODE) {
2248  ret = mpeg_decode_postinit(avctx);
2249  if (ret < 0) {
2250  av_log(avctx, AV_LOG_ERROR,
2251  "mpeg_decode_postinit() failure\n");
2252  return ret;
2253  }
2254 
2255  /* We have a complete image: we try to decompress it. */
2256  if (mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0)
2257  s2->pict_type = 0;
2258  s->first_slice = 1;
2259  last_code = PICTURE_START_CODE;
2260  } else {
2261  av_log(avctx, AV_LOG_ERROR,
2262  "ignoring pic after %X\n", last_code);
2263  if (avctx->err_recognition & AV_EF_EXPLODE)
2264  return AVERROR_INVALIDDATA;
2265  }
2266  break;
2267  case EXT_START_CODE:
2268  ret = init_get_bits8(&s2->gb, buf_ptr, input_size);
2269  if (ret < 0)
2270  return ret;
2271 
2272  switch (get_bits(&s2->gb, 4)) {
2273  case 0x1:
2274  if (last_code == 0) {
2275  mpeg_decode_sequence_extension(s);
2276  } else {
2277  av_log(avctx, AV_LOG_ERROR,
2278  "ignoring seq ext after %X\n", last_code);
2279  if (avctx->err_recognition & AV_EF_EXPLODE)
2280  return AVERROR_INVALIDDATA;
2281  }
2282  break;
2283  case 0x2:
2284  mpeg_decode_sequence_display_extension(s);
2285  break;
2286  case 0x3:
2287  mpeg_decode_quant_matrix_extension(s2);
2288  break;
2289  case 0x7:
2290  mpeg_decode_picture_display_extension(s);
2291  break;
2292  case 0x8:
2293  if (last_code == PICTURE_START_CODE) {
2294  ret = mpeg_decode_picture_coding_extension(s);
2295  if (ret < 0)
2296  return ret;
2297  } else {
2298  av_log(avctx, AV_LOG_ERROR,
2299  "ignoring pic cod ext after %X\n", last_code);
2300  if (avctx->err_recognition & AV_EF_EXPLODE)
2301  return AVERROR_INVALIDDATA;
2302  }
2303  break;
2304  }
2305  break;
2306  case USER_START_CODE:
2307  mpeg_decode_user_data(avctx, buf_ptr, input_size);
2308  break;
2309  case GOP_START_CODE:
2310  if (last_code == 0) {
2311  s2->first_field = 0;
2312  ret = mpeg_decode_gop(avctx, buf_ptr, input_size);
2313  if (ret < 0)
2314  return ret;
2315  s->sync = 1;
2316  } else {
2317  av_log(avctx, AV_LOG_ERROR,
2318  "ignoring GOP_START_CODE after %X\n", last_code);
2319  if (avctx->err_recognition & AV_EF_EXPLODE)
2320  return AVERROR_INVALIDDATA;
2321  }
2322  break;
2323  default:
2324  if (start_code >= SLICE_MIN_START_CODE &&
2325  start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) {
2326  if (s2->progressive_sequence && !s2->progressive_frame) {
2327  s2->progressive_frame = 1;
2328  av_log(s2->avctx, AV_LOG_ERROR,
2329  "interlaced frame in progressive sequence, ignoring\n");
2330  }
2331 
2332  if (s2->picture_structure == 0 ||
2333  (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) {
2334  av_log(s2->avctx, AV_LOG_ERROR,
2335  "picture_structure %d invalid, ignoring\n",
2336  s2->picture_structure);
2337  s2->picture_structure = PICT_FRAME;
2338  }
2339 
2340  if (s2->progressive_sequence && !s2->frame_pred_frame_dct)
2341  av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
2342 
2343  if (s2->picture_structure == PICT_FRAME) {
2344  s2->first_field = 0;
2345  s2->v_edge_pos = 16 * s2->mb_height;
2346  } else {
2347  s2->first_field ^= 1;
2348  s2->v_edge_pos = 8 * s2->mb_height;
2349  memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
2350  }
2351  }
2352  if (start_code >= SLICE_MIN_START_CODE &&
2353  start_code <= SLICE_MAX_START_CODE && last_code != 0) {
2354  const int field_pic = s2->picture_structure != PICT_FRAME;
2355  int mb_y = start_code - SLICE_MIN_START_CODE;
2356  last_code = SLICE_MIN_START_CODE;
2357  if (s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16)
2358  mb_y += (*buf_ptr&0xE0)<<2;
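 /* The low byte of the slice start code gives the macroblock row
  * (0x101 -> row 0, 0x102 -> row 1, ...). For MPEG-2 pictures taller than
  * 2800 lines, the 3-bit slice_vertical_position_extension in the top bits
  * of the next byte adds extension * 128 rows, which is what
  * (*buf_ptr & 0xE0) << 2 computes. */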
2359 
2360  mb_y <<= field_pic;
2361  if (s2->picture_structure == PICT_BOTTOM_FIELD)
2362  mb_y++;
2363 
2364  if (buf_end - buf_ptr < 2) {
2365  av_log(s2->avctx, AV_LOG_ERROR, "slice too small\n");
2366  return AVERROR_INVALIDDATA;
2367  }
2368 
2369  if (mb_y >= s2->mb_height) {
2370  av_log(s2->avctx, AV_LOG_ERROR,
2371  "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
2372  return AVERROR_INVALIDDATA;
2373  }
2374 
2375  if (!s2->last_picture_ptr) {
2376  /* Skip B-frames if we do not have reference frames and
2377  * GOP is not closed. */
2378  if (s2->pict_type == AV_PICTURE_TYPE_B) {
2379  if (!s->closed_gop) {
2380  skip_frame = 1;
2381  av_log(s2->avctx, AV_LOG_DEBUG,
2382  "Skipping B slice due to open GOP\n");
2383  break;
2384  }
2385  }
2386  }
2387  if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
2388  s->sync = 1;
2389  if (!s2->next_picture_ptr) {
2390  /* Skip P-frames if we do not have a reference frame or
2391  * we have an invalid header. */
2392  if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) {
2393  skip_frame = 1;
2394  av_log(s2->avctx, AV_LOG_DEBUG,
2395  "Skipping P slice due to !sync\n");
2396  break;
2397  }
2398  }
2399  if ((avctx->skip_frame >= AVDISCARD_NONREF &&
2400  s2->pict_type == AV_PICTURE_TYPE_B) ||
2401  (avctx->skip_frame >= AVDISCARD_NONKEY &&
2402  s2->pict_type != AV_PICTURE_TYPE_I) ||
2403  avctx->skip_frame >= AVDISCARD_ALL) {
2404  skip_frame = 1;
2405  break;
2406  }
2407 
2408  if (!s->mpeg_enc_ctx_allocated)
2409  break;
2410 
2411  if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
2412  if (mb_y < avctx->skip_top ||
2413  mb_y >= s2->mb_height - avctx->skip_bottom)
2414  break;
2415  }
2416 
2417  if (!s2->pict_type) {
2418  av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
2419  if (avctx->err_recognition & AV_EF_EXPLODE)
2420  return AVERROR_INVALIDDATA;
2421  break;
2422  }
2423 
2424  if (s->first_slice) {
2425  skip_frame = 0;
2426  s->first_slice = 0;
2427  if ((ret = mpeg_field_start(s2, buf, buf_size)) < 0)
2428  return ret;
2429  }
2430  if (!s2->current_picture_ptr) {
2431  av_log(avctx, AV_LOG_ERROR,
2432  "current_picture not initialized\n");
2433  return AVERROR_INVALIDDATA;
2434  }
2435 
2436  if (HAVE_THREADS &&
2437  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2438  !avctx->hwaccel) {
2439  int threshold = (s2->mb_height * s->slice_count +
2440  s2->slice_context_count / 2) /
2441  s2->slice_context_count;
2442  av_assert0(avctx->thread_count > 1);
2443  if (threshold <= mb_y) {
2444  MpegEncContext *thread_context = s2->thread_context[s->slice_count];
2445 
2446  thread_context->start_mb_y = mb_y;
2447  thread_context->end_mb_y = s2->mb_height;
2448  if (s->slice_count) {
2449  s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
2450  ret = ff_update_duplicate_context(thread_context, s2);
2451  if (ret < 0)
2452  return ret;
2453  }
2454  ret = init_get_bits8(&thread_context->gb, buf_ptr, input_size);
2455  if (ret < 0)
2456  return ret;
2457  s->slice_count++;
2458  }
2459  buf_ptr += 2; // FIXME add minimum number of bytes per slice
2460  } else {
2461  ret = mpeg_decode_slice(s2, mb_y, &buf_ptr, input_size);
2462  emms_c();
2463 
2464  if (ret < 0) {
2465  if (avctx->err_recognition & AV_EF_EXPLODE)
2466  return ret;
2467  if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
2468  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2469  s2->resync_mb_y, s2->mb_x, s2->mb_y,
2470  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
2471  } else {
2472  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2473  s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
2474  ER_AC_END | ER_DC_END | ER_MV_END);
2475  }
2476  }
2477  }
2478  break;
2479  }
2480  }
2481 }
2482 
2483 static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
2484  int *got_output, AVPacket *avpkt)
2485 {
2486  const uint8_t *buf = avpkt->data;
2487  int ret;
2488  int buf_size = avpkt->size;
2489  Mpeg1Context *s = avctx->priv_data;
2490  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2491 
2492  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
2493  /* special case for last picture */
2494  if (s2->low_delay == 0 && s2->next_picture_ptr) {
2495  int ret = av_frame_ref(picture, s2->next_picture_ptr->f);
2496  if (ret < 0)
2497  return ret;
2498 
2499  s2->next_picture_ptr = NULL;
2500 
2501  *got_output = 1;
2502  }
2503  return buf_size;
2504  }
2505 
2506  if (s->mpeg_enc_ctx_allocated == 0 && ( s2->codec_tag == AV_RL32("VCR2")
2507  || s2->codec_tag == AV_RL32("BW10")
2508  ))
2509  vcr2_init_sequence(avctx);
2510 
2511  s->slice_count = 0;
2512 
2513  if (avctx->extradata && !s->extradata_decoded) {
2514  ret = decode_chunks(avctx, picture, got_output,
2515  avctx->extradata, avctx->extradata_size);
2516  if (*got_output) {
2517  av_log(avctx, AV_LOG_ERROR, "picture in extradata\n");
2518  av_frame_unref(picture);
2519  *got_output = 0;
2520  }
2521  s->extradata_decoded = 1;
2522  if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
2523  s2->current_picture_ptr = NULL;
2524  return ret;
2525  }
2526  }
2527 
2528  ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
2529  if (ret<0 || *got_output) {
2530  s2->current_picture_ptr = NULL;
2531 
2532  if (s->timecode_frame_start != -1 && *got_output) {
2533  char tcbuf[AV_TIMECODE_STR_SIZE];
2534  AVFrameSideData *tcside = av_frame_new_side_data(picture,
2535  AV_FRAME_DATA_GOP_TIMECODE,
2536  sizeof(int64_t));
2537  if (!tcside)
2538  return AVERROR(ENOMEM);
2539  memcpy(tcside->data, &s->timecode_frame_start, sizeof(int64_t));
2540 
2541  av_timecode_make_mpeg_tc_string(tcbuf, s->timecode_frame_start);
2542  av_dict_set(&picture->metadata, "timecode", tcbuf, 0);
2543 
2544  s->timecode_frame_start = -1;
2545  }
2546  }
2547 
2548  return ret;
2549 }
2550 
2551 static void flush(AVCodecContext *avctx)
2552 {
2553  Mpeg1Context *s = avctx->priv_data;
2554 
2555  s->sync = 0;
2556  s->closed_gop = 0;
2557 
2558  av_buffer_unref(&s->a53_buf_ref);
2559  ff_mpeg_flush(avctx);
2560 }
2561 
2562 static av_cold int mpeg_decode_end(AVCodecContext *avctx)
2563 {
2564  Mpeg1Context *s = avctx->priv_data;
2565 
2566  if (s->mpeg_enc_ctx_allocated)
2567  ff_mpv_common_end(&s->mpeg_enc_ctx);
2568  av_buffer_unref(&s->a53_buf_ref);
2569  return 0;
2570 }
2571 
2572 const FFCodec ff_mpeg1video_decoder = {
2573  .p.name = "mpeg1video",
2574  CODEC_LONG_NAME("MPEG-1 video"),
2575  .p.type = AVMEDIA_TYPE_VIDEO,
2576  .p.id = AV_CODEC_ID_MPEG1VIDEO,
2577  .priv_data_size = sizeof(Mpeg1Context),
2578  .init = mpeg_decode_init,
2579  .close = mpeg_decode_end,
2580  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2581  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2583  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2584  .flush = flush,
2585  .p.max_lowres = 3,
2586  UPDATE_THREAD_CONTEXT(mpeg_decode_update_thread_context),
2587  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2588 #if CONFIG_MPEG1_NVDEC_HWACCEL
2589  HWACCEL_NVDEC(mpeg1),
2590 #endif
2591 #if CONFIG_MPEG1_VDPAU_HWACCEL
2592  HWACCEL_VDPAU(mpeg1),
2593 #endif
2594 #if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL
2595  HWACCEL_VIDEOTOOLBOX(mpeg1),
2596 #endif
2597  NULL
2598  },
2599 };
2600 
2601 const FFCodec ff_mpeg2video_decoder = {
2602  .p.name = "mpeg2video",
2603  CODEC_LONG_NAME("MPEG-2 video"),
2604  .p.type = AVMEDIA_TYPE_VIDEO,
2605  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2606  .priv_data_size = sizeof(Mpeg1Context),
2607  .init = mpeg_decode_init,
2608  .close = mpeg_decode_end,
2609  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2610  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2612  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2613  .flush = flush,
2614  .p.max_lowres = 3,
2615  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mpeg2_video_profiles),
2616  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2617 #if CONFIG_MPEG2_DXVA2_HWACCEL
2618  HWACCEL_DXVA2(mpeg2),
2619 #endif
2620 #if CONFIG_MPEG2_D3D11VA_HWACCEL
2621  HWACCEL_D3D11VA(mpeg2),
2622 #endif
2623 #if CONFIG_MPEG2_D3D11VA2_HWACCEL
2624  HWACCEL_D3D11VA2(mpeg2),
2625 #endif
2626 #if CONFIG_MPEG2_D3D12VA_HWACCEL
2627  HWACCEL_D3D12VA(mpeg2),
2628 #endif
2629 #if CONFIG_MPEG2_NVDEC_HWACCEL
2630  HWACCEL_NVDEC(mpeg2),
2631 #endif
2632 #if CONFIG_MPEG2_VAAPI_HWACCEL
2633  HWACCEL_VAAPI(mpeg2),
2634 #endif
2635 #if CONFIG_MPEG2_VDPAU_HWACCEL
2636  HWACCEL_VDPAU(mpeg2),
2637 #endif
2638 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
2639  HWACCEL_VIDEOTOOLBOX(mpeg2),
2640 #endif
2641  NULL
2642  },
2643 };
2644 
2645 //legacy decoder
2646 const FFCodec ff_mpegvideo_decoder = {
2647  .p.name = "mpegvideo",
2648  CODEC_LONG_NAME("MPEG-1 video"),
2649  .p.type = AVMEDIA_TYPE_VIDEO,
2650  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2651  .priv_data_size = sizeof(Mpeg1Context),
2652  .init = mpeg_decode_init,
2653  .close = mpeg_decode_end,
2654  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2655  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2657  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2658  .flush = flush,
2659  .p.max_lowres = 3,
2660 };
2661 
2662 typedef struct IPUContext {
2663  MpegEncContext m;
2664 
2665  int flags;
2666  DECLARE_ALIGNED(32, int16_t, block)[6][64];
2667 } IPUContext;
2668 
2669 static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2670  int *got_frame, AVPacket *avpkt)
2671 {
2672  IPUContext *s = avctx->priv_data;
2673  MpegEncContext *m = &s->m;
2674  GetBitContext *gb = &m->gb;
2675  int ret;
2676 
2677  // Check for minimal intra MB size (considering mb header, luma & chroma dc VLC, ac EOB VLC)
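 // That is at least 2 + 3*4 + 2*2 + 2*6 = 30 bits per 16x16 macroblock,
 // so a valid intra-only payload must carry at least 30 bits per MB.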
2678  if (avpkt->size*8LL < (avctx->width+15)/16 * ((avctx->height+15)/16) * (2 + 3*4 + 2*2 + 2*6))
2679  return AVERROR_INVALIDDATA;
2680 
2681  ret = ff_get_buffer(avctx, frame, 0);
2682  if (ret < 0)
2683  return ret;
2684 
2685  ret = init_get_bits8(gb, avpkt->data, avpkt->size);
2686  if (ret < 0)
2687  return ret;
2688 
2689  s->flags = get_bits(gb, 8);
2690  m->intra_dc_precision = s->flags & 3;
2691  m->q_scale_type = !!(s->flags & 0x40);
2692  m->intra_vlc_format = !!(s->flags & 0x20);
2693  m->alternate_scan = !!(s->flags & 0x10);
2694 
2695  if (s->flags & 0x10) {
2696  ff_init_scantable(m->idsp.idct_permutation,
2697  &m->intra_scantable, ff_alternate_vertical_scan);
2698  } else {
2699  ff_init_scantable(m->idsp.idct_permutation,
2700  &m->intra_scantable, ff_zigzag_direct);
2701  }
2702 
2703  m->last_dc[0] = m->last_dc[1] = m->last_dc[2] = 1 << (7 + (s->flags & 3));
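 /* DC predictors reset to the mid-level for the selected intra_dc_precision:
  * 1 << (7 + precision) = 128, 256, 512 or 1024. */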
2704  m->qscale = 1;
2705 
2706  for (int y = 0; y < avctx->height; y += 16) {
2707  int intraquant;
2708 
2709  for (int x = 0; x < avctx->width; x += 16) {
2710  if (x || y) {
2711  if (!get_bits1(gb))
2712  return AVERROR_INVALIDDATA;
2713  }
2714  if (get_bits1(gb)) {
2715  intraquant = 0;
2716  } else {
2717  if (!get_bits1(gb))
2718  return AVERROR_INVALIDDATA;
2719  intraquant = 1;
2720  }
2721 
2722  if (s->flags & 4)
2723  skip_bits1(gb);
2724 
2725  if (intraquant)
2726  m->qscale = mpeg_get_qscale(m);
2727 
2728  memset(s->block, 0, sizeof(s->block));
2729 
2730  for (int n = 0; n < 6; n++) {
2731  if (s->flags & 0x80) {
2732  ret = ff_mpeg1_decode_block_intra(&m->gb,
2733  m->intra_matrix,
2734  m->intra_scantable.permutated,
2735  m->last_dc, s->block[n],
2736  n, m->qscale);
2737  if (ret >= 0)
2738  m->block_last_index[n] = ret;
2739  } else {
2740  ret = mpeg2_decode_block_intra(m, s->block[n], n);
2741  }
2742 
2743  if (ret < 0)
2744  return ret;
2745  }
2746 
2747  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
2748  frame->linesize[0], s->block[0]);
2749  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
2750  frame->linesize[0], s->block[1]);
2751  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
2752  frame->linesize[0], s->block[2]);
2753  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
2754  frame->linesize[0], s->block[3]);
2755  m->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
2756  frame->linesize[1], s->block[4]);
2757  m->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
2758  frame->linesize[2], s->block[5]);
2759  }
2760  }
2761 
2762  align_get_bits(gb);
2763  if (get_bits_left(gb) != 32)
2764  return AVERROR_INVALIDDATA;
2765 
2766  frame->pict_type = AV_PICTURE_TYPE_I;
2767  frame->flags |= AV_FRAME_FLAG_KEY;
2768  *got_frame = 1;
2769 
2770  return avpkt->size;
2771 }
2772 
2773 static av_cold int ipu_decode_init(AVCodecContext *avctx)
2774 {
2775  IPUContext *s = avctx->priv_data;
2776  MpegEncContext *m = &s->m;
2777 
2778  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2779 
2780  ff_mpv_decode_init(m, avctx);
2782 
2783  for (int i = 0; i < 64; i++) {
2784  int j = m->idsp.idct_permutation[i];
2785  int v = ff_mpeg1_default_intra_matrix[i];
2786  m->intra_matrix[j] = v;
2787  m->chroma_intra_matrix[j] = v;
2788  }
2789 
2790  for (int i = 0; i < 64; i++) {
2791  int j = m->idsp.idct_permutation[i];
2792  int v = ff_mpeg1_default_non_intra_matrix[i];
2793  m->inter_matrix[j] = v;
2794  m->chroma_inter_matrix[j] = v;
2795  }
2796 
2797  return 0;
2798 }
2799 
2800 static av_cold int ipu_decode_end(AVCodecContext *avctx)
2801 {
2802  IPUContext *s = avctx->priv_data;
2803 
2804  ff_mpv_common_end(&s->m);
2805 
2806  return 0;
2807 }
2808 
2809 const FFCodec ff_ipu_decoder = {
2810  .p.name = "ipu",
2811  CODEC_LONG_NAME("IPU Video"),
2812  .p.type = AVMEDIA_TYPE_VIDEO,
2813  .p.id = AV_CODEC_ID_IPU,
2814  .priv_data_size = sizeof(IPUContext),
2815  .init = ipu_decode_init,
2816  FF_CODEC_DECODE_CB(ipu_decode_frame),
2817  .close = ipu_decode_end,
2818  .p.capabilities = AV_CODEC_CAP_DR1,
2819  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2820 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
vcr2_init_sequence
static int vcr2_init_sequence(AVCodecContext *avctx)
Definition: mpeg12dec.c:1855
HWACCEL_D3D12VA
#define HWACCEL_D3D12VA(codec)
Definition: hwconfig.h:80
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:681
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1427
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
MB_TYPE_L0
#define MB_TYPE_L0
Definition: mpegutils.h:60
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:261
Mpeg1Context::has_afd
int has_afd
Definition: mpeg12dec.c:74
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
ff_mb_pat_vlc
VLCElem ff_mb_pat_vlc[512]
Definition: mpeg12.c:126
level
uint8_t level
Definition: svq3.c:204
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
Mpeg1Context::a53_buf_ref
AVBufferRef * a53_buf_ref
Definition: mpeg12dec.c:72
ff_mpeg2_aspect
const AVRational ff_mpeg2_aspect[16]
Definition: mpeg12data.c:380
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:495
mpeg_decode_a53_cc
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:1906
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:255
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:505
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:114
FF_MPV_QSCALE_TYPE_MPEG2
#define FF_MPV_QSCALE_TYPE_MPEG2
Definition: mpegvideodec.h:41
ff_frame_new_side_data_from_buf
int ff_frame_new_side_data_from_buf(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef **buf, AVFrameSideData **psd)
Similar to ff_frame_new_side_data, but using an existing buffer ref.
Definition: decode.c:1837
mem_internal.h
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1219
mpeg_decode_frame
static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_output, AVPacket *avpkt)
Definition: mpeg12dec.c:2483
MpegEncContext::gb
GetBitContext gb
Definition: mpegvideo.h:425
AV_EF_COMPLIANT
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
Definition: defs.h:55
SEQ_END_CODE
#define SEQ_END_CODE
Definition: mpeg12.h:28
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:704
check_scantable_index
#define check_scantable_index(ctx, x)
Definition: mpeg12dec.c:143
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
MT_FIELD
#define MT_FIELD
Definition: mpeg12dec.c:415
EXT_START_CODE
#define EXT_START_CODE
Definition: cavs.h:39
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:263
av_div_q
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
AVPanScan
Pan Scan area.
Definition: defs.h:240
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1420
SLICE_MAX_START_CODE
#define SLICE_MAX_START_CODE
Definition: cavs.h:38
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:48
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
ipu_decode_init
static av_cold int ipu_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:2773
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:490
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:220
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:564
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
ff_mpegvideo_decoder
const FFCodec ff_mpegvideo_decoder
Definition: mpeg12dec.c:2646
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:522
ipu_decode_end
static av_cold int ipu_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2800
mpeg_decode_mb
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpeg12dec.c:420
Mpeg1Context::closed_gop
int closed_gop
Definition: mpeg12dec.c:82
mpeg2_decode_block_intra
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:321
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:175
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:47
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:254
FFCodec
Definition: codec_internal.h:127
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:174
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:822
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:845
reverse.h
mpegvideo.h
MB_TYPE_L1
#define MB_TYPE_L1
Definition: mpegutils.h:61
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:225
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:612
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Mpeg1Context::first_slice
int first_slice
Definition: mpeg12dec.c:84
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:34
mpeg_decode_postinit
static int mpeg_decode_postinit(AVCodecContext *avctx)
Definition: mpeg12dec.c:899
mpegutils.h
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
ER_MV_ERROR
#define ER_MV_ERROR
Definition: error_resilience.h:32
thread.h
SEQ_START_CODE
#define SEQ_START_CODE
Definition: mpeg12.h:29
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1397
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:265
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:263
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
ff_mpeg2_rl_vlc
RL_VLC_ELEM ff_mpeg2_rl_vlc[674]
Definition: mpeg12.c:129
Mpeg1Context::save_aspect
AVRational save_aspect
Definition: mpeg12dec.c:77
MpegEncContext::intra_scantable
ScanTable intra_scantable
Definition: mpegvideo.h:81
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:560
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
MB_TYPE_ZERO_MV
#define MB_TYPE_ZERO_MV
Definition: mpeg12dec.c:89
MT_DMV
#define MT_DMV
Definition: mpeg12dec.c:418
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:1008
ff_mbincr_vlc
VLCElem ff_mbincr_vlc[538]
Definition: mpeg12.c:123
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
decode_chunks
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2144
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1819
mpeg_decode_quant_matrix_extension
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
Definition: mpeg12dec.c:1201
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1582
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
wrap
#define wrap(func)
Definition: neontest.h:65
timecode.h
GetBitContext
Definition: get_bits.h:108
AV_EF_BITSTREAM
#define AV_EF_BITSTREAM
detect bitstream specification deviations
Definition: defs.h:49
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:92
slice_decode_thread
static int slice_decode_thread(AVCodecContext *c, void *arg)
Definition: mpeg12dec.c:1664
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:502
IDCTDSPContext::idct_put
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:62
MB_TYPE_CBP
#define MB_TYPE_CBP
Definition: mpegutils.h:64
val
static double val(void *priv, double ch)
Definition: aeval.c:78
Mpeg1Context::tmpgexs
int tmpgexs
Definition: mpeg12dec.c:83
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:334
mpeg12_pixfmt_list_444
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
Definition: mpeg12dec.c:871
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:633
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:498
mpeg1_decode_sequence
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1765
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
HAS_CBP
#define HAS_CBP(a)
Definition: mpegutils.h:94
AVRational::num
int num
Numerator.
Definition: rational.h:59
GOP_START_CODE
#define GOP_START_CODE
Definition: mpeg12.h:30
IPUContext
Definition: mpeg12dec.c:2662
mpeg1_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:828
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:782
mpeg12.h
mpegvideodec.h
ff_mpeg2video_decoder
const FFCodec ff_mpeg2video_decoder
Definition: mpeg12dec.c:2601
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
Mpeg1Context::frame_rate_index
unsigned frame_rate_index
Definition: mpeg12dec.c:80
ipu_decode_frame
static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mpeg12dec.c:2669
ER_DC_ERROR
#define ER_DC_ERROR
Definition: error_resilience.h:31
av_cold
#define av_cold
Definition: attributes.h:90
mpeg2_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:839
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:591
mpeg1_decode_picture
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1030
flush
static void flush(AVCodecContext *avctx)
Definition: mpeg12dec.c:2551
Mpeg1Context::save_progressive_seq
int save_progressive_seq
Definition: mpeg12dec.c:78
emms_c
#define emms_c()
Definition: emms.h:63
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:188
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:524
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:723
A53_MAX_CC_COUNT
#define A53_MAX_CC_COUNT
Definition: mpeg12dec.c:63
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:892
Mpeg1Context::repeat_field
int repeat_field
Definition: mpeg12dec.c:68
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:287
stereo3d.h
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:134
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_mv_vlc
VLCElem ff_mv_vlc[266]
Definition: mpeg12.c:118
ff_mpeg1_aspect
const float ff_mpeg1_aspect[16]
Definition: mpeg12data.c:359
s1
#define s1
Definition: regdef.h:38
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:1717
Mpeg1Context::mpeg_enc_ctx_allocated
int mpeg_enc_ctx_allocated
Definition: mpeg12dec.c:67
SHOW_SBITS
#define SHOW_SBITS(name, gb, num)
Definition: get_bits.h:260
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:47
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
Mpeg1Context::aspect_ratio_info
unsigned aspect_ratio_info
Definition: mpeg12dec.c:76
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
mpeg_decode_sequence_display_extension
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1120
Mpeg1Context::pan_scan
AVPanScan pan_scan
Definition: mpeg12dec.c:69
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:320
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
decode.h
mpeg12_pixfmt_list_422
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
Definition: mpeg12dec.c:866
SKIP_BITS
#define SKIP_BITS(name, gb, num)
Definition: get_bits.h:241
IS_INTRA
#define IS_INTRA(x, y)
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1292
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
frame
static AVFrame * frame
Definition: demux_decode.c:54
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
arg
const char * arg
Definition: jacosubdec.c:67
rl_vlc
static const VLCElem * rl_vlc[2]
Definition: mobiclip.c:277
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
MB_PTYPE_VLC_BITS
#define MB_PTYPE_VLC_BITS
Definition: mpeg12vlc.h:39
Mpeg1Context::save_width
int save_width
Definition: mpeg12dec.c:78
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:140
NULL
#define NULL
Definition: coverity.c:32
run
uint8_t run
Definition: svq3.c:203
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:695
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
ER_AC_ERROR
#define ER_AC_ERROR
Definition: error_resilience.h:30
SLICE_MIN_START_CODE
#define SLICE_MIN_START_CODE
Definition: mpeg12.h:32
hwaccel_internal.h
Mpeg1Context::sync
int sync
Definition: mpeg12dec.c:81
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:704
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCHROMA_LOC_TOPLEFT
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
Definition: pixfmt.h:706
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:495
mpeg_decode_picture_display_extension
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1144
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:297
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
profiles.h
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:247
MB_TYPE_QUANT
#define MB_TYPE_QUANT
Definition: mpegutils.h:63
avpriv_find_start_code
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
lowres
static int lowres
Definition: ffplay.c:333
ff_mpeg1_rl_vlc
RL_VLC_ELEM ff_mpeg1_rl_vlc[680]
Definition: mpeg12.c:128
MB_BTYPE_VLC_BITS
#define MB_BTYPE_VLC_BITS
Definition: mpeg12vlc.h:40
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:281
AV_PIX_FMT_D3D12
@ AV_PIX_FMT_D3D12
Hardware surfaces for Direct3D 12.
Definition: pixfmt.h:440
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
mpeg12codecs.h
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
AVCodecContext::level
int level
Encoding level descriptor.
Definition: avcodec.h:1783
Mpeg1Context::save_height
int save_height
Definition: mpeg12dec.c:78
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::idsp
IDCTDSPContext idsp
Definition: mpegvideo.h:217
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
startcode.h
s2
#define s2
Definition: regdef.h:39
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:218
check_marker
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
Definition: mpegvideodec.h:73
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:509
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:442
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1568
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:365
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
AVPacket::size
int size
Definition: packet.h:523
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:194
AV_CODEC_ID_IPU
@ AV_CODEC_ID_IPU
Definition: codec_id.h:306
AV_FRAME_DATA_PANSCAN
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:53
RL_VLC_ELEM
Definition: vlc.h:53
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:312
MT_FRAME
#define MT_FRAME
Definition: mpeg12dec.c:416
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:109
shift
static int shift(int a, int b)
Definition: bonk.c:262
IPUContext::flags
int flags
Definition: mpeg12dec.c:2665
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:295
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:106
ff_mpeg1video_decoder
const FFCodec ff_mpeg1video_decoder
Definition: mpeg12dec.c:2572
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:1818
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:55
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1594
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:535
PICTURE_START_CODE
#define PICTURE_START_CODE
Definition: mpeg12.h:31
USER_START_CODE
#define USER_START_CODE
Definition: cavs.h:40
AVCodecContext::skip_bottom
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
Definition: avcodec.h:1847
AVCodecHWConfigInternal
Definition: hwconfig.h:25
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:164
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:295
MB_TYPE_INTERLACED
#define MB_TYPE_INTERLACED
Definition: mpegutils.h:51
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:543
height
#define height
Mpeg1Context::has_stereo3d
int has_stereo3d
Definition: mpeg12dec.c:71
mpeg_decode_init
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:785
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
mpegvideodata.h
attributes.h
ff_mpeg1_decode_block_intra
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, const uint8_t *scantable, int last_dc[3], int16_t *block, int index, int qscale)
Definition: mpeg12.c:172
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:264
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:413
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:336
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1593
ff_mpeg2_video_profiles
const AVProfile ff_mpeg2_video_profiles[]
Definition: profiles.c:113
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
emms.h
MB_TYPE_L0L1
#define MB_TYPE_L0L1
Definition: mpegutils.h:62
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
ff_init_scantable
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: mpegvideo.c:321
MpegEncContext::block_last_index
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:72
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::chroma_inter_matrix
uint16_t chroma_inter_matrix[64]
Definition: mpegvideo.h:298
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
code
Definition: filter_design.txt:178
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:380
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1795
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:63
btype2mb_type
static const uint32_t btype2mb_type[11]
Definition: mpeg12dec.c:101
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:523
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:45
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
IS_QUANT
#define IS_QUANT(a)
Definition: mpegutils.h:88
ff_mpeg12_init_vlcs
av_cold void ff_mpeg12_init_vlcs(void)
Definition: mpeg12.c:164
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1404
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_d2q
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
Definition: rational.c:106
MB_PAT_VLC_BITS
#define MB_PAT_VLC_BITS
Definition: mpeg12vlc.h:38
mpeg1_decode_block_inter
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:152
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:534
ptype2mb_type
static const uint32_t ptype2mb_type[7]
Definition: mpeg12dec.c:91
IPUContext::m
MpegEncContext m
Definition: mpeg12dec.c:2663
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
MpegEncContext::intra_vlc_format
int intra_vlc_format
Definition: mpegvideo.h:444
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:702
MAX_INDEX
#define MAX_INDEX
Definition: mpeg12dec.c:142
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:666
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
Mpeg1Context::stereo3d
AVStereo3D stereo3d
Definition: mpeg12dec.c:70
idctdsp.h
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
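A small usage sketch of av_cmp_q(); the values are arbitrary examples, not taken from this decoder.

#include <libavutil/rational.h>

/* av_cmp_q() returns 0 if a == b, 1 if a > b, -1 if a < b (INT_MIN for 0/0). */
static int ntsc_film_is_slower_than_24(void)
{
    AVRational a = { 24000, 1001 };   /* 23.976... */
    AVRational b = { 24, 1 };
    return av_cmp_q(a, b) < 0;        /* true: 24000/1001 < 24/1 */
}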
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:606
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ff_mpeg12_frame_rate_tab
const AVRational ff_mpeg12_frame_rate_tab[]
Definition: mpeg12framerate.c:24
mpeg_decode_gop
static int mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2113
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
pred
static const float pred[4]
Definition: siprdata.h:259
AV_FRAME_DATA_GOP_TIMECODE
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
Definition: frame.h:125
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
TEX_VLC_BITS
#define TEX_VLC_BITS
Definition: dvdec.c:147
ff_thread_finish_setup
Code that must run before the decode process starts should be moved up accordingly; call ff_thread_finish_setup() afterwards.
left
Definition: snow.txt:386
mpeg_get_pixelformat
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
Definition: mpeg12dec.c:876
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:371
AV_RL32
Bytestream macro: read a 32-bit little-endian value from a byte buffer.
Definition: bytestream.h:92
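The two bytestream macros above reduce to plain shifts over four consecutive bytes; the functions below are an illustrative sketch of that computation, not the FFmpeg macro bodies.

#include <stdint.h>

static uint32_t rb32(const uint8_t *p)   /* what AV_RB32 computes: big-endian */
{
    return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

static uint32_t rl32(const uint8_t *p)   /* what AV_RL32 computes: little-endian */
{
    return (uint32_t)p[3] << 24 | p[2] << 16 | p[1] << 8 | p[0];
}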
mpeg12data.h
mpeg_field_start
static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1276
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:62
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:699
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1601
av_timecode_make_mpeg_tc_string
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
Definition: timecode.c:167
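A hedged sketch of how that packed 25-bit GOP timecode can be unpacked, assuming the field layout of the MPEG GOP header: drop-frame flag in bit 24, then 5-bit hours, 6-bit minutes, 6-bit seconds and 6-bit frames (bit 12 holds the marker bit).

#include <stdint.h>
#include <stdio.h>

static void print_gop_timecode(uint32_t tc25bit)
{
    printf("%02u:%02u:%02u%c%02u\n",
           tc25bit >> 19 & 0x1f,                 /* hours   */
           tc25bit >> 13 & 0x3f,                 /* minutes */
           tc25bit >>  6 & 0x3f,                 /* seconds */
           tc25bit & (1u << 24) ? ';' : ':',     /* drop-frame flag */
           tc25bit &  0x3f);                     /* frames  */
}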
MpegEncContext::intra_dc_precision
int intra_dc_precision
Definition: mpegvideo.h:438
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1612
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:259
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
mpeg12dec.h
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:705
AVRational::den
int den
Denominator.
Definition: rational.h:60
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:171
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1639
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:658
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:133
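A portable sketch of what a bit-width-limited sign extension does (not necessarily the mathops.h implementation); valid for 0 < bits < 32.

#include <stdint.h>

static int sign_extend_sketch(int val, unsigned bits)
{
    const unsigned sign = 1u << (bits - 1);               /* sign bit of the field  */
    const unsigned v    = (unsigned)val & (2 * sign - 1); /* keep the low 'bits' bits */
    return (int)(v ^ sign) - (int)sign;                   /* two's-complement value */
}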
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:490
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
Mpeg1Context::slice_count
int slice_count
Definition: mpeg12dec.c:75
AVCodecContext::ticks_per_frame
attribute_deprecated int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:576
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:76
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1797
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
av_buffer_realloc
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
Definition: buffer.c:183
ff_mb_ptype_vlc
VLCElem ff_mb_ptype_vlc[64]
Definition: mpeg12.c:124
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1396
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:633
get_dmv
static int get_dmv(MpegEncContext *s)
Definition: mpeg12dec.c:406
tc
#define tc
Definition: regdef.h:69
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mpeg_decode_end
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2562
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
MpegEncContext::inter_scantable
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
Definition: mpegvideo.h:76
IDCTDSPContext::idct_permutation
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:86
ff_ipu_decoder
const FFCodec ff_ipu_decoder
Definition: mpeg12dec.c:2809
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:34
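A minimal sketch of attaching stereo 3D side data to a frame using the two symbols listed above; error handling is reduced to an allocation check and the helper name is illustrative.

#include <errno.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/stereo3d.h>

static int tag_top_bottom(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type = AV_STEREO3D_TOPBOTTOM;   /* views stacked vertically */
    return 0;
}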
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:246
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:35
MpegEncContext::q_scale_type
int q_scale_type
Definition: mpegvideo.h:442
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:470
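The LSB-first packing described above, written out as code; the helper is illustrative (libavutil's MKTAG macro performs the same packing).

#include <stdint.h>

static uint32_t make_fourcc(const char t[4])
{
    /* "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A' */
    return (uint8_t)t[0]           |
           (uint8_t)t[1] << 8      |
           (uint8_t)t[2] << 16     |
           (uint32_t)(uint8_t)t[3] << 24;
}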
Mpeg1Context::mpeg_enc_ctx
MpegEncContext mpeg_enc_ctx
Definition: mpeg12dec.c:66
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:153
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:257
AVPacket
This structure stores compressed data.
Definition: packet.h:499
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
ScanTable::permutated
uint8_t permutated[64]
Definition: mpegvideo.h:60
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
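A small usage sketch of av_dict_set() attaching a string entry to a frame's metadata dictionary, in the same spirit as the GOP timecode string this decoder exports; the key and value here are illustrative.

#include <libavutil/dict.h>
#include <libavutil/frame.h>

static int set_frame_timecode(AVFrame *frame, const char *tc_string)
{
    /* 0 flags: key and value are copied; returns >= 0 on success. */
    return av_dict_set(&frame->metadata, "timecode", tc_string, 0);
}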
mpeg_get_qscale
static int mpeg_get_qscale(MpegEncContext *s)
Definition: mpegvideodec.h:64
mpeg_decode_sequence_extension
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1077
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
mpeg_er.h
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:474
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which typically indicates the size in bytes of each picture line.
Definition: frame.h:385
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
Mpeg1Context::frame_rate_ext
AVRational frame_rate_ext
Definition: mpeg12dec.c:79
mpeg_decode_motion
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
Definition: mpeg12dec.c:116
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
IPUContext::block
int16_t block[6][64]
Definition: mpeg12dec.c:2666
ff_mb_btype_vlc
VLCElem ff_mb_btype_vlc[64]
Definition: mpeg12.c:125
mpeg_decode_user_data
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2043
h
h
Definition: vp9dsp_template.c:2038
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:143
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:33
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:173
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
MV_VLC_BITS
#define MV_VLC_BITS
Definition: mpeg12vlc.h:34
Mpeg1Context::timecode_frame_start
int64_t timecode_frame_start
Definition: mpeg12dec.c:86
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:142
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:215
MpegEncContext::alternate_scan
int alternate_scan
Definition: mpegvideo.h:445
DECODE_SLICE_OK
#define DECODE_SLICE_OK
Definition: mpeg12dec.c:1380
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
DECODE_SLICE_ERROR
#define DECODE_SLICE_ERROR
Definition: mpeg12dec.c:1379
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
load_matrix
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
Definition: mpeg12dec.c:1178
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:642
decode_dc
static int decode_dc(GetBitContext *gb, int component)
Definition: mpeg12dec.h:28
Mpeg1Context::afd
uint8_t afd
Definition: mpeg12dec.c:73
Mpeg1Context
Definition: mpeg12dec.c:65
MpegEncContext::chroma_intra_matrix
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:296
mpeg_decode_picture_coding_extension
static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1215
Mpeg1Context::extradata_decoded
int extradata_decoded
Definition: mpeg12dec.c:85
mpeg2_decode_block_non_intra
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:236
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:66
MBINCR_VLC_BITS
#define MBINCR_VLC_BITS
Definition: mpeg12vlc.h:37
mpeg_decode_slice
static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.
Definition: mpeg12dec.c:1388