FFmpeg
mpeg12dec.c
1 /*
2  * MPEG-1/2 decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * MPEG-1/2 decoder
26  */
27 
28 #include "config_components.h"
29 
30 #define UNCHECKED_BITSTREAM_READER 1
31 #include <inttypes.h>
32 
33 #include "libavutil/attributes.h"
34 #include "libavutil/emms.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/internal.h"
37 #include "libavutil/mem_internal.h"
38 #include "libavutil/reverse.h"
39 #include "libavutil/stereo3d.h"
40 #include "libavutil/timecode.h"
41 
42 #include "avcodec.h"
43 #include "codec_internal.h"
44 #include "decode.h"
45 #include "error_resilience.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "mpeg_er.h"
51 #include "mpeg12.h"
52 #include "mpeg12codecs.h"
53 #include "mpeg12data.h"
54 #include "mpeg12dec.h"
55 #include "mpegutils.h"
56 #include "mpegvideo.h"
57 #include "mpegvideodata.h"
58 #include "mpegvideodec.h"
59 #include "profiles.h"
60 #include "startcode.h"
61 #include "thread.h"
62 
63 #define A53_MAX_CC_COUNT 2000
64 
65 enum Mpeg2ClosedCaptionsFormat {
66  CC_FORMAT_AUTO,
67  CC_FORMAT_A53_PART4,
68  CC_FORMAT_SCTE20,
69  CC_FORMAT_DVD,
70 };
71 
72 typedef struct Mpeg1Context {
73  MpegEncContext mpeg_enc_ctx;
74  int repeat_field; /* true if we must repeat the field */
75  AVPanScan pan_scan; /* some temporary storage for the panscan */
76  AVStereo3D stereo3d;
77  int has_stereo3d;
78  AVBufferRef *a53_buf_ref;
79  enum Mpeg2ClosedCaptionsFormat cc_format;
80  uint8_t afd;
81  int has_afd;
82  int slice_count;
83  unsigned aspect_ratio_info;
84  AVRational save_aspect;
85  int save_width, save_height, save_progressive_seq;
86  AVRational frame_rate_ext; /* MPEG-2 specific framerate modifier */
87  unsigned frame_rate_index;
88  int sync; /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
89  int closed_gop;
90  int tmpgexs;
91  int first_slice;
92  int extradata_decoded;
93  int64_t timecode_frame_start; /*< GOP timecode frame start number, in non drop frame format */
94 } Mpeg1Context;
95 
96 #define MB_TYPE_ZERO_MV 0x20000000
97 
98 static const uint32_t ptype2mb_type[7] = {
99  MB_TYPE_INTRA,
100  MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
101  MB_TYPE_L0,
102  MB_TYPE_L0 | MB_TYPE_CBP,
103  MB_TYPE_QUANT | MB_TYPE_INTRA,
104  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
105  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
106 };
107 
108 static const uint32_t btype2mb_type[11] = {
109  MB_TYPE_INTRA,
110  MB_TYPE_L1,
111  MB_TYPE_L1 | MB_TYPE_CBP,
112  MB_TYPE_L0,
113  MB_TYPE_L0 | MB_TYPE_CBP,
114  MB_TYPE_L0L1,
115  MB_TYPE_L0L1 | MB_TYPE_CBP,
116  MB_TYPE_QUANT | MB_TYPE_INTRA,
117  MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP,
118  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
119  MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP,
120 };
121 
122 /* as H.263, but only 17 codes */
123 static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
124 {
125  int code, sign, val, shift;
126 
127  code = get_vlc2(&s->gb, ff_mv_vlc, MV_VLC_BITS, 2);
128  if (code == 0)
129  return pred;
130  if (code < 0)
131  return 0xffff;
132 
133  sign = get_bits1(&s->gb);
134  shift = fcode - 1;
135  val = code;
136  if (shift) {
137  val = (val - 1) << shift;
138  val |= get_bits(&s->gb, shift);
139  val++;
140  }
141  if (sign)
142  val = -val;
143  val += pred;
144 
145  /* modulo decoding */
146  return sign_extend(val, 5 + shift);
147 }
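
The function above reads the VLC-coded magnitude plus fcode - 1 residual bits, adds the prediction, and then relies on sign_extend(val, 5 + shift) to wrap the result back into the legal range [-16 << shift, (16 << shift) - 1]. A minimal standalone model of that wrap (illustrative only, not FFmpeg's sign_extend helper):

    #include <stdio.h>

    /* Illustrative sketch: keep the low (5 + shift) bits of the sum and
     * reinterpret them as a signed value, which is the "modulo" wrap
     * MPEG-1/2 specifies for decoded motion vectors. */
    static int wrap_motion_vector(int val, int fcode)
    {
        const int bits  = 5 + (fcode - 1);
        const int range = 1 << (bits - 1);   /* legal range is [-range, range - 1] */

        val &= (1 << bits) - 1;
        return val >= range ? val - 2 * range : val;
    }

    int main(void)
    {
        /* pred = 15, decoded delta = +4, fcode = 1: raw 19 wraps to -13 */
        printf("%d\n", wrap_motion_vector(15 + 4, 1));
        return 0;
    }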
148 
149 #define MAX_INDEX (64 - 1)
150 #define check_scantable_index(ctx, x) \
151  do { \
152  if ((x) > MAX_INDEX) { \
153  av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
154  ctx->mb_x, ctx->mb_y); \
155  return AVERROR_INVALIDDATA; \
156  } \
157  } while (0)
158 
159 static inline int mpeg1_decode_block_inter(MpegEncContext *s,
160  int16_t *block, int n)
161 {
162  int level, i, j, run;
163  uint8_t *const scantable = s->intra_scantable.permutated;
164  const uint16_t *quant_matrix = s->inter_matrix;
165  const int qscale = s->qscale;
166 
167  {
168  OPEN_READER(re, &s->gb);
169  i = -1;
170  // special case for first coefficient, no need to add second VLC table
171  UPDATE_CACHE(re, &s->gb);
172  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
173  level = (3 * qscale * quant_matrix[0]) >> 5;
174  level = (level - 1) | 1;
175  if (GET_CACHE(re, &s->gb) & 0x40000000)
176  level = -level;
177  block[0] = level;
178  i++;
179  SKIP_BITS(re, &s->gb, 2);
180  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
181  goto end;
182  }
183  /* now decode and dequantize the AC coefficients */
184  for (;;) {
185  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
186  TEX_VLC_BITS, 2, 0);
187 
188  if (level != 0) {
189  i += run;
190  if (i > MAX_INDEX)
191  break;
192  j = scantable[i];
193  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
194  level = (level - 1) | 1;
195  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
196  SHOW_SBITS(re, &s->gb, 1);
197  SKIP_BITS(re, &s->gb, 1);
198  } else {
199  /* escape */
200  run = SHOW_UBITS(re, &s->gb, 6) + 1;
201  LAST_SKIP_BITS(re, &s->gb, 6);
202  UPDATE_CACHE(re, &s->gb);
203  level = SHOW_SBITS(re, &s->gb, 8);
204  SKIP_BITS(re, &s->gb, 8);
205  if (level == -128) {
206  level = SHOW_UBITS(re, &s->gb, 8) - 256;
207  SKIP_BITS(re, &s->gb, 8);
208  } else if (level == 0) {
209  level = SHOW_UBITS(re, &s->gb, 8);
210  SKIP_BITS(re, &s->gb, 8);
211  }
212  i += run;
213  if (i > MAX_INDEX)
214  break;
215  j = scantable[i];
216  if (level < 0) {
217  level = -level;
218  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
219  level = (level - 1) | 1;
220  level = -level;
221  } else {
222  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
223  level = (level - 1) | 1;
224  }
225  }
226 
227  block[j] = level;
228  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
229  break;
230  UPDATE_CACHE(re, &s->gb);
231  }
232 end:
233  LAST_SKIP_BITS(re, &s->gb, 2);
234  CLOSE_READER(re, &s->gb);
235  }
236 
237  check_scantable_index(s, i);
238 
239  s->block_last_index[n] = i;
240  return 0;
241 }
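
The per-coefficient reconstruction above is the MPEG-1 inter quantiser: level' = ((2*|level| + 1) * qscale * W[j]) >> 5, followed by (level' - 1) | 1, which forces the result odd (MPEG-1 mismatch control) before the sign is restored. A compact sketch of that arithmetic, with hypothetical names rather than FFmpeg API:

    #include <stdlib.h>

    /* Hypothetical helper mirroring the dequantisation above; w is the
     * inter quantiser matrix entry for the scanned position. */
    static int mpeg1_dequant_inter(int level, int qscale, int w)
    {
        int v = ((abs(level) * 2 + 1) * qscale * w) >> 5;
        v = (v - 1) | 1;             /* MPEG-1 mismatch control: result must be odd */
        return level < 0 ? -v : v;
    }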
242 
243 static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
244  int16_t *block, int n)
245 {
246  int level, i, j, run;
247  uint8_t *const scantable = s->intra_scantable.permutated;
248  const uint16_t *quant_matrix;
249  const int qscale = s->qscale;
250  int mismatch;
251 
252  mismatch = 1;
253 
254  {
255  OPEN_READER(re, &s->gb);
256  i = -1;
257  if (n < 4)
258  quant_matrix = s->inter_matrix;
259  else
260  quant_matrix = s->chroma_inter_matrix;
261 
262  // Special case for first coefficient, no need to add second VLC table.
263  UPDATE_CACHE(re, &s->gb);
264  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
265  level = (3 * qscale * quant_matrix[0]) >> 5;
266  if (GET_CACHE(re, &s->gb) & 0x40000000)
267  level = -level;
268  block[0] = level;
269  mismatch ^= level;
270  i++;
271  SKIP_BITS(re, &s->gb, 2);
272  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
273  goto end;
274  }
275 
276  /* now decode and dequantize the AC coefficients */
277  for (;;) {
278  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
279  TEX_VLC_BITS, 2, 0);
280 
281  if (level != 0) {
282  i += run;
283  if (i > MAX_INDEX)
284  break;
285  j = scantable[i];
286  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
287  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
288  SHOW_SBITS(re, &s->gb, 1);
289  SKIP_BITS(re, &s->gb, 1);
290  } else {
291  /* escape */
292  run = SHOW_UBITS(re, &s->gb, 6) + 1;
293  LAST_SKIP_BITS(re, &s->gb, 6);
294  UPDATE_CACHE(re, &s->gb);
295  level = SHOW_SBITS(re, &s->gb, 12);
296  SKIP_BITS(re, &s->gb, 12);
297 
298  i += run;
299  if (i > MAX_INDEX)
300  break;
301  j = scantable[i];
302  if (level < 0) {
303  level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
304  level = -level;
305  } else {
306  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
307  }
308  }
309 
310  mismatch ^= level;
311  block[j] = level;
312  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
313  break;
314  UPDATE_CACHE(re, &s->gb);
315  }
316 end:
317  LAST_SKIP_BITS(re, &s->gb, 2);
318  CLOSE_READER(re, &s->gb);
319  }
320  block[63] ^= (mismatch & 1);
321 
322  check_scantable_index(s, i);
323 
324  s->block_last_index[n] = i;
325  return 0;
326 }
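
Unlike MPEG-1, MPEG-2 does not force every coefficient odd; it applies mismatch control once per block: if the sum of all 64 reconstructed coefficients is even, the least significant bit of coefficient 63 is toggled. The running mismatch variable above (seeded with 1 and XORed with every level) implements exactly that; an equivalent whole-block formulation, for illustration:

    #include <stdint.h>

    /* Equivalent, non-incremental form of the MPEG-2 mismatch control
     * performed above via the running 'mismatch' parity. */
    static void mpeg2_mismatch_control(int16_t block[64])
    {
        int parity = 0;
        for (int i = 0; i < 64; i++)
            parity ^= block[i];
        if (!(parity & 1))
            block[63] ^= 1;
    }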
327 
328 static inline int mpeg2_decode_block_intra(MpegEncContext *s,
329  int16_t *block, int n)
330 {
331  int level, dc, diff, i, j, run;
332  int component;
333  const RL_VLC_ELEM *rl_vlc;
334  uint8_t *const scantable = s->intra_scantable.permutated;
335  const uint16_t *quant_matrix;
336  const int qscale = s->qscale;
337  int mismatch;
338 
339  /* DC coefficient */
340  if (n < 4) {
341  quant_matrix = s->intra_matrix;
342  component = 0;
343  } else {
344  quant_matrix = s->chroma_intra_matrix;
345  component = (n & 1) + 1;
346  }
347  diff = decode_dc(&s->gb, component);
348  dc = s->last_dc[component];
349  dc += diff;
350  s->last_dc[component] = dc;
351  block[0] = dc * (1 << (3 - s->intra_dc_precision));
352  ff_tlog(s->avctx, "dc=%d\n", block[0]);
353  mismatch = block[0] ^ 1;
354  i = 0;
355  if (s->intra_vlc_format)
356  rl_vlc = ff_mpeg2_rl_vlc;
357  else
358  rl_vlc = ff_mpeg1_rl_vlc;
359 
360  {
361  OPEN_READER(re, &s->gb);
362  /* now decode and dequantize the AC coefficients */
363  for (;;) {
364  UPDATE_CACHE(re, &s->gb);
365  GET_RL_VLC(level, run, re, &s->gb, rl_vlc,
366  TEX_VLC_BITS, 2, 0);
367 
368  if (level == 127) {
369  break;
370  } else if (level != 0) {
371  i += run;
372  if (i > MAX_INDEX)
373  break;
374  j = scantable[i];
375  level = (level * qscale * quant_matrix[j]) >> 4;
376  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
377  SHOW_SBITS(re, &s->gb, 1);
378  LAST_SKIP_BITS(re, &s->gb, 1);
379  } else {
380  /* escape */
381  run = SHOW_UBITS(re, &s->gb, 6) + 1;
382  SKIP_BITS(re, &s->gb, 6);
383  level = SHOW_SBITS(re, &s->gb, 12);
384  LAST_SKIP_BITS(re, &s->gb, 12);
385  i += run;
386  if (i > MAX_INDEX)
387  break;
388  j = scantable[i];
389  if (level < 0) {
390  level = (-level * qscale * quant_matrix[j]) >> 4;
391  level = -level;
392  } else {
393  level = (level * qscale * quant_matrix[j]) >> 4;
394  }
395  }
396 
397  mismatch ^= level;
398  block[j] = level;
399  }
400  CLOSE_READER(re, &s->gb);
401  }
402  block[63] ^= mismatch & 1;
403 
404  check_scantable_index(s, i);
405 
406  s->block_last_index[n] = i;
407  return 0;
408 }
409 
410 /******************************************/
411 /* decoding */
412 
413 static inline int get_dmv(MpegEncContext *s)
414 {
415  if (get_bits1(&s->gb))
416  return 1 - (get_bits1(&s->gb) << 1);
417  else
418  return 0;
419 }
420 
421 /* motion type (for MPEG-2) */
422 #define MT_FIELD 1
423 #define MT_FRAME 2
424 #define MT_16X8 2
425 #define MT_DMV 3
426 
427 static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
428 {
429  int i, j, k, cbp, val, mb_type, motion_type;
430  const int mb_block_count = 4 + (1 << s->chroma_format);
431  int ret;
432 
433  ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
434 
435  av_assert2(s->mb_skipped == 0);
436 
437  if (s->mb_skip_run-- != 0) {
438  if (s->pict_type == AV_PICTURE_TYPE_P) {
439  s->mb_skipped = 1;
440  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
441  MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
442  } else {
443  int mb_type;
444 
445  if (s->mb_x)
446  mb_type = s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
447  else
448  // FIXME not sure if this is allowed in MPEG at all
449  mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
450  if (IS_INTRA(mb_type)) {
451  av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
452  return AVERROR_INVALIDDATA;
453  }
454  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
455  mb_type | MB_TYPE_SKIP;
456 
457  if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
458  s->mb_skipped = 1;
459  }
460 
461  return 0;
462  }
463 
464  switch (s->pict_type) {
465  default:
466  case AV_PICTURE_TYPE_I:
467  if (get_bits1(&s->gb) == 0) {
468  if (get_bits1(&s->gb) == 0) {
469  av_log(s->avctx, AV_LOG_ERROR,
470  "Invalid mb type in I-frame at %d %d\n",
471  s->mb_x, s->mb_y);
472  return AVERROR_INVALIDDATA;
473  }
474  mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
475  } else {
476  mb_type = MB_TYPE_INTRA;
477  }
478  break;
479  case AV_PICTURE_TYPE_P:
480  mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc, MB_PTYPE_VLC_BITS, 1);
481  if (mb_type < 0) {
482  av_log(s->avctx, AV_LOG_ERROR,
483  "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
484  return AVERROR_INVALIDDATA;
485  }
486  mb_type = ptype2mb_type[mb_type];
487  break;
488  case AV_PICTURE_TYPE_B:
489  mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc, MB_BTYPE_VLC_BITS, 1);
490  if (mb_type < 0) {
491  av_log(s->avctx, AV_LOG_ERROR,
492  "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);
493  return AVERROR_INVALIDDATA;
494  }
495  mb_type = btype2mb_type[mb_type];
496  break;
497  }
498  ff_tlog(s->avctx, "mb_type=%x\n", mb_type);
499 // motion_type = 0; /* avoid warning */
500  if (IS_INTRA(mb_type)) {
501  s->bdsp.clear_blocks(s->block[0]);
502 
503  if (!s->chroma_y_shift)
504  s->bdsp.clear_blocks(s->block[6]);
505 
506  /* compute DCT type */
507  // FIXME: add an interlaced_dct coded var?
508  if (s->picture_structure == PICT_FRAME &&
509  !s->frame_pred_frame_dct)
510  s->interlaced_dct = get_bits1(&s->gb);
511 
512  if (IS_QUANT(mb_type))
513  s->qscale = mpeg_get_qscale(s);
514 
515  if (s->concealment_motion_vectors) {
516  /* just parse them */
517  if (s->picture_structure != PICT_FRAME)
518  skip_bits1(&s->gb); /* field select */
519 
520  s->mv[0][0][0] =
521  s->last_mv[0][0][0] =
522  s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0],
523  s->last_mv[0][0][0]);
524  s->mv[0][0][1] =
525  s->last_mv[0][0][1] =
526  s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1],
527  s->last_mv[0][0][1]);
528 
529  check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
530  } else {
531  /* reset mv prediction */
532  memset(s->last_mv, 0, sizeof(s->last_mv));
533  }
534  s->mb_intra = 1;
535 
536  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
537  for (i = 0; i < mb_block_count; i++)
538  if ((ret = mpeg2_decode_block_intra(s, *s->pblocks[i], i)) < 0)
539  return ret;
540  } else {
541  for (i = 0; i < 6; i++) {
542  ret = ff_mpeg1_decode_block_intra(&s->gb,
543  s->intra_matrix,
544  s->intra_scantable.permutated,
545  s->last_dc, *s->pblocks[i],
546  i, s->qscale);
547  if (ret < 0) {
548  av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
549  s->mb_x, s->mb_y);
550  return ret;
551  }
552 
553  s->block_last_index[i] = ret;
554  }
555  }
556  } else {
557  if (mb_type & MB_TYPE_ZERO_MV) {
558  av_assert2(mb_type & MB_TYPE_CBP);
559 
560  s->mv_dir = MV_DIR_FORWARD;
561  if (s->picture_structure == PICT_FRAME) {
562  if (s->picture_structure == PICT_FRAME
563  && !s->frame_pred_frame_dct)
564  s->interlaced_dct = get_bits1(&s->gb);
565  s->mv_type = MV_TYPE_16X16;
566  } else {
567  s->mv_type = MV_TYPE_FIELD;
568  mb_type |= MB_TYPE_INTERLACED;
569  s->field_select[0][0] = s->picture_structure - 1;
570  }
571 
572  if (IS_QUANT(mb_type))
573  s->qscale = mpeg_get_qscale(s);
574 
575  s->last_mv[0][0][0] = 0;
576  s->last_mv[0][0][1] = 0;
577  s->last_mv[0][1][0] = 0;
578  s->last_mv[0][1][1] = 0;
579  s->mv[0][0][0] = 0;
580  s->mv[0][0][1] = 0;
581  } else {
582  av_assert2(mb_type & MB_TYPE_L0L1);
583  // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
584  /* get additional motion vector type */
585  if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
586  motion_type = MT_FRAME;
587  } else {
588  motion_type = get_bits(&s->gb, 2);
589  if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
590  s->interlaced_dct = get_bits1(&s->gb);
591  }
592 
593  if (IS_QUANT(mb_type))
594  s->qscale = mpeg_get_qscale(s);
595 
596  /* motion vectors */
597  s->mv_dir = (mb_type >> 13) & 3;
598  ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
599  switch (motion_type) {
600  case MT_FRAME: /* or MT_16X8 */
601  if (s->picture_structure == PICT_FRAME) {
602  mb_type |= MB_TYPE_16x16;
603  s->mv_type = MV_TYPE_16X16;
604  for (i = 0; i < 2; i++) {
605  if (USES_LIST(mb_type, i)) {
606  /* MT_FRAME */
607  s->mv[i][0][0] =
608  s->last_mv[i][0][0] =
609  s->last_mv[i][1][0] =
610  mpeg_decode_motion(s, s->mpeg_f_code[i][0],
611  s->last_mv[i][0][0]);
612  s->mv[i][0][1] =
613  s->last_mv[i][0][1] =
614  s->last_mv[i][1][1] =
615  mpeg_decode_motion(s, s->mpeg_f_code[i][1],
616  s->last_mv[i][0][1]);
617  /* full_pel: only for MPEG-1 */
618  if (s->full_pel[i]) {
619  s->mv[i][0][0] *= 2;
620  s->mv[i][0][1] *= 2;
621  }
622  }
623  }
624  } else {
625  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
626  s->mv_type = MV_TYPE_16X8;
627  for (i = 0; i < 2; i++) {
628  if (USES_LIST(mb_type, i)) {
629  /* MT_16X8 */
630  for (j = 0; j < 2; j++) {
631  s->field_select[i][j] = get_bits1(&s->gb);
632  for (k = 0; k < 2; k++) {
633  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
634  s->last_mv[i][j][k]);
635  s->last_mv[i][j][k] = val;
636  s->mv[i][j][k] = val;
637  }
638  }
639  }
640  }
641  }
642  break;
643  case MT_FIELD:
644  s->mv_type = MV_TYPE_FIELD;
645  if (s->picture_structure == PICT_FRAME) {
646  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
647  for (i = 0; i < 2; i++) {
648  if (USES_LIST(mb_type, i)) {
649  for (j = 0; j < 2; j++) {
650  s->field_select[i][j] = get_bits1(&s->gb);
651  val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
652  s->last_mv[i][j][0]);
653  s->last_mv[i][j][0] = val;
654  s->mv[i][j][0] = val;
655  ff_tlog(s->avctx, "fmx=%d\n", val);
656  val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
657  s->last_mv[i][j][1] >> 1);
658  s->last_mv[i][j][1] = 2 * val;
659  s->mv[i][j][1] = val;
660  ff_tlog(s->avctx, "fmy=%d\n", val);
661  }
662  }
663  }
664  } else {
665  av_assert0(!s->progressive_sequence);
666  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
667  for (i = 0; i < 2; i++) {
668  if (USES_LIST(mb_type, i)) {
669  s->field_select[i][0] = get_bits1(&s->gb);
670  for (k = 0; k < 2; k++) {
671  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
672  s->last_mv[i][0][k]);
673  s->last_mv[i][0][k] = val;
674  s->last_mv[i][1][k] = val;
675  s->mv[i][0][k] = val;
676  }
677  }
678  }
679  }
680  break;
681  case MT_DMV:
682  if (s->progressive_sequence){
683  av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
684  return AVERROR_INVALIDDATA;
685  }
686  s->mv_type = MV_TYPE_DMV;
687  for (i = 0; i < 2; i++) {
688  if (USES_LIST(mb_type, i)) {
689  int dmx, dmy, mx, my, m;
690  const int my_shift = s->picture_structure == PICT_FRAME;
691 
692  mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
693  s->last_mv[i][0][0]);
694  s->last_mv[i][0][0] = mx;
695  s->last_mv[i][1][0] = mx;
696  dmx = get_dmv(s);
697  my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
698  s->last_mv[i][0][1] >> my_shift);
699  dmy = get_dmv(s);
700 
701 
702  s->last_mv[i][0][1] = my * (1 << my_shift);
703  s->last_mv[i][1][1] = my * (1 << my_shift);
704 
705  s->mv[i][0][0] = mx;
706  s->mv[i][0][1] = my;
707  s->mv[i][1][0] = mx; // not used
708  s->mv[i][1][1] = my; // not used
709 
710  if (s->picture_structure == PICT_FRAME) {
711  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
712 
713  // m = 1 + 2 * s->top_field_first;
714  m = s->top_field_first ? 1 : 3;
715 
716  /* top -> top pred */
717  s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
718  s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
719  m = 4 - m;
720  s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
721  s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
722  } else {
723  mb_type |= MB_TYPE_16x16;
724 
725  s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
726  s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
727  if (s->picture_structure == PICT_TOP_FIELD)
728  s->mv[i][2][1]--;
729  else
730  s->mv[i][2][1]++;
731  }
732  }
733  }
734  break;
735  default:
736  av_log(s->avctx, AV_LOG_ERROR,
737  "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
738  return AVERROR_INVALIDDATA;
739  }
740  }
741 
742  s->mb_intra = 0;
743  if (HAS_CBP(mb_type)) {
744  s->bdsp.clear_blocks(s->block[0]);
745 
746  cbp = get_vlc2(&s->gb, ff_mb_pat_vlc, MB_PAT_VLC_BITS, 1);
747  if (mb_block_count > 6) {
748  cbp *= 1 << mb_block_count - 6;
749  cbp |= get_bits(&s->gb, mb_block_count - 6);
750  s->bdsp.clear_blocks(s->block[6]);
751  }
752  if (cbp <= 0) {
753  av_log(s->avctx, AV_LOG_ERROR,
754  "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
755  return AVERROR_INVALIDDATA;
756  }
757 
758  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
759  cbp <<= 12 - mb_block_count;
760 
761  for (i = 0; i < mb_block_count; i++) {
762  if (cbp & (1 << 11)) {
763  if ((ret = mpeg2_decode_block_non_intra(s, *s->pblocks[i], i)) < 0)
764  return ret;
765  } else {
766  s->block_last_index[i] = -1;
767  }
768  cbp += cbp;
769  }
770  } else {
771  for (i = 0; i < 6; i++) {
772  if (cbp & 32) {
773  if ((ret = mpeg1_decode_block_inter(s, *s->pblocks[i], i)) < 0)
774  return ret;
775  } else {
776  s->block_last_index[i] = -1;
777  }
778  cbp += cbp;
779  }
780  }
781  } else {
782  for (i = 0; i < 12; i++)
783  s->block_last_index[i] = -1;
784  }
785  }
786 
787  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
788 
789  return 0;
790 }
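
In the MT_DMV (dual prime) branch above, only one vector plus a small differential is transmitted; the decoder derives the opposite-parity prediction by scaling the vector with the field distance (m = 1 or 3, depending on top_field_first) and correcting the vertical component by ±1 for the half-line offset between fields. A sketch of the frame-picture case, with illustrative names:

    /* Frame-picture dual-prime derivation, mirroring the MT_DMV case above.
     * mx,my is the decoded vector, dmx,dmy the differential; out[0] is the
     * opposite-parity prediction for the top field, out[1] for the bottom. */
    static void dual_prime_frame_vectors(int mx, int my, int dmx, int dmy,
                                         int top_field_first, int out[2][2])
    {
        int m = top_field_first ? 1 : 3;                  /* temporal scaling factor */

        out[0][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
        out[0][1] = ((my * m + (my > 0)) >> 1) + dmy - 1; /* -1: field parity correction */

        m = 4 - m;
        out[1][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
        out[1][1] = ((my * m + (my > 0)) >> 1) + dmy + 1; /* +1: field parity correction */
    }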
791 
792 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
793 {
794  Mpeg1Context *s = avctx->priv_data;
795  MpegEncContext *s2 = &s->mpeg_enc_ctx;
796 
797  if ( avctx->codec_tag != AV_RL32("VCR2")
798  && avctx->codec_tag != AV_RL32("BW10"))
799  avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
800  ff_mpv_decode_init(s2, avctx);
801 
802  ff_mpeg12_init_vlcs();
803 
804  s2->chroma_format = 1;
805  s->repeat_field = 0;
806  avctx->color_range = AVCOL_RANGE_MPEG;
807  return 0;
808 }
809 
810 #if HAVE_THREADS
811 static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
812  const AVCodecContext *avctx_from)
813 {
814  Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
815  MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
816  int err;
817 
818  if (avctx == avctx_from || !s1->context_initialized)
819  return 0;
820 
821  err = ff_mpeg_update_thread_context(avctx, avctx_from);
822  if (err)
823  return err;
824 
825  if (!s->context_initialized)
826  memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
827 
828  return 0;
829 }
830 #endif
831 
832 static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = {
833 #if CONFIG_MPEG1_NVDEC_HWACCEL
834  AV_PIX_FMT_CUDA,
835 #endif
836 #if CONFIG_MPEG1_VDPAU_HWACCEL
837  AV_PIX_FMT_VDPAU,
838 #endif
839  AV_PIX_FMT_YUV420P,
840  AV_PIX_FMT_NONE
841 };
842 
843 static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
844 #if CONFIG_MPEG2_NVDEC_HWACCEL
845  AV_PIX_FMT_CUDA,
846 #endif
847 #if CONFIG_MPEG2_VDPAU_HWACCEL
848  AV_PIX_FMT_VDPAU,
849 #endif
850 #if CONFIG_MPEG2_DXVA2_HWACCEL
851  AV_PIX_FMT_DXVA2_VLD,
852 #endif
853 #if CONFIG_MPEG2_D3D11VA_HWACCEL
854  AV_PIX_FMT_D3D11VA_VLD,
855  AV_PIX_FMT_D3D11,
856 #endif
857 #if CONFIG_MPEG2_D3D12VA_HWACCEL
858  AV_PIX_FMT_D3D12,
859 #endif
860 #if CONFIG_MPEG2_VAAPI_HWACCEL
861  AV_PIX_FMT_VAAPI,
862 #endif
863 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
864  AV_PIX_FMT_VIDEOTOOLBOX,
865 #endif
866  AV_PIX_FMT_YUV420P,
867  AV_PIX_FMT_NONE
868 };
869 
870 static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
871  AV_PIX_FMT_YUV422P,
872  AV_PIX_FMT_NONE
873 };
874 
875 static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
876  AV_PIX_FMT_YUV444P,
877  AV_PIX_FMT_NONE
878 };
879 
880 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
881 {
882  Mpeg1Context *s1 = avctx->priv_data;
883  MpegEncContext *s = &s1->mpeg_enc_ctx;
884  const enum AVPixelFormat *pix_fmts;
885 
886  if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
887  return AV_PIX_FMT_GRAY8;
888 
889  if (s->chroma_format < 2)
890  pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
891  mpeg1_hwaccel_pixfmt_list_420 :
892  mpeg2_hwaccel_pixfmt_list_420;
893  else if (s->chroma_format == 2)
894  pix_fmts = mpeg12_pixfmt_list_422;
895  else
896  pix_fmts = mpeg12_pixfmt_list_444;
897 
898  return ff_get_format(avctx, pix_fmts);
899 }
900 
901 /* Call this function when we know all parameters.
902  * It may be called in different places for MPEG-1 and MPEG-2. */
903 static int mpeg_decode_postinit(AVCodecContext *avctx)
904 {
905  Mpeg1Context *s1 = avctx->priv_data;
906  MpegEncContext *s = &s1->mpeg_enc_ctx;
907  int ret;
908 
909  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
910  // MPEG-1 aspect
911  AVRational aspect_inv = av_d2q(ff_mpeg1_aspect[s1->aspect_ratio_info], 255);
912  avctx->sample_aspect_ratio = (AVRational) { aspect_inv.den, aspect_inv.num };
913  } else { // MPEG-2
914  // MPEG-2 aspect
915  if (s1->aspect_ratio_info > 1) {
916  AVRational dar =
917  av_mul_q(av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
918  (AVRational) { s1->pan_scan.width,
919  s1->pan_scan.height }),
920  (AVRational) { s->width, s->height });
921 
922  /* We ignore the spec here and guess a bit as reality does not
923  * match the spec, see for example res_change_ffmpeg_aspect.ts
924  * and sequence-display-aspect.mpg.
925  * issue1613, 621, 562 */
926  if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
927  (av_cmp_q(dar, (AVRational) { 4, 3 }) &&
928  av_cmp_q(dar, (AVRational) { 16, 9 }))) {
929  s->avctx->sample_aspect_ratio =
930  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
931  (AVRational) { s->width, s->height });
932  } else {
933  s->avctx->sample_aspect_ratio =
934  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
935  (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
936 // issue1613 4/3 16/9 -> 16/9
937 // res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
938 // widescreen-issue562.mpg 4/3 16/9 -> 16/9
939 // s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height});
940  ff_dlog(avctx, "aspect A %d/%d\n",
941  ff_mpeg2_aspect[s1->aspect_ratio_info].num,
942  ff_mpeg2_aspect[s1->aspect_ratio_info].den);
943  ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
944  s->avctx->sample_aspect_ratio.den);
945  }
946  } else {
947  s->avctx->sample_aspect_ratio =
948  ff_mpeg2_aspect[s1->aspect_ratio_info];
949  }
950  } // MPEG-2
951 
952  if (av_image_check_sar(s->width, s->height,
953  avctx->sample_aspect_ratio) < 0) {
954  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
955  avctx->sample_aspect_ratio.num,
956  avctx->sample_aspect_ratio.den);
957  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
958  }
959 
960  if (!s->context_initialized ||
961  avctx->coded_width != s->width ||
962  avctx->coded_height != s->height ||
963  s1->save_width != s->width ||
964  s1->save_height != s->height ||
965  av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) ||
966  (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
967  0) {
968  if (s->context_initialized)
969  ff_mpv_common_end(s);
970 
971  ret = ff_set_dimensions(avctx, s->width, s->height);
972  if (ret < 0)
973  return ret;
974 
975  if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->bit_rate &&
976  (s->bit_rate != 0x3FFFF*400)) {
977  avctx->rc_max_rate = s->bit_rate;
978  } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s->bit_rate &&
979  (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) {
980  avctx->bit_rate = s->bit_rate;
981  }
982  s1->save_aspect = s->avctx->sample_aspect_ratio;
983  s1->save_width = s->width;
984  s1->save_height = s->height;
985  s1->save_progressive_seq = s->progressive_sequence;
986 
987  /* low_delay may be forced, in this case we will have B-frames
988  * that behave like P-frames. */
989  avctx->has_b_frames = !s->low_delay;
990 
991  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
992  // MPEG-1 fps
993  avctx->framerate = ff_mpeg12_frame_rate_tab[s1->frame_rate_index];
994 #if FF_API_TICKS_PER_FRAME
995 FF_DISABLE_DEPRECATION_WARNINGS
996  avctx->ticks_per_frame = 1;
997 FF_ENABLE_DEPRECATION_WARNINGS
998 #endif
999 
1000  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
1001  } else { // MPEG-2
1002  // MPEG-2 fps
1003  av_reduce(&s->avctx->framerate.num,
1004  &s->avctx->framerate.den,
1005  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].num * s1->frame_rate_ext.num,
1006  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].den * s1->frame_rate_ext.den,
1007  1 << 30);
1008 #if FF_API_TICKS_PER_FRAME
1009 FF_DISABLE_DEPRECATION_WARNINGS
1010  avctx->ticks_per_frame = 2;
1011 FF_ENABLE_DEPRECATION_WARNINGS
1012 #endif
1013 
1014  switch (s->chroma_format) {
1015  case 1: avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break;
1016  case 2:
1017  case 3: avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; break;
1018  default: av_assert0(0);
1019  }
1020  } // MPEG-2
1021 
1022  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1023 
1024  if ((ret = ff_mpv_common_init(s)) < 0)
1025  return ret;
1026  }
1027  return 0;
1028 }
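
For MPEG-2, aspect_ratio_information signals a display aspect ratio rather than a pixel shape, so the code above recovers the sample aspect ratio by dividing the DAR by the storage (or pan-scan) aspect. A quick standalone check of that arithmetic with libavutil:

    #include <libavutil/rational.h>
    #include <stdio.h>

    int main(void)
    {
        /* 720x576 coded picture signalled as 16:9 display aspect */
        AVRational dar = { 16, 9 };
        AVRational sar = av_div_q(dar, (AVRational) { 720, 576 });

        printf("SAR = %d:%d\n", sar.num, sar.den);   /* prints 64:45 */
        return 0;
    }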
1029 
1030 static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
1031  int buf_size)
1032 {
1033  Mpeg1Context *s1 = avctx->priv_data;
1034  MpegEncContext *s = &s1->mpeg_enc_ctx;
1035  int ref, f_code, vbv_delay, ret;
1036 
1037  ret = init_get_bits8(&s->gb, buf, buf_size);
1038  if (ret < 0)
1039  return ret;
1040 
1041  ref = get_bits(&s->gb, 10); /* temporal ref */
1042  s->pict_type = get_bits(&s->gb, 3);
1043  if (s->pict_type == 0 || s->pict_type > 3)
1044  return AVERROR_INVALIDDATA;
1045 
1046  vbv_delay = get_bits(&s->gb, 16);
1047  s->vbv_delay = vbv_delay;
1048  if (s->pict_type == AV_PICTURE_TYPE_P ||
1049  s->pict_type == AV_PICTURE_TYPE_B) {
1050  s->full_pel[0] = get_bits1(&s->gb);
1051  f_code = get_bits(&s->gb, 3);
1052  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1053  return AVERROR_INVALIDDATA;
1054  f_code += !f_code;
1055  s->mpeg_f_code[0][0] = f_code;
1056  s->mpeg_f_code[0][1] = f_code;
1057  }
1058  if (s->pict_type == AV_PICTURE_TYPE_B) {
1059  s->full_pel[1] = get_bits1(&s->gb);
1060  f_code = get_bits(&s->gb, 3);
1061  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1062  return AVERROR_INVALIDDATA;
1063  f_code += !f_code;
1064  s->mpeg_f_code[1][0] = f_code;
1065  s->mpeg_f_code[1][1] = f_code;
1066  }
1067 
1068  if (avctx->debug & FF_DEBUG_PICT_INFO)
1069  av_log(avctx, AV_LOG_DEBUG,
1070  "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
1071 
1072  s->y_dc_scale = 8;
1073  s->c_dc_scale = 8;
1074  return 0;
1075 }
1076 
1077 static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
1078 {
1079  MpegEncContext *s = &s1->mpeg_enc_ctx;
1080  int horiz_size_ext, vert_size_ext;
1081  int bit_rate_ext;
1082 
1083  skip_bits(&s->gb, 1); /* profile and level esc*/
1084  s->avctx->profile = get_bits(&s->gb, 3);
1085  s->avctx->level = get_bits(&s->gb, 4);
1086  s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
1087  s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
1088 
1089  if (!s->chroma_format) {
1090  s->chroma_format = 1;
1091  av_log(s->avctx, AV_LOG_WARNING, "Chroma format invalid\n");
1092  }
1093 
1094  horiz_size_ext = get_bits(&s->gb, 2);
1095  vert_size_ext = get_bits(&s->gb, 2);
1096  s->width |= (horiz_size_ext << 12);
1097  s->height |= (vert_size_ext << 12);
1098  bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */
1099  s->bit_rate += (bit_rate_ext << 18) * 400LL;
1100  check_marker(s->avctx, &s->gb, "after bit rate extension");
1101  s->avctx->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;
1102 
1103  s->low_delay = get_bits1(&s->gb);
1104  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1105  s->low_delay = 1;
1106 
1107  s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1;
1108  s1->frame_rate_ext.den = get_bits(&s->gb, 5) + 1;
1109 
1110  ff_dlog(s->avctx, "sequence extension\n");
1111  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1112 
1113  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1114  av_log(s->avctx, AV_LOG_DEBUG,
1115  "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
1116  s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
1117  s->avctx->rc_buffer_size, s->bit_rate);
1118 }
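
The sequence extension widens two header fields: the 12-bit bit_rate_extension supplies the upper bits of the 30-bit bit rate (in units of 400 bit/s), and 8 extra vbv_buffer_size bits are shifted in above the 10 bits read from the sequence header (in units of 16 kbit). The arithmetic, spelled out with hypothetical values:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned bit_rate_value = 0x2AAAA;  /* 18 bits from the sequence header */
        unsigned bit_rate_ext   = 3;        /* 12 bits from the sequence extension */
        int64_t  bit_rate = (int64_t)bit_rate_value * 400 +
                            ((int64_t)bit_rate_ext << 18) * 400;

        unsigned vbv_value = 112;           /* 10 bits from the sequence header */
        unsigned vbv_ext   = 1;             /* 8 bits from the sequence extension */
        int64_t  vbv_bits  = (int64_t)vbv_value * 1024 * 16 +
                             ((int64_t)vbv_ext * 1024 * 16 << 10);

        printf("%"PRId64" bit/s, %"PRId64" bits of VBV buffer\n", bit_rate, vbv_bits);
        return 0;
    }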
1119 
1120 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
1121 {
1122  MpegEncContext *s = &s1->mpeg_enc_ctx;
1123  int color_description, w, h;
1124 
1125  skip_bits(&s->gb, 3); /* video format */
1126  color_description = get_bits1(&s->gb);
1127  if (color_description) {
1128  s->avctx->color_primaries = get_bits(&s->gb, 8);
1129  s->avctx->color_trc = get_bits(&s->gb, 8);
1130  s->avctx->colorspace = get_bits(&s->gb, 8);
1131  }
1132  w = get_bits(&s->gb, 14);
1133  skip_bits(&s->gb, 1); // marker
1134  h = get_bits(&s->gb, 14);
1135  // remaining 3 bits are zero padding
1136 
1137  s1->pan_scan.width = 16 * w;
1138  s1->pan_scan.height = 16 * h;
1139 
1140  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1141  av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
1142 }
1143 
1144 static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
1145 {
1146  MpegEncContext *s = &s1->mpeg_enc_ctx;
1147  int i, nofco;
1148 
1149  nofco = 1;
1150  if (s->progressive_sequence) {
1151  if (s->repeat_first_field) {
1152  nofco++;
1153  if (s->top_field_first)
1154  nofco++;
1155  }
1156  } else {
1157  if (s->picture_structure == PICT_FRAME) {
1158  nofco++;
1159  if (s->repeat_first_field)
1160  nofco++;
1161  }
1162  }
1163  for (i = 0; i < nofco; i++) {
1164  s1->pan_scan.position[i][0] = get_sbits(&s->gb, 16);
1165  skip_bits(&s->gb, 1); // marker
1166  s1->pan_scan.position[i][1] = get_sbits(&s->gb, 16);
1167  skip_bits(&s->gb, 1); // marker
1168  }
1169 
1170  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1171  av_log(s->avctx, AV_LOG_DEBUG,
1172  "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
1173  s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
1174  s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
1175  s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
1176 }
1177 
1178 static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
1179  uint16_t matrix1[64], int intra)
1180 {
1181  int i;
1182 
1183  for (i = 0; i < 64; i++) {
1184  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
1185  int v = get_bits(&s->gb, 8);
1186  if (v == 0) {
1187  av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
1188  return AVERROR_INVALIDDATA;
1189  }
1190  if (intra && i == 0 && v != 8) {
1191  av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
1192  v = 8; // needed by pink.mpg / issue1046
1193  }
1194  matrix0[j] = v;
1195  if (matrix1)
1196  matrix1[j] = v;
1197  }
1198  return 0;
1199 }
1200 
1201 static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
1202 {
1203  ff_dlog(s->avctx, "matrix extension\n");
1204 
1205  if (get_bits1(&s->gb))
1206  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1207  if (get_bits1(&s->gb))
1208  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1209  if (get_bits1(&s->gb))
1210  load_matrix(s, s->chroma_intra_matrix, NULL, 1);
1211  if (get_bits1(&s->gb))
1212  load_matrix(s, s->chroma_inter_matrix, NULL, 0);
1213 }
1214 
1215 static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
1216 {
1217  MpegEncContext *s = &s1->mpeg_enc_ctx;
1218 
1219  s->full_pel[0] = s->full_pel[1] = 0;
1220  s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
1221  s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
1222  s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
1223  s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
1224  s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
1225  s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
1226  s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
1227  s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
1228  if (!s->pict_type && s->context_initialized) {
1229  av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code\n");
1230  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1231  return AVERROR_INVALIDDATA;
1232  av_log(s->avctx, AV_LOG_WARNING, "Guessing pict_type from mpeg_f_code\n");
1233  if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
1234  if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
1235  s->pict_type = AV_PICTURE_TYPE_I;
1236  else
1237  s->pict_type = AV_PICTURE_TYPE_P;
1238  } else
1239  s->pict_type = AV_PICTURE_TYPE_B;
1240  }
1241 
1242  s->intra_dc_precision = get_bits(&s->gb, 2);
1243  s->picture_structure = get_bits(&s->gb, 2);
1244  s->top_field_first = get_bits1(&s->gb);
1245  s->frame_pred_frame_dct = get_bits1(&s->gb);
1246  s->concealment_motion_vectors = get_bits1(&s->gb);
1247  s->q_scale_type = get_bits1(&s->gb);
1248  s->intra_vlc_format = get_bits1(&s->gb);
1249  s->alternate_scan = get_bits1(&s->gb);
1250  s->repeat_first_field = get_bits1(&s->gb);
1251  s->chroma_420_type = get_bits1(&s->gb);
1252  s->progressive_frame = get_bits1(&s->gb);
1253 
1254  if (s->alternate_scan) {
1255  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
1256  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
1257  } else {
1258  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
1259  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
1260  }
1261 
1262  /* composite display not parsed */
1263  ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
1264  ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
1265  ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
1266  ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
1267  ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
1268  ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
1269  ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
1270  ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
1271  ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
1272 
1273  return 0;
1274 }
1275 
1276 static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
1277 {
1278  AVCodecContext *avctx = s->avctx;
1279  Mpeg1Context *s1 = (Mpeg1Context *) s;
1280  int ret;
1281 
1282  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
1283  if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
1284  return AVERROR_INVALIDDATA;
1285  }
1286 
1287  /* start frame decoding */
1288  if (s->first_field || s->picture_structure == PICT_FRAME) {
1289  AVFrameSideData *pan_scan;
1290 
1291  if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
1292  return ret;
1293 
1294  if (s->picture_structure != PICT_FRAME) {
1295  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST *
1296  (s->picture_structure == PICT_TOP_FIELD);
1297 
1298  for (int i = 0; i < 3; i++) {
1299  if (s->picture_structure == PICT_BOTTOM_FIELD) {
1300  s->current_picture.f->data[i] = FF_PTR_ADD(s->current_picture.f->data[i],
1301  s->current_picture.f->linesize[i]);
1302  }
1303  s->current_picture.f->linesize[i] *= 2;
1304  s->last_picture.f->linesize[i] *= 2;
1305  s->next_picture.f->linesize[i] *= 2;
1306  }
1307  }
1308 
1310 
1311  /* first check if we must repeat the frame */
1312  s->current_picture_ptr->f->repeat_pict = 0;
1313  if (s->repeat_first_field) {
1314  if (s->progressive_sequence) {
1315  if (s->top_field_first)
1316  s->current_picture_ptr->f->repeat_pict = 4;
1317  else
1318  s->current_picture_ptr->f->repeat_pict = 2;
1319  } else if (s->progressive_frame) {
1320  s->current_picture_ptr->f->repeat_pict = 1;
1321  }
1322  }
1323 
1324  ret = ff_frame_new_side_data(s->avctx, s->current_picture_ptr->f,
1325  AV_FRAME_DATA_PANSCAN, sizeof(s1->pan_scan),
1326  &pan_scan);
1327  if (ret < 0)
1328  return ret;
1329  if (pan_scan)
1330  memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
1331 
1332  if (s1->a53_buf_ref) {
1333  ret = ff_frame_new_side_data_from_buf(
1334  s->avctx, s->current_picture_ptr->f, AV_FRAME_DATA_A53_CC,
1335  &s1->a53_buf_ref, NULL);
1336  if (ret < 0)
1337  return ret;
1338  }
1339 
1340  if (s1->has_stereo3d) {
1341  AVStereo3D *stereo = av_stereo3d_create_side_data(s->current_picture_ptr->f);
1342  if (!stereo)
1343  return AVERROR(ENOMEM);
1344 
1345  *stereo = s1->stereo3d;
1346  s1->has_stereo3d = 0;
1347  }
1348 
1349  if (s1->has_afd) {
1350  AVFrameSideData *sd;
1351  ret = ff_frame_new_side_data(s->avctx, s->current_picture_ptr->f,
1352  AV_FRAME_DATA_AFD, 1, &sd);
1353  if (ret < 0)
1354  return ret;
1355  if (sd)
1356  *sd->data = s1->afd;
1357  s1->has_afd = 0;
1358  }
1359 
1360  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
1361  ff_thread_finish_setup(avctx);
1362  } else { // second field
1363  if (!s->current_picture_ptr) {
1364  av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
1365  return AVERROR_INVALIDDATA;
1366  }
1367 
1368  if (s->avctx->hwaccel) {
1369  if ((ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame)) < 0) {
1370  av_log(avctx, AV_LOG_ERROR,
1371  "hardware accelerator failed to decode first field\n");
1372  return ret;
1373  }
1374  }
1375 
1376  for (int i = 0; i < 3; i++) {
1377  s->current_picture.f->data[i] = s->current_picture_ptr->f->data[i];
1378  if (s->picture_structure == PICT_BOTTOM_FIELD)
1379  s->current_picture.f->data[i] +=
1380  s->current_picture_ptr->f->linesize[i];
1381  }
1382  }
1383 
1384  if (avctx->hwaccel) {
1385  if ((ret = FF_HW_CALL(avctx, start_frame, buf, buf_size)) < 0)
1386  return ret;
1387  }
1388 
1389  return 0;
1390 }
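
AVFrame.repeat_pict, set above from repeat_first_field, counts extra field periods the frame should be displayed for: 1 for a repeated field of an interlaced-coded progressive frame, 2 or 4 for whole repeated frames in a progressive sequence (3:2 pulldown and similar). The resulting display duration in field periods is simply:

    /* Display duration of a decoded frame in field periods, given the
     * repeat_pict value set in mpeg_field_start() above. */
    static int display_fields(int repeat_pict)
    {
        return 2 + repeat_pict;   /* a frame always covers 2 fields, plus the repeats */
    }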
1391 
1392 #define DECODE_SLICE_ERROR -1
1393 #define DECODE_SLICE_OK 0
1394 
1395 /**
1396  * Decode a slice.
1397  * MpegEncContext.mb_y must be set to the MB row from the startcode.
1398  * @return DECODE_SLICE_ERROR if the slice is damaged,
1399  * DECODE_SLICE_OK if this slice is OK
1400  */
1401 static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
1402  const uint8_t **buf, int buf_size)
1403 {
1404  AVCodecContext *avctx = s->avctx;
1405  const int lowres = s->avctx->lowres;
1406  const int field_pic = s->picture_structure != PICT_FRAME;
1407  int ret;
1408 
1409  s->resync_mb_x =
1410  s->resync_mb_y = -1;
1411 
1412  av_assert0(mb_y < s->mb_height);
1413 
1414  ret = init_get_bits8(&s->gb, *buf, buf_size);
1415  if (ret < 0)
1416  return ret;
1417 
1418  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1419  skip_bits(&s->gb, 3);
1420 
1422  s->interlaced_dct = 0;
1423 
1424  s->qscale = mpeg_get_qscale(s);
1425 
1426  if (s->qscale == 0) {
1427  av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
1428  return AVERROR_INVALIDDATA;
1429  }
1430 
1431  /* extra slice info */
1432  if (skip_1stop_8data_bits(&s->gb) < 0)
1433  return AVERROR_INVALIDDATA;
1434 
1435  s->mb_x = 0;
1436 
1437  if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {
1438  skip_bits1(&s->gb);
1439  } else {
1440  while (get_bits_left(&s->gb) > 0) {
1441  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1442  MBINCR_VLC_BITS, 2);
1443  if (code < 0) {
1444  av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
1445  return AVERROR_INVALIDDATA;
1446  }
1447  if (code >= 33) {
1448  if (code == 33)
1449  s->mb_x += 33;
1450  /* otherwise, stuffing, nothing to do */
1451  } else {
1452  s->mb_x += code;
1453  break;
1454  }
1455  }
1456  }
1457 
1458  if (s->mb_x >= (unsigned) s->mb_width) {
1459  av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
1460  return AVERROR_INVALIDDATA;
1461  }
1462 
1463  if (avctx->hwaccel) {
1464  const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
1465  int start_code = -1;
1466  buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
1467  if (buf_end < *buf + buf_size)
1468  buf_end -= 4;
1469  s->mb_y = mb_y;
1470  if (FF_HW_CALL(avctx, decode_slice, buf_start, buf_end - buf_start) < 0)
1471  return DECODE_SLICE_ERROR;
1472  *buf = buf_end;
1473  return DECODE_SLICE_OK;
1474  }
1475 
1476  s->resync_mb_x = s->mb_x;
1477  s->resync_mb_y = s->mb_y = mb_y;
1478  s->mb_skip_run = 0;
1479  ff_init_block_index(s);
1480 
1481  if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field || s->picture_structure == PICT_FRAME)) {
1482  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
1483  av_log(s->avctx, AV_LOG_DEBUG,
1484  "qp:%d fc:%2d%2d%2d%2d %c %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
1485  s->qscale,
1486  s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
1487  s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
1488  s->pict_type == AV_PICTURE_TYPE_I ? 'I' :
1489  (s->pict_type == AV_PICTURE_TYPE_P ? 'P' :
1490  (s->pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
1491  s->progressive_sequence ? "ps" : "",
1492  s->progressive_frame ? "pf" : "",
1493  s->alternate_scan ? "alt" : "",
1494  s->top_field_first ? "top" : "",
1495  s->intra_dc_precision, s->picture_structure,
1496  s->frame_pred_frame_dct, s->concealment_motion_vectors,
1497  s->q_scale_type, s->intra_vlc_format,
1498  s->repeat_first_field, s->chroma_420_type ? "420" : "");
1499  }
1500  }
1501 
1502  for (;;) {
1503  if ((ret = mpeg_decode_mb(s, s->block)) < 0)
1504  return ret;
1505 
1506  // Note motion_val is normally NULL unless we want to extract the MVs.
1507  if (s->current_picture.motion_val[0]) {
1508  const int wrap = s->b8_stride;
1509  int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
1510  int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
1511  int motion_x, motion_y, dir, i;
1512 
1513  for (i = 0; i < 2; i++) {
1514  for (dir = 0; dir < 2; dir++) {
1515  if (s->mb_intra ||
1516  (dir == 1 && s->pict_type != AV_PICTURE_TYPE_B)) {
1517  motion_x = motion_y = 0;
1518  } else if (s->mv_type == MV_TYPE_16X16 ||
1519  (s->mv_type == MV_TYPE_FIELD && field_pic)) {
1520  motion_x = s->mv[dir][0][0];
1521  motion_y = s->mv[dir][0][1];
1522  } else { /* if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8)) */
1523  motion_x = s->mv[dir][i][0];
1524  motion_y = s->mv[dir][i][1];
1525  }
1526 
1527  s->current_picture.motion_val[dir][xy][0] = motion_x;
1528  s->current_picture.motion_val[dir][xy][1] = motion_y;
1529  s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
1530  s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
1531  s->current_picture.ref_index [dir][b8_xy] =
1532  s->current_picture.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
1533  av_assert2(s->field_select[dir][i] == 0 ||
1534  s->field_select[dir][i] == 1);
1535  }
1536  xy += wrap;
1537  b8_xy += 2;
1538  }
1539  }
1540 
1541  s->dest[0] += 16 >> lowres;
1542  s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
1543  s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
1544 
1545  ff_mpv_reconstruct_mb(s, s->block);
1546 
1547  if (++s->mb_x >= s->mb_width) {
1548  const int mb_size = 16 >> s->avctx->lowres;
1549  int left;
1550 
1551  ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
1552  ff_mpv_report_decode_progress(s);
1553 
1554  s->mb_x = 0;
1555  s->mb_y += 1 << field_pic;
1556 
1557  if (s->mb_y >= s->mb_height) {
1558  int left = get_bits_left(&s->gb);
1559  int is_d10 = s->chroma_format == 2 &&
1560  s->pict_type == AV_PICTURE_TYPE_I &&
1561  avctx->profile == 0 && avctx->level == 5 &&
1562  s->intra_dc_precision == 2 &&
1563  s->q_scale_type == 1 && s->alternate_scan == 0 &&
1564  s->progressive_frame == 0
1565  /* vbv_delay == 0xBBB || 0xE10 */;
1566 
1567  if (left >= 32 && !is_d10) {
1568  GetBitContext gb = s->gb;
1569  align_get_bits(&gb);
1570  if (show_bits(&gb, 24) == 0x060E2B) {
1571  av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
1572  is_d10 = 1;
1573  }
1574  if (left > 32 && show_bits_long(&gb, 32) == 0x201) {
1575  av_log(avctx, AV_LOG_DEBUG, "skipping m704 alpha (unsupported)\n");
1576  goto eos;
1577  }
1578  }
1579 
1580  if (left < 0 ||
1581  (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
1582  ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
1583  av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n",
1584  left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0, s->mb_x, s->mb_y);
1585  return AVERROR_INVALIDDATA;
1586  } else
1587  goto eos;
1588  }
1589  // There are some files out there which are missing the last slice
1590  // in cases where the slice is completely outside the visible
1591  // area, we detect this here instead of running into the end expecting
1592  // more data
1593  left = get_bits_left(&s->gb);
1594  if (s->mb_y >= ((s->height + 15) >> 4) &&
1595  !s->progressive_sequence &&
1596  left <= 25 &&
1597  left >= 0 &&
1598  s->mb_skip_run == -1 &&
1599  (!left || show_bits(&s->gb, left) == 0))
1600  goto eos;
1601 
1602  ff_init_block_index(s);
1603  }
1604 
1605  /* skip mb handling */
1606  if (s->mb_skip_run == -1) {
1607  /* read increment again */
1608  s->mb_skip_run = 0;
1609  for (;;) {
1610  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1611  MBINCR_VLC_BITS, 2);
1612  if (code < 0) {
1613  av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
1614  return AVERROR_INVALIDDATA;
1615  }
1616  if (code >= 33) {
1617  if (code == 33) {
1618  s->mb_skip_run += 33;
1619  } else if (code == 35) {
1620  if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
1621  av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
1622  return AVERROR_INVALIDDATA;
1623  }
1624  goto eos; /* end of slice */
1625  }
1626  /* otherwise, stuffing, nothing to do */
1627  } else {
1628  s->mb_skip_run += code;
1629  break;
1630  }
1631  }
1632  if (s->mb_skip_run) {
1633  int i;
1634  if (s->pict_type == AV_PICTURE_TYPE_I) {
1635  av_log(s->avctx, AV_LOG_ERROR,
1636  "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);
1637  return AVERROR_INVALIDDATA;
1638  }
1639 
1640  /* skip mb */
1641  s->mb_intra = 0;
1642  for (i = 0; i < 12; i++)
1643  s->block_last_index[i] = -1;
1644  if (s->picture_structure == PICT_FRAME)
1645  s->mv_type = MV_TYPE_16X16;
1646  else
1647  s->mv_type = MV_TYPE_FIELD;
1648  if (s->pict_type == AV_PICTURE_TYPE_P) {
1649  /* if P type, zero motion vector is implied */
1650  s->mv_dir = MV_DIR_FORWARD;
1651  s->mv[0][0][0] = s->mv[0][0][1] = 0;
1652  s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
1653  s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
1654  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1655  } else {
1656  /* if B type, reuse previous vectors and directions */
1657  s->mv[0][0][0] = s->last_mv[0][0][0];
1658  s->mv[0][0][1] = s->last_mv[0][0][1];
1659  s->mv[1][0][0] = s->last_mv[1][0][0];
1660  s->mv[1][0][1] = s->last_mv[1][0][1];
1661  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1662  s->field_select[1][0] = (s->picture_structure - 1) & 1;
1663  }
1664  }
1665  }
1666  }
1667 eos: // end of slice
1668  if (get_bits_left(&s->gb) < 0) {
1669  av_log(s, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb));
1670  return AVERROR_INVALIDDATA;
1671  }
1672  *buf += (get_bits_count(&s->gb) - 1) / 8;
1673  ff_dlog(s, "Slice start:%d %d end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
1674  return 0;
1675 }
1676 
1677 static int slice_decode_thread(AVCodecContext *c, void *arg)
1678 {
1679  MpegEncContext *s = *(void **) arg;
1680  const uint8_t *buf = s->gb.buffer;
1681  int mb_y = s->start_mb_y;
1682  const int field_pic = s->picture_structure != PICT_FRAME;
1683 
1684  s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
1685 
1686  for (;;) {
1687  uint32_t start_code;
1688  int ret;
1689 
1690  ret = mpeg_decode_slice(s, mb_y, &buf, s->gb.buffer_end - buf);
1691  emms_c();
1692  ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
1693  ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
1694  s->start_mb_y, s->end_mb_y, s->er.error_count);
1695  if (ret < 0) {
1696  if (c->err_recognition & AV_EF_EXPLODE)
1697  return ret;
1698  if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
1699  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1700  s->mb_x, s->mb_y,
1701  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
1702  } else {
1703  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1704  s->mb_x - 1, s->mb_y,
1705  ER_AC_END | ER_DC_END | ER_MV_END);
1706  }
1707 
1708  if (s->mb_y == s->end_mb_y)
1709  return 0;
1710 
1711  start_code = -1;
1712  buf = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code);
1713  if (start_code < SLICE_MIN_START_CODE || start_code > SLICE_MAX_START_CODE)
1714  return AVERROR_INVALIDDATA;
1715  mb_y = start_code - SLICE_MIN_START_CODE;
1716  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1717  mb_y += (*buf&0xE0)<<2;
1718  mb_y <<= field_pic;
1719  if (s->picture_structure == PICT_BOTTOM_FIELD)
1720  mb_y++;
1721  if (mb_y >= s->end_mb_y)
1722  return AVERROR_INVALIDDATA;
1723  }
1724 }
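
Each slice start code encodes the macroblock row it begins at: the low byte of the start code gives rows 1..175, pictures taller than 2800 lines add a 3-bit slice_vertical_position_extension (read from the top bits of the first slice byte above), and in field pictures every coded row corresponds to two frame rows. A sketch of that mapping with illustrative names:

    #include <stdint.h>

    /* Illustrative mapping from a slice start code to its first macroblock
     * row, mirroring the computation in slice_decode_thread() above. */
    static int slice_start_row(uint32_t start_code, int vpos_ext,
                               int tall_mpeg2, int field_pic, int bottom_field)
    {
        int mb_y = (start_code & 0xFF) - 1;   /* slice start codes run from 0x...01 */

        if (tall_mpeg2)
            mb_y += vpos_ext << 7;            /* slice_vertical_position_extension */
        mb_y <<= field_pic;                   /* field picture: one coded row = two frame rows */
        if (bottom_field)
            mb_y++;
        return mb_y;
    }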
1725 
1726 /**
1727  * Handle slice ends.
1728  * @return 1 if it seems to be the last slice
1729  */
1730 static int slice_end(AVCodecContext *avctx, AVFrame *pict)
1731 {
1732  Mpeg1Context *s1 = avctx->priv_data;
1733  MpegEncContext *s = &s1->mpeg_enc_ctx;
1734 
1735  if (!s->context_initialized || !s->current_picture_ptr)
1736  return 0;
1737 
1738  if (s->avctx->hwaccel) {
1739  int ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame);
1740  if (ret < 0) {
1741  av_log(avctx, AV_LOG_ERROR,
1742  "hardware accelerator failed to decode picture\n");
1743  return ret;
1744  }
1745  }
1746 
1747  /* end of slice reached */
1748  if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field && !s1->first_slice) {
1749  /* end of image */
1750 
1751  ff_er_frame_end(&s->er, NULL);
1752 
1753  ff_mpv_frame_end(s);
1754 
1755  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1756  int ret = av_frame_ref(pict, s->current_picture_ptr->f);
1757  if (ret < 0)
1758  return ret;
1759  ff_print_debug_info(s, s->current_picture_ptr, pict);
1760  ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1761  } else {
1762  /* latency of 1 frame for I- and P-frames */
1763  if (s->last_picture_ptr) {
1764  int ret = av_frame_ref(pict, s->last_picture_ptr->f);
1765  if (ret < 0)
1766  return ret;
1767  ff_print_debug_info(s, s->last_picture_ptr, pict);
1768  ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1769  }
1770  }
1771 
1772  return 1;
1773  } else {
1774  return 0;
1775  }
1776 }
1777 
1778 static int mpeg1_decode_sequence(AVCodecContext *avctx,
1779  const uint8_t *buf, int buf_size)
1780 {
1781  Mpeg1Context *s1 = avctx->priv_data;
1782  MpegEncContext *s = &s1->mpeg_enc_ctx;
1783  int width, height;
1784  int i, v, j;
1785 
1786  int ret = init_get_bits8(&s->gb, buf, buf_size);
1787  if (ret < 0)
1788  return ret;
1789 
1790  width = get_bits(&s->gb, 12);
1791  height = get_bits(&s->gb, 12);
1792  if (width == 0 || height == 0) {
1793  av_log(avctx, AV_LOG_WARNING,
1794  "Invalid horizontal or vertical size value.\n");
1795  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1796  return AVERROR_INVALIDDATA;
1797  }
1798  s1->aspect_ratio_info = get_bits(&s->gb, 4);
1799  if (s1->aspect_ratio_info == 0) {
1800  av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
1801  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1802  return AVERROR_INVALIDDATA;
1803  }
1804  s1->frame_rate_index = get_bits(&s->gb, 4);
1805  if (s1->frame_rate_index == 0 || s1->frame_rate_index > 13) {
1806  av_log(avctx, AV_LOG_WARNING,
1807  "frame_rate_index %d is invalid\n", s1->frame_rate_index);
1808  s1->frame_rate_index = 1;
1809  }
1810  s->bit_rate = get_bits(&s->gb, 18) * 400LL;
1811  if (check_marker(s->avctx, &s->gb, "in sequence header") == 0) {
1812  return AVERROR_INVALIDDATA;
1813  }
1814 
1815  s->avctx->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;
1816  skip_bits(&s->gb, 1);
1817 
1818  /* get matrix */
1819  if (get_bits1(&s->gb)) {
1820  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1821  } else {
1822  for (i = 0; i < 64; i++) {
1823  j = s->idsp.idct_permutation[i];
1824  v = ff_mpeg1_default_intra_matrix[i];
1825  s->intra_matrix[j] = v;
1826  s->chroma_intra_matrix[j] = v;
1827  }
1828  }
1829  if (get_bits1(&s->gb)) {
1830  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1831  } else {
1832  for (i = 0; i < 64; i++) {
1833  int j = s->idsp.idct_permutation[i];
1834  v = ff_mpeg1_default_non_intra_matrix[i];
1835  s->inter_matrix[j] = v;
1836  s->chroma_inter_matrix[j] = v;
1837  }
1838  }
1839 
1840  if (show_bits(&s->gb, 23) != 0) {
1841  av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
1842  return AVERROR_INVALIDDATA;
1843  }
1844 
1845  s->width = width;
1846  s->height = height;
1847 
1848  /* We set MPEG-2 parameters so that it emulates MPEG-1. */
1849  s->progressive_sequence = 1;
1850  s->progressive_frame = 1;
1851  s->picture_structure = PICT_FRAME;
1852  s->first_field = 0;
1853  s->frame_pred_frame_dct = 1;
1854  s->chroma_format = 1;
1855  s->codec_id =
1856  s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1857  s->out_format = FMT_MPEG1;
1858  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1859  s->low_delay = 1;
1860 
1861  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1862  av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n",
1863  s->avctx->rc_buffer_size, s->bit_rate, s1->aspect_ratio_info);
1864 
1865  return 0;
1866 }
1867 
1868 static int vcr2_init_sequence(AVCodecContext *avctx)
1869 {
1870  Mpeg1Context *s1 = avctx->priv_data;
1871  MpegEncContext *s = &s1->mpeg_enc_ctx;
1872  int i, v, ret;
1873 
1874  /* start new MPEG-1 context decoding */
1875  s->out_format = FMT_MPEG1;
1876  if (s->context_initialized)
1877  ff_mpv_common_end(s);
1878 
1879  s->width = avctx->coded_width;
1880  s->height = avctx->coded_height;
1881  avctx->has_b_frames = 0; // true?
1882  s->low_delay = 1;
1883 
1884  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1885 
1886  if ((ret = ff_mpv_common_init(s)) < 0)
1887  return ret;
1888 
1889  for (i = 0; i < 64; i++) {
1890  int j = s->idsp.idct_permutation[i];
1891  v = ff_mpeg1_default_intra_matrix[i];
1892  s->intra_matrix[j] = v;
1893  s->chroma_intra_matrix[j] = v;
1894 
1895  v = ff_mpeg1_default_non_intra_matrix[i];
1896  s->inter_matrix[j] = v;
1897  s->chroma_inter_matrix[j] = v;
1898  }
1899 
1900  s->progressive_sequence = 1;
1901  s->progressive_frame = 1;
1902  s->picture_structure = PICT_FRAME;
1903  s->first_field = 0;
1904  s->frame_pred_frame_dct = 1;
1905  s->chroma_format = 1;
1906  if (s->codec_tag == AV_RL32("BW10")) {
1907  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1908  } else {
1909  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1910  }
1911  s1->save_width = s->width;
1912  s1->save_height = s->height;
1913  s1->save_progressive_seq = s->progressive_sequence;
1914  return 0;
1915 }
1916 
1917 static void mpeg_set_cc_format(AVCodecContext *avctx, enum Mpeg2ClosedCaptionsFormat format,
1918  const char *label)
1919 {
1920  Mpeg1Context *s1 = avctx->priv_data;
1921 
1923 
1924  if (!s1->cc_format) {
1925  s1->cc_format = format;
1926 
1927  av_log(avctx, AV_LOG_DEBUG, "CC: first seen substream is %s format\n", label);
1928  }
1929 }
1930 
1931 static int mpeg_decode_a53_cc(AVCodecContext *avctx,
1932  const uint8_t *p, int buf_size)
1933 {
1934  Mpeg1Context *s1 = avctx->priv_data;
1935 
1936  if ((!s1->cc_format || s1->cc_format == CC_FORMAT_A53_PART4) &&
1937  buf_size >= 6 &&
1938  p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
1939  p[4] == 3 && (p[5] & 0x40)) {
1940  /* extract A53 Part 4 CC data */
1941  int cc_count = p[5] & 0x1f;
1942  if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
1943  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1944  const uint64_t new_size = (old_size + cc_count
1945  * UINT64_C(3));
1946  int ret;
1947 
1948  if (new_size > 3*A53_MAX_CC_COUNT)
1949  return AVERROR(EINVAL);
1950 
1951  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1952  if (ret >= 0)
1953  memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
1954 
1955  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
1956  mpeg_set_cc_format(avctx, CC_FORMAT_A53_PART4, "A/53 Part 4");
1957  }
1958  return 1;
1959  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_SCTE20) &&
1960  buf_size >= 2 &&
1961  p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
1962  /* extract SCTE-20 CC data */
1963  GetBitContext gb;
1964  int cc_count = 0;
1965  int i, ret;
1966 
1967  ret = init_get_bits8(&gb, p + 2, buf_size - 2);
1968  if (ret < 0)
1969  return ret;
1970  cc_count = get_bits(&gb, 5);
1971  if (cc_count > 0) {
1972  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1973  const uint64_t new_size = (old_size + cc_count
1974  * UINT64_C(3));
1975  if (new_size > 3*A53_MAX_CC_COUNT)
1976  return AVERROR(EINVAL);
1977 
1978  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1979  if (ret >= 0) {
1980  uint8_t field, cc1, cc2;
1981  uint8_t *cap = s1->a53_buf_ref->data;
1982 
1983  memset(s1->a53_buf_ref->data + old_size, 0, cc_count * 3);
1984  for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
1985  skip_bits(&gb, 2); // priority
1986  field = get_bits(&gb, 2);
1987  skip_bits(&gb, 5); // line_offset
1988  cc1 = get_bits(&gb, 8);
1989  cc2 = get_bits(&gb, 8);
1990  skip_bits(&gb, 1); // marker
1991 
1992  if (!field) { // forbidden
1993  cap[0] = cap[1] = cap[2] = 0x00;
1994  } else {
1995  field = (field == 2 ? 1 : 0);
1996  if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
1997  cap[0] = 0x04 | field;
1998  cap[1] = ff_reverse[cc1];
1999  cap[2] = ff_reverse[cc2];
2000  }
2001  cap += 3;
2002  }
2003  }
2004 
2005  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2006  mpeg_set_cc_format(avctx, CC_FORMAT_SCTE20, "SCTE-20");
2007  }
2008  return 1;
2009  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_DVD) &&
2010  buf_size >= 11 &&
2011  p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
2012  /* extract DVD CC data
2013  *
2014  * uint32_t user_data_start_code 0x000001B2 (big endian)
2015  * uint16_t user_identifier 0x4343 "CC"
2016  * uint8_t user_data_type_code 0x01
2017  * uint8_t caption_block_size 0xF8
2018  * uint8_t
2019  * bit 7 caption_odd_field_first 1=odd field (CC1/CC2) first 0=even field (CC3/CC4) first
2020  * bit 6 caption_filler 0
2021  * bit 5:1 caption_block_count number of caption blocks (pairs of caption words = frames). Most DVDs use 15 per start of GOP.
2022  * bit 0 caption_extra_field_added 1=one additional caption word
2023  *
2024  * struct caption_field_block {
2025  * uint8_t
2026  * bit 7:1 caption_filler 0x7F (all 1s)
2027  * bit 0 caption_field_odd 1=odd field (this is CC1/CC2) 0=even field (this is CC3/CC4)
2028  * uint8_t caption_first_byte
2029  * uint8_t caption_second_byte
2030  * } caption_block[(caption_block_count * 2) + caption_extra_field_added];
2031  *
2032  * Some DVDs encode caption data for both fields with caption_field_odd=1. The only way to decode the fields
2033  * correctly is to start on the field indicated by caption_odd_field_first and count between odd/even fields.
2034  * Don't assume that the first caption word is the odd field. There do exist MPEG files in the wild that start
2035  * on the even field. There also exist DVDs in the wild that encode an odd field count and the
2036  * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */
2037  int cc_count = 0;
2038  int i, ret;
2039  // There is a caption count field in the data, but it is often
2040  // incorrect. So count the number of captions present.
2041  for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
2042  cc_count++;
2043  // Transform the DVD format into A53 Part 4 format
2044  if (cc_count > 0) {
2045  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2046  const uint64_t new_size = (old_size + cc_count
2047  * UINT64_C(6));
2048  if (new_size > 3*A53_MAX_CC_COUNT)
2049  return AVERROR(EINVAL);
2050 
2051  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2052  if (ret >= 0) {
2053  uint8_t field1 = !!(p[4] & 0x80);
2054  uint8_t *cap = s1->a53_buf_ref->data;
2055  p += 5;
2056  for (i = 0; i < cc_count; i++) {
2057  cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
2058  cap[1] = p[1];
2059  cap[2] = p[2];
2060  cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
2061  cap[4] = p[4];
2062  cap[5] = p[5];
2063  cap += 6;
2064  p += 6;
2065  }
2066  }
2067 
2068  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2069  mpeg_set_cc_format(avctx, CC_FORMAT_DVD, "DVD");
2070  }
2071  return 1;
2072  }
2073  return 0;
2074 }
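Whichever substream the bytes come from (A/53 Part 4, SCTE-20 or DVD), they end up in a53_buf_ref in A/53 Part 4 layout and are attached to the output frame elsewhere in this decoder as AV_FRAME_DATA_A53_CC side data, three bytes per caption packet. An application-side sketch for reading them back; dump_a53_cc is an illustrative name, not an FFmpeg API:

    #include <stdio.h>
    #include <libavutil/frame.h>

    static void dump_a53_cc(const AVFrame *frame)
    {
        const AVFrameSideData *sd =
            av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
        if (!sd)
            return;
        /* byte 0 carries the cc_valid/cc_type flags, bytes 1-2 the data pair */
        for (size_t i = 0; i + 3 <= sd->size; i += 3)
            printf("cc packet: %02x %02x %02x\n",
                   sd->data[i], sd->data[i + 1], sd->data[i + 2]);
    }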
2075 
2076 static void mpeg_decode_user_data(AVCodecContext *avctx,
2077  const uint8_t *p, int buf_size)
2078 {
2079  Mpeg1Context *s = avctx->priv_data;
2080  const uint8_t *buf_end = p + buf_size;
2081  Mpeg1Context *s1 = avctx->priv_data;
2082 
2083 #if 0
2084  int i;
2085  for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){
2086  av_log(avctx, AV_LOG_ERROR, "%c", p[i]);
2087  }
2088  av_log(avctx, AV_LOG_ERROR, "\n");
2089 #endif
2090 
2091  if (buf_size > 29){
2092  int i;
2093  for(i=0; i<20; i++)
2094  if (!memcmp(p+i, "\0TMPGEXS\0", 9)){
2095  s->tmpgexs= 1;
2096  }
2097  }
2098  /* we parse the DTG active format information */
2099  if (buf_end - p >= 5 &&
2100  p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
2101  int flags = p[4];
2102  p += 5;
2103  if (flags & 0x80) {
2104  /* skip event id */
2105  p += 2;
2106  }
2107  if (flags & 0x40) {
2108  if (buf_end - p < 1)
2109  return;
2110  s1->has_afd = 1;
2111  s1->afd = p[0] & 0x0f;
2112  }
2113  } else if (buf_end - p >= 6 &&
2114  p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
2115  p[4] == 0x03) { // S3D_video_format_length
2116  // the 0x7F mask ignores the reserved_bit value
2117  const uint8_t S3D_video_format_type = p[5] & 0x7F;
2118 
2119  if (S3D_video_format_type == 0x03 ||
2120  S3D_video_format_type == 0x04 ||
2121  S3D_video_format_type == 0x08 ||
2122  S3D_video_format_type == 0x23) {
2123 
2124  s1->has_stereo3d = 1;
2125 
2126  switch (S3D_video_format_type) {
2127  case 0x03:
2128  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE;
2129  break;
2130  case 0x04:
2131  s1->stereo3d.type = AV_STEREO3D_TOPBOTTOM;
2132  break;
2133  case 0x08:
2134  s1->stereo3d.type = AV_STEREO3D_2D;
2135  break;
2136  case 0x23:
2137  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2138  break;
2139  }
2140  }
2141  } else if (mpeg_decode_a53_cc(avctx, p, buf_size)) {
2142  return;
2143  }
2144 }
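The AFD and JP3D cases above only record the values in the context (has_afd/afd, has_stereo3d/stereo3d); the decoder exports them later as AV_FRAME_DATA_AFD and AV_FRAME_DATA_STEREO3D side data on the decoded frame. A small application-side sketch for the stereo case, with an illustrative helper name:

    #include <stdio.h>
    #include <libavutil/frame.h>
    #include <libavutil/stereo3d.h>

    static void report_stereo3d(const AVFrame *frame)
    {
        const AVFrameSideData *sd =
            av_frame_get_side_data(frame, AV_FRAME_DATA_STEREO3D);
        if (sd) {
            const AVStereo3D *s3d = (const AVStereo3D *)sd->data;
            printf("stereo3d packing: %s\n", av_stereo3d_type_name(s3d->type));
        }
    }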
2145 
2146 static int mpeg_decode_gop(AVCodecContext *avctx,
2147  const uint8_t *buf, int buf_size)
2148 {
2149  Mpeg1Context *s1 = avctx->priv_data;
2150  MpegEncContext *s = &s1->mpeg_enc_ctx;
2151  int broken_link;
2152  int64_t tc;
2153 
2154  int ret = init_get_bits8(&s->gb, buf, buf_size);
2155  if (ret < 0)
2156  return ret;
2157 
2158  tc = s1->timecode_frame_start = get_bits(&s->gb, 25);
2159 
2160  s1->closed_gop = get_bits1(&s->gb);
2161  /* broken_link indicates that after editing the
2162  * reference frames of the first B-Frames after GOP I-Frame
2163  * are missing (open gop) */
2164  broken_link = get_bits1(&s->gb);
2165 
2166  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
2167  char tcbuf[AV_TIMECODE_STR_SIZE];
2168  av_timecode_make_mpeg_tc_string(tcbuf, tc);
2169  av_log(s->avctx, AV_LOG_DEBUG,
2170  "GOP (%s) closed_gop=%d broken_link=%d\n",
2171  tcbuf, s1->closed_gop, broken_link);
2172  }
2173 
2174  return 0;
2175 }
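The 25-bit time_code read above packs drop_frame_flag (1 bit), hours (5), minutes (6), a marker bit (1), seconds (6) and pictures (6); that is the layout av_timecode_make_mpeg_tc_string() formats. A standalone sketch of the unpacking, with an illustrative helper name:

    #include <stdint.h>
    #include <stdio.h>

    static void print_gop_timecode(uint32_t tc25)
    {
        int drop    = (tc25 >> 24) & 0x1;
        int hours   = (tc25 >> 19) & 0x1f;
        int minutes = (tc25 >> 13) & 0x3f;
        /* bit 12 is the marker bit */
        int seconds = (tc25 >>  6) & 0x3f;
        int frames  =  tc25        & 0x3f;

        printf("%02d:%02d:%02d%c%02d\n", hours, minutes, seconds,
               drop ? ';' : ':', frames);
    }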
2176 
2177 static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
2178  int *got_output, const uint8_t *buf, int buf_size)
2179 {
2180  Mpeg1Context *s = avctx->priv_data;
2181  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2182  const uint8_t *buf_ptr = buf;
2183  const uint8_t *buf_end = buf + buf_size;
2184  int ret, input_size;
2185  int last_code = 0, skip_frame = 0;
2186  int picture_start_code_seen = 0;
2187 
2188  for (;;) {
2189  /* find next start code */
2190  uint32_t start_code = -1;
2191  buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code);
2192  if (start_code > 0x1ff) {
2193  if (!skip_frame) {
2194  if (HAVE_THREADS &&
2195  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2196  !avctx->hwaccel) {
2197  int i;
2198  av_assert0(avctx->thread_count > 1);
2199 
2200  avctx->execute(avctx, slice_decode_thread,
2201  &s2->thread_context[0], NULL,
2202  s->slice_count, sizeof(void *));
2203  for (i = 0; i < s->slice_count; i++)
2204  s2->er.error_count += s2->thread_context[i]->er.error_count;
2205  }
2206 
2207  ret = slice_end(avctx, picture);
2208  if (ret < 0)
2209  return ret;
2210  else if (ret) {
2211  // FIXME: merge with the stuff in mpeg_decode_slice
2212  if (s2->last_picture_ptr || s2->low_delay || s2->pict_type == AV_PICTURE_TYPE_B)
2213  *got_output = 1;
2214  }
2215  }
2216  s2->pict_type = 0;
2217 
2218  if (avctx->err_recognition & AV_EF_EXPLODE && s2->er.error_count)
2219  return AVERROR_INVALIDDATA;
2220 
2221  return FFMAX(0, buf_ptr - buf);
2222  }
2223 
2224  input_size = buf_end - buf_ptr;
2225 
2226  if (avctx->debug & FF_DEBUG_STARTCODE)
2227  av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %"PTRDIFF_SPECIFIER" left %d\n",
2228  start_code, buf_ptr - buf, input_size);
2229 
2230  /* prepare data for next start code */
2231  switch (start_code) {
2232  case SEQ_START_CODE:
2233  if (last_code == 0) {
2234  mpeg1_decode_sequence(avctx, buf_ptr, input_size);
2235  if (buf != avctx->extradata)
2236  s->sync = 1;
2237  } else {
2238  av_log(avctx, AV_LOG_ERROR,
2239  "ignoring SEQ_START_CODE after %X\n", last_code);
2240  if (avctx->err_recognition & AV_EF_EXPLODE)
2241  return AVERROR_INVALIDDATA;
2242  }
2243  break;
2244 
2245  case PICTURE_START_CODE:
2246  if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
2247  /* If it's a frame picture, there can't be more than one picture header.
2248  Yet, it does happen and we need to handle it. */
2249  av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n");
2250  break;
2251  }
2252  picture_start_code_seen = 1;
2253 
2254  if (buf == avctx->extradata && avctx->codec_tag == AV_RL32("AVmp")) {
2255  av_log(avctx, AV_LOG_WARNING, "ignoring picture start code in AVmp extradata\n");
2256  break;
2257  }
2258 
2259  if (s2->width <= 0 || s2->height <= 0) {
2260  av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n",
2261  s2->width, s2->height);
2262  return AVERROR_INVALIDDATA;
2263  }
2264 
2265  if (s->tmpgexs){
2266  s2->intra_dc_precision= 3;
2267  s2->intra_matrix[0]= 1;
2268  }
2269  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
2270  !avctx->hwaccel && s->slice_count) {
2271  int i;
2272 
2273  avctx->execute(avctx, slice_decode_thread,
2274  s2->thread_context, NULL,
2275  s->slice_count, sizeof(void *));
2276  for (i = 0; i < s->slice_count; i++)
2277  s2->er.error_count += s2->thread_context[i]->er.error_count;
2278  s->slice_count = 0;
2279  }
2280  if (last_code == 0 || last_code == SLICE_MIN_START_CODE) {
2281  ret = mpeg_decode_postinit(avctx);
2282  if (ret < 0) {
2283  av_log(avctx, AV_LOG_ERROR,
2284  "mpeg_decode_postinit() failure\n");
2285  return ret;
2286  }
2287 
2288  /* We have a complete image: we try to decompress it. */
2289  if (mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0)
2290  s2->pict_type = 0;
2291  s->first_slice = 1;
2292  last_code = PICTURE_START_CODE;
2293  } else {
2294  av_log(avctx, AV_LOG_ERROR,
2295  "ignoring pic after %X\n", last_code);
2296  if (avctx->err_recognition & AV_EF_EXPLODE)
2297  return AVERROR_INVALIDDATA;
2298  }
2299  break;
2300  case EXT_START_CODE:
2301  ret = init_get_bits8(&s2->gb, buf_ptr, input_size);
2302  if (ret < 0)
2303  return ret;
2304 
2305  switch (get_bits(&s2->gb, 4)) {
2306  case 0x1:
2307  if (last_code == 0) {
2308  mpeg_decode_sequence_extension(s1);
2309  } else {
2310  av_log(avctx, AV_LOG_ERROR,
2311  "ignoring seq ext after %X\n", last_code);
2312  if (avctx->err_recognition & AV_EF_EXPLODE)
2313  return AVERROR_INVALIDDATA;
2314  }
2315  break;
2316  case 0x2:
2317  mpeg_decode_sequence_display_extension(s1);
2318  break;
2319  case 0x3:
2320  mpeg_decode_quant_matrix_extension(s2);
2321  break;
2322  case 0x7:
2323  mpeg_decode_picture_display_extension(s1);
2324  break;
2325  case 0x8:
2326  if (last_code == PICTURE_START_CODE) {
2327  ret = mpeg_decode_picture_coding_extension(s1);
2328  if (ret < 0)
2329  return ret;
2330  } else {
2331  av_log(avctx, AV_LOG_ERROR,
2332  "ignoring pic cod ext after %X\n", last_code);
2333  if (avctx->err_recognition & AV_EF_EXPLODE)
2334  return AVERROR_INVALIDDATA;
2335  }
2336  break;
2337  }
2338  break;
2339  case USER_START_CODE:
2340  mpeg_decode_user_data(avctx, buf_ptr, input_size);
2341  break;
2342  case GOP_START_CODE:
2343  if (last_code == 0) {
2344  s2->first_field = 0;
2345  ret = mpeg_decode_gop(avctx, buf_ptr, input_size);
2346  if (ret < 0)
2347  return ret;
2348  s->sync = 1;
2349  } else {
2350  av_log(avctx, AV_LOG_ERROR,
2351  "ignoring GOP_START_CODE after %X\n", last_code);
2352  if (avctx->err_recognition & AV_EF_EXPLODE)
2353  return AVERROR_INVALIDDATA;
2354  }
2355  break;
2356  default:
2357  if (start_code >= SLICE_MIN_START_CODE &&
2358  start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) {
2359  if (s2->progressive_sequence && !s2->progressive_frame) {
2360  s2->progressive_frame = 1;
2361  av_log(s2->avctx, AV_LOG_ERROR,
2362  "interlaced frame in progressive sequence, ignoring\n");
2363  }
2364 
2365  if (s2->picture_structure == 0 ||
2366  (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) {
2367  av_log(s2->avctx, AV_LOG_ERROR,
2368  "picture_structure %d invalid, ignoring\n",
2369  s2->picture_structure);
2370  s2->picture_structure = PICT_FRAME;
2371  }
2372 
2373  if (s2->progressive_sequence && !s2->frame_pred_frame_dct)
2374  av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
2375 
2376  if (s2->picture_structure == PICT_FRAME) {
2377  s2->first_field = 0;
2378  s2->v_edge_pos = 16 * s2->mb_height;
2379  } else {
2380  s2->first_field ^= 1;
2381  s2->v_edge_pos = 8 * s2->mb_height;
2382  memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
2383  }
2384  }
2385  if (start_code >= SLICE_MIN_START_CODE &&
2386  start_code <= SLICE_MAX_START_CODE && last_code != 0) {
2387  const int field_pic = s2->picture_structure != PICT_FRAME;
2388  int mb_y = start_code - SLICE_MIN_START_CODE;
2389  last_code = SLICE_MIN_START_CODE;
2390  if (s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16)
2391  mb_y += (*buf_ptr&0xE0)<<2;
2392 
2393  mb_y <<= field_pic;
2394  if (s2->picture_structure == PICT_BOTTOM_FIELD)
2395  mb_y++;
2396 
2397  if (buf_end - buf_ptr < 2) {
2398  av_log(s2->avctx, AV_LOG_ERROR, "slice too small\n");
2399  return AVERROR_INVALIDDATA;
2400  }
2401 
2402  if (mb_y >= s2->mb_height) {
2403  av_log(s2->avctx, AV_LOG_ERROR,
2404  "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
2405  return AVERROR_INVALIDDATA;
2406  }
2407 
2408  if (!s2->last_picture_ptr) {
2409  /* Skip B-frames if we do not have reference frames and
2410  * GOP is not closed. */
2411  if (s2->pict_type == AV_PICTURE_TYPE_B) {
2412  if (!s->closed_gop) {
2413  skip_frame = 1;
2414  av_log(s2->avctx, AV_LOG_DEBUG,
2415  "Skipping B slice due to open GOP\n");
2416  break;
2417  }
2418  }
2419  }
2420  if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
2421  s->sync = 1;
2422  if (!s2->next_picture_ptr) {
2423  /* Skip P-frames if we do not have a reference frame or
2424  * we have an invalid header. */
2425  if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) {
2426  skip_frame = 1;
2427  av_log(s2->avctx, AV_LOG_DEBUG,
2428  "Skipping P slice due to !sync\n");
2429  break;
2430  }
2431  }
2432  if ((avctx->skip_frame >= AVDISCARD_NONREF &&
2433  s2->pict_type == AV_PICTURE_TYPE_B) ||
2434  (avctx->skip_frame >= AVDISCARD_NONKEY &&
2435  s2->pict_type != AV_PICTURE_TYPE_I) ||
2436  avctx->skip_frame >= AVDISCARD_ALL) {
2437  skip_frame = 1;
2438  break;
2439  }
2440 
2441  if (!s2->context_initialized)
2442  break;
2443 
2444  if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
2445  if (mb_y < avctx->skip_top ||
2446  mb_y >= s2->mb_height - avctx->skip_bottom)
2447  break;
2448  }
2449 
2450  if (!s2->pict_type) {
2451  av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
2452  if (avctx->err_recognition & AV_EF_EXPLODE)
2453  return AVERROR_INVALIDDATA;
2454  break;
2455  }
2456 
2457  if (s->first_slice) {
2458  skip_frame = 0;
2459  s->first_slice = 0;
2460  if ((ret = mpeg_field_start(s2, buf, buf_size)) < 0)
2461  return ret;
2462  }
2463  if (!s2->current_picture_ptr) {
2464  av_log(avctx, AV_LOG_ERROR,
2465  "current_picture not initialized\n");
2466  return AVERROR_INVALIDDATA;
2467  }
2468 
2469  if (HAVE_THREADS &&
2470  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2471  !avctx->hwaccel) {
2472  int threshold = (s2->mb_height * s->slice_count +
2473  s2->slice_context_count / 2) /
2474  s2->slice_context_count;
2475  av_assert0(avctx->thread_count > 1);
2476  if (threshold <= mb_y) {
2477  MpegEncContext *thread_context = s2->thread_context[s->slice_count];
2478 
2479  thread_context->start_mb_y = mb_y;
2480  thread_context->end_mb_y = s2->mb_height;
2481  if (s->slice_count) {
2482  s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
2483  ret = ff_update_duplicate_context(thread_context, s2);
2484  if (ret < 0)
2485  return ret;
2486  }
2487  ret = init_get_bits8(&thread_context->gb, buf_ptr, input_size);
2488  if (ret < 0)
2489  return ret;
2490  s->slice_count++;
2491  }
2492  buf_ptr += 2; // FIXME add minimum number of bytes per slice
2493  } else {
2494  ret = mpeg_decode_slice(s2, mb_y, &buf_ptr, input_size);
2495  emms_c();
2496 
2497  if (ret < 0) {
2498  if (avctx->err_recognition & AV_EF_EXPLODE)
2499  return ret;
2500  if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
2501  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2502  s2->resync_mb_y, s2->mb_x, s2->mb_y,
2503  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
2504  } else {
2505  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2506  s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
2507  ER_AC_END | ER_DC_END | ER_MV_END);
2508  }
2509  }
2510  }
2511  break;
2512  }
2513  }
2514 }
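decode_chunks() is driven entirely by start codes: every syntactic unit begins with the byte sequence 00 00 01 xx, where xx selects the sequence header (0xB3), GOP header (0xB8), picture header (0x00), slices (0x01-0xAF), extensions (0xB5) or user data (0xB2). A simplified standalone scanner, shown only to illustrate what avpriv_find_start_code() is looking for:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static void list_start_codes(const uint8_t *buf, size_t size)
    {
        for (size_t i = 0; i + 4 <= size; i++) {
            if (buf[i] == 0 && buf[i + 1] == 0 && buf[i + 2] == 1) {
                printf("start code 0x1%02X at offset %zu\n", buf[i + 3], i);
                i += 3; /* skip past the 00 00 01 prefix */
            }
        }
    }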
2515 
2516 static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
2517  int *got_output, AVPacket *avpkt)
2518 {
2519  const uint8_t *buf = avpkt->data;
2520  int ret;
2521  int buf_size = avpkt->size;
2522  Mpeg1Context *s = avctx->priv_data;
2523  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2524 
2525  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
2526  /* special case for last picture */
2527  if (s2->low_delay == 0 && s2->next_picture_ptr) {
2528  int ret = av_frame_ref(picture, s2->next_picture_ptr->f);
2529  if (ret < 0)
2530  return ret;
2531 
2532  s2->next_picture_ptr = NULL;
2533 
2534  *got_output = 1;
2535  }
2536  return buf_size;
2537  }
2538 
2539  if (!s2->context_initialized &&
2540  (s2->codec_tag == AV_RL32("VCR2") || s2->codec_tag == AV_RL32("BW10")))
2541  vcr2_init_sequence(avctx);
2542 
2543  s->slice_count = 0;
2544 
2545  if (avctx->extradata && !s->extradata_decoded) {
2546  ret = decode_chunks(avctx, picture, got_output,
2547  avctx->extradata, avctx->extradata_size);
2548  if (*got_output) {
2549  av_log(avctx, AV_LOG_ERROR, "picture in extradata\n");
2550  av_frame_unref(picture);
2551  *got_output = 0;
2552  }
2553  s->extradata_decoded = 1;
2554  if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
2555  s2->current_picture_ptr = NULL;
2556  return ret;
2557  }
2558  }
2559 
2560  ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
2561  if (ret<0 || *got_output) {
2562  s2->current_picture_ptr = NULL;
2563 
2564  if (s->timecode_frame_start != -1 && *got_output) {
2565  char tcbuf[AV_TIMECODE_STR_SIZE];
2566  AVFrameSideData *tcside = av_frame_new_side_data(picture,
2567  AV_FRAME_DATA_GOP_TIMECODE,
2568  sizeof(int64_t));
2569  if (!tcside)
2570  return AVERROR(ENOMEM);
2571  memcpy(tcside->data, &s->timecode_frame_start, sizeof(int64_t));
2572 
2573  av_timecode_make_mpeg_tc_string(tcbuf, s->timecode_frame_start);
2574  av_dict_set(&picture->metadata, "timecode", tcbuf, 0);
2575 
2576  s->timecode_frame_start = -1;
2577  }
2578  }
2579 
2580  return ret;
2581 }
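When a GOP header preceded the picture, the block above exposes its timecode twice: as AV_FRAME_DATA_GOP_TIMECODE side data and as a "timecode" metadata entry on the frame. An application-side sketch, with an illustrative helper name:

    #include <stdio.h>
    #include <libavutil/dict.h>
    #include <libavutil/frame.h>

    static void report_gop_timecode(const AVFrame *frame)
    {
        const AVDictionaryEntry *e =
            av_dict_get(frame->metadata, "timecode", NULL, 0);
        if (e)
            printf("GOP timecode: %s\n", e->value);
    }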
2582 
2583 static void flush(AVCodecContext *avctx)
2584 {
2585  Mpeg1Context *s = avctx->priv_data;
2586 
2587  s->sync = 0;
2588  s->closed_gop = 0;
2589 
2590  av_buffer_unref(&s->a53_buf_ref);
2591  ff_mpeg_flush(avctx);
2592 }
2593 
2594 static av_cold int mpeg_decode_end(AVCodecContext *avctx)
2595 {
2596  Mpeg1Context *s = avctx->priv_data;
2597 
2598  ff_mpv_common_end(&s->mpeg_enc_ctx);
2599  av_buffer_unref(&s->a53_buf_ref);
2600  return 0;
2601 }
2602 
2603 const FFCodec ff_mpeg1video_decoder = {
2604  .p.name = "mpeg1video",
2605  CODEC_LONG_NAME("MPEG-1 video"),
2606  .p.type = AVMEDIA_TYPE_VIDEO,
2607  .p.id = AV_CODEC_ID_MPEG1VIDEO,
2608  .priv_data_size = sizeof(Mpeg1Context),
2609  .init = mpeg_decode_init,
2610  .close = mpeg_decode_end,
2611  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2612  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2614  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2615  .flush = flush,
2616  .p.max_lowres = 3,
2617  UPDATE_THREAD_CONTEXT(mpeg_decode_update_thread_context),
2618  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2619 #if CONFIG_MPEG1_NVDEC_HWACCEL
2620  HWACCEL_NVDEC(mpeg1),
2621 #endif
2622 #if CONFIG_MPEG1_VDPAU_HWACCEL
2623  HWACCEL_VDPAU(mpeg1),
2624 #endif
2625 #if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL
2626  HWACCEL_VIDEOTOOLBOX(mpeg1),
2627 #endif
2628  NULL
2629  },
2630 };
2631 
2632 #define M2V_OFFSET(x) offsetof(Mpeg1Context, x)
2633 #define M2V_PARAM AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2634 
2635 static const AVOption mpeg2video_options[] = {
2636  { "cc_format", "extract a specific Closed Captions format",
2637  M2V_OFFSET(cc_format), AV_OPT_TYPE_INT, { .i64 = CC_FORMAT_AUTO },
2638  CC_FORMAT_AUTO, CC_FORMAT_DVD, M2V_PARAM, .unit = "cc_format" },
2639 
2640  { "auto", "pick first seen CC substream", 0, AV_OPT_TYPE_CONST,
2641  { .i64 = CC_FORMAT_AUTO }, .flags = M2V_PARAM, .unit = "cc_format" },
2642  { "a53", "pick A/53 Part 4 CC substream", 0, AV_OPT_TYPE_CONST,
2643  { .i64 = CC_FORMAT_A53_PART4 }, .flags = M2V_PARAM, .unit = "cc_format" },
2644  { "scte20", "pick SCTE-20 CC substream", 0, AV_OPT_TYPE_CONST,
2645  { .i64 = CC_FORMAT_SCTE20 }, .flags = M2V_PARAM, .unit = "cc_format" },
2646  { "dvd", "pick DVD CC substream", 0, AV_OPT_TYPE_CONST,
2647  { .i64 = CC_FORMAT_DVD }, .flags = M2V_PARAM, .unit = "cc_format" },
2648  { NULL }
2649 };
2650 
2651 static const AVClass mpeg2video_class = {
2652  .class_name = "MPEG-2 video",
2653  .item_name = av_default_item_name,
2654  .option = mpeg2video_options,
2655  .version = LIBAVUTIL_VERSION_INT,
2656  .category = AV_CLASS_CATEGORY_DECODER,
2657 };
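cc_format is an ordinary decoder private option, so a caller can pin the closed-caption substream before opening the decoder. A minimal sketch, assuming dec_ctx was allocated with avcodec_alloc_context3() for the mpeg2video decoder:

    #include <libavcodec/avcodec.h>
    #include <libavutil/opt.h>

    static int open_mpeg2_with_dvd_cc(AVCodecContext *dec_ctx)
    {
        /* reach the private option through the codec context */
        int ret = av_opt_set(dec_ctx, "cc_format", "dvd", AV_OPT_SEARCH_CHILDREN);
        if (ret < 0)
            return ret;
        return avcodec_open2(dec_ctx, NULL, NULL);
    }

On the ffmpeg command line the same option can usually be passed as an input option, e.g. -cc_format dvd placed before -i.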
2658 
2659 const FFCodec ff_mpeg2video_decoder = {
2660  .p.name = "mpeg2video",
2661  CODEC_LONG_NAME("MPEG-2 video"),
2662  .p.type = AVMEDIA_TYPE_VIDEO,
2663  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2664  .p.priv_class = &mpeg2video_class,
2665  .priv_data_size = sizeof(Mpeg1Context),
2666  .init = mpeg_decode_init,
2667  .close = mpeg_decode_end,
2668  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2669  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2671  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2672  .flush = flush,
2673  .p.max_lowres = 3,
2675  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2676 #if CONFIG_MPEG2_DXVA2_HWACCEL
2677  HWACCEL_DXVA2(mpeg2),
2678 #endif
2679 #if CONFIG_MPEG2_D3D11VA_HWACCEL
2680  HWACCEL_D3D11VA(mpeg2),
2681 #endif
2682 #if CONFIG_MPEG2_D3D11VA2_HWACCEL
2683  HWACCEL_D3D11VA2(mpeg2),
2684 #endif
2685 #if CONFIG_MPEG2_D3D12VA_HWACCEL
2686  HWACCEL_D3D12VA(mpeg2),
2687 #endif
2688 #if CONFIG_MPEG2_NVDEC_HWACCEL
2689  HWACCEL_NVDEC(mpeg2),
2690 #endif
2691 #if CONFIG_MPEG2_VAAPI_HWACCEL
2692  HWACCEL_VAAPI(mpeg2),
2693 #endif
2694 #if CONFIG_MPEG2_VDPAU_HWACCEL
2695  HWACCEL_VDPAU(mpeg2),
2696 #endif
2697 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
2698  HWACCEL_VIDEOTOOLBOX(mpeg2),
2699 #endif
2700  NULL
2701  },
2702 };
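To use one of the hwaccels listed above, an application attaches a hardware device to the codec context before avcodec_open2(); the decoder's get_format selection is then expected to offer the matching hardware pixel format. A minimal sketch using VAAPI, chosen arbitrarily:

    #include <libavcodec/avcodec.h>
    #include <libavutil/buffer.h>
    #include <libavutil/error.h>
    #include <libavutil/hwcontext.h>

    static int attach_vaapi_device(AVCodecContext *dec_ctx)
    {
        AVBufferRef *dev = NULL;
        int ret = av_hwdevice_ctx_create(&dev, AV_HWDEVICE_TYPE_VAAPI,
                                         NULL, NULL, 0);
        if (ret < 0)
            return ret;
        dec_ctx->hw_device_ctx = av_buffer_ref(dev); /* decoder keeps its own ref */
        av_buffer_unref(&dev);
        return dec_ctx->hw_device_ctx ? 0 : AVERROR(ENOMEM);
    }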
2703 
2704 //legacy decoder
2705 const FFCodec ff_mpegvideo_decoder = {
2706  .p.name = "mpegvideo",
2707  CODEC_LONG_NAME("MPEG-1 video"),
2708  .p.type = AVMEDIA_TYPE_VIDEO,
2709  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2710  .priv_data_size = sizeof(Mpeg1Context),
2711  .init = mpeg_decode_init,
2712  .close = mpeg_decode_end,
2713  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2714  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2716  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2717  .flush = flush,
2718  .p.max_lowres = 3,
2719 };
2720 
2721 typedef struct IPUContext {
2722  MpegEncContext m;
2723 
2724  int flags;
2725  DECLARE_ALIGNED(32, int16_t, block)[6][64];
2726 } IPUContext;
2727 
2728 static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2729  int *got_frame, AVPacket *avpkt)
2730 {
2731  IPUContext *s = avctx->priv_data;
2732  MpegEncContext *m = &s->m;
2733  GetBitContext *gb = &m->gb;
2734  int ret;
2735 
2736  // Check for minimal intra MB size (considering mb header, luma & chroma dc VLC, ac EOB VLC)
2737  if (avpkt->size*8LL < (avctx->width+15)/16 * ((avctx->height+15)/16) * (2 + 3*4 + 2*2 + 2*6))
2738  return AVERROR_INVALIDDATA;
2739 
2740  ret = ff_get_buffer(avctx, frame, 0);
2741  if (ret < 0)
2742  return ret;
2743 
2744  ret = init_get_bits8(gb, avpkt->data, avpkt->size);
2745  if (ret < 0)
2746  return ret;
2747 
2748  s->flags = get_bits(gb, 8);
2749  m->intra_dc_precision = s->flags & 3;
2750  m->q_scale_type = !!(s->flags & 0x40);
2751  m->intra_vlc_format = !!(s->flags & 0x20);
2752  m->alternate_scan = !!(s->flags & 0x10);
2753 
2754  if (s->flags & 0x10) {
2757  } else {
2760  }
2761 
2762  m->last_dc[0] = m->last_dc[1] = m->last_dc[2] = 1 << (7 + (s->flags & 3));
2763  m->qscale = 1;
2764 
2765  for (int y = 0; y < avctx->height; y += 16) {
2766  int intraquant;
2767 
2768  for (int x = 0; x < avctx->width; x += 16) {
2769  if (x || y) {
2770  if (!get_bits1(gb))
2771  return AVERROR_INVALIDDATA;
2772  }
2773  if (get_bits1(gb)) {
2774  intraquant = 0;
2775  } else {
2776  if (!get_bits1(gb))
2777  return AVERROR_INVALIDDATA;
2778  intraquant = 1;
2779  }
2780 
2781  if (s->flags & 4)
2782  skip_bits1(gb);
2783 
2784  if (intraquant)
2785  m->qscale = mpeg_get_qscale(m);
2786 
2787  memset(s->block, 0, sizeof(s->block));
2788 
2789  for (int n = 0; n < 6; n++) {
2790  if (s->flags & 0x80) {
2791  ret = ff_mpeg1_decode_block_intra(&m->gb,
2792  m->intra_matrix,
2793  m->intra_scantable.permutated,
2794  m->last_dc, s->block[n],
2795  n, m->qscale);
2796  if (ret >= 0)
2797  m->block_last_index[n] = ret;
2798  } else {
2799  ret = mpeg2_decode_block_intra(m, s->block[n], n);
2800  }
2801 
2802  if (ret < 0)
2803  return ret;
2804  }
2805 
2806  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
2807  frame->linesize[0], s->block[0]);
2808  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
2809  frame->linesize[0], s->block[1]);
2810  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
2811  frame->linesize[0], s->block[2]);
2812  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
2813  frame->linesize[0], s->block[3]);
2814  m->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
2815  frame->linesize[1], s->block[4]);
2816  m->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
2817  frame->linesize[2], s->block[5]);
2818  }
2819  }
2820 
2821  align_get_bits(gb);
2822  if (get_bits_left(gb) != 32)
2823  return AVERROR_INVALIDDATA;
2824 
2825  frame->pict_type = AV_PICTURE_TYPE_I;
2826  frame->flags |= AV_FRAME_FLAG_KEY;
2827  *got_frame = 1;
2828 
2829  return avpkt->size;
2830 }
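The size check at the top of ipu_decode_frame() budgets 2 + 3*4 + 2*2 + 2*6 = 30 bits per intra macroblock, matching the comment there: a macroblock header, four luma DC size codes, two chroma DC size codes and six end-of-block codes. A standalone sketch of that lower bound, with an illustrative helper name:

    #include <stdint.h>

    static int64_t ipu_min_packet_bits(int width, int height)
    {
        int64_t mbs = ((width + 15) / 16) * (int64_t)((height + 15) / 16);
        return mbs * (2 + 3*4 + 2*2 + 2*6); /* 30 bits per macroblock */
    }

For a 720x576 frame this is 1620 macroblocks * 30 bits = 48600 bits, so packets shorter than roughly 6 KiB are rejected before any bitstream parsing.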
2831 
2832 static av_cold int ipu_decode_init(AVCodecContext *avctx)
2833 {
2834  IPUContext *s = avctx->priv_data;
2835  MpegEncContext *m = &s->m;
2836 
2837  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2838 
2839  ff_mpv_decode_init(m, avctx);
2841 
2842  for (int i = 0; i < 64; i++) {
2843  int j = m->idsp.idct_permutation[i];
2844  int v = ff_mpeg1_default_intra_matrix[i];
2845  m->intra_matrix[j] = v;
2846  m->chroma_intra_matrix[j] = v;
2847  }
2848 
2849  for (int i = 0; i < 64; i++) {
2850  int j = m->idsp.idct_permutation[i];
2851  int v = ff_mpeg1_default_non_intra_matrix[i];
2852  m->inter_matrix[j] = v;
2853  m->chroma_inter_matrix[j] = v;
2854  }
2855 
2856  return 0;
2857 }
2858 
2859 const FFCodec ff_ipu_decoder = {
2860  .p.name = "ipu",
2861  CODEC_LONG_NAME("IPU Video"),
2862  .p.type = AVMEDIA_TYPE_VIDEO,
2863  .p.id = AV_CODEC_ID_IPU,
2864  .priv_data_size = sizeof(IPUContext),
2865  .init = ipu_decode_init,
2866  FF_CODEC_DECODE_CB(ipu_decode_frame),
2867  .p.capabilities = AV_CODEC_CAP_DR1,
2868 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
vcr2_init_sequence
static int vcr2_init_sequence(AVCodecContext *avctx)
Definition: mpeg12dec.c:1868
HWACCEL_D3D12VA
#define HWACCEL_D3D12VA(codec)
Definition: hwconfig.h:80
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:682
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1427
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
MB_TYPE_L0
#define MB_TYPE_L0
Definition: mpegutils.h:55
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:267
Mpeg1Context::has_afd
int has_afd
Definition: mpeg12dec.c:81
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
M2V_OFFSET
#define M2V_OFFSET(x)
Definition: mpeg12dec.c:2632
ff_mb_pat_vlc
VLCElem ff_mb_pat_vlc[512]
Definition: mpeg12.c:126
level
uint8_t level
Definition: svq3.c:205
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
Mpeg1Context::a53_buf_ref
AVBufferRef * a53_buf_ref
Definition: mpeg12dec.c:78
ff_mpeg2_aspect
const AVRational ff_mpeg2_aspect[16]
Definition: mpeg12data.c:380
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:495
mpeg_decode_a53_cc
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:1931
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:255
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:467
AV_CLASS_CATEGORY_DECODER
@ AV_CLASS_CATEGORY_DECODER
Definition: log.h:35
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:114
FF_MPV_QSCALE_TYPE_MPEG2
#define FF_MPV_QSCALE_TYPE_MPEG2
Definition: mpegvideodec.h:41
ff_frame_new_side_data_from_buf
int ff_frame_new_side_data_from_buf(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef **buf, AVFrameSideData **psd)
Similar to ff_frame_new_side_data, but using an existing buffer ref.
Definition: decode.c:1971
mem_internal.h
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1222
mpeg_decode_frame
static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_output, AVPacket *avpkt)
Definition: mpeg12dec.c:2516
MpegEncContext::gb
GetBitContext gb
Definition: mpegvideo.h:431
AV_EF_COMPLIANT
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
Definition: defs.h:55
SEQ_END_CODE
#define SEQ_END_CODE
Definition: mpeg12.h:28
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:795
check_scantable_index
#define check_scantable_index(ctx, x)
Definition: mpeg12dec.c:150
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
MT_FIELD
#define MT_FIELD
Definition: mpeg12dec.c:422
EXT_START_CODE
#define EXT_START_CODE
Definition: cavs.h:39
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:269
av_div_q
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
AVPanScan
Pan Scan area.
Definition: defs.h:240
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1420
SLICE_MAX_START_CODE
#define SLICE_MAX_START_CODE
Definition: cavs.h:38
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:43
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
ipu_decode_init
static av_cold int ipu_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:2832
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:491
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:221
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:526
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
ff_mpegvideo_decoder
const FFCodec ff_mpegvideo_decoder
Definition: mpeg12dec.c:2705
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:524
mpeg_decode_mb
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpeg12dec.c:427
Mpeg1Context::closed_gop
int closed_gop
Definition: mpeg12dec.c:89
mpeg2_decode_block_intra
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:328
AVOption
AVOption.
Definition: opt.h:346
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:181
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:42
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:254
FFCodec
Definition: codec_internal.h:126
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:32
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:174
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:821
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:843
reverse.h
mpegvideo.h
MB_TYPE_L1
#define MB_TYPE_L1
Definition: mpegutils.h:56
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:225
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Mpeg1Context::first_slice
int first_slice
Definition: mpeg12dec.c:91
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:34
mpeg_decode_postinit
static int mpeg_decode_postinit(AVCodecContext *avctx)
Definition: mpeg12dec.c:903
mpegutils.h
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
ER_MV_ERROR
#define ER_MV_ERROR
Definition: error_resilience.h:32
thread.h
SEQ_START_CODE
#define SEQ_START_CODE
Definition: mpeg12.h:29
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1397
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:271
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:638
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:263
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
ff_mpeg2_rl_vlc
RL_VLC_ELEM ff_mpeg2_rl_vlc[674]
Definition: mpeg12.c:129
Mpeg1Context::save_aspect
AVRational save_aspect
Definition: mpeg12dec.c:84
MpegEncContext::intra_scantable
ScanTable intra_scantable
Definition: mpegvideo.h:87
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:560
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
MB_TYPE_ZERO_MV
#define MB_TYPE_ZERO_MV
Definition: mpeg12dec.c:96
MT_DMV
#define MT_DMV
Definition: mpeg12dec.c:425
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:970
ff_mbincr_vlc
VLCElem ff_mbincr_vlc[538]
Definition: mpeg12.c:123
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:130
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:63
decode_chunks
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2177
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1819
mpeg_decode_quant_matrix_extension
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
Definition: mpeg12dec.c:1201
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1582
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
wrap
#define wrap(func)
Definition: neontest.h:65
timecode.h
GetBitContext
Definition: get_bits.h:108
AV_EF_BITSTREAM
#define AV_EF_BITSTREAM
detect bitstream specification deviations
Definition: defs.h:49
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:87
slice_decode_thread
static int slice_decode_thread(AVCodecContext *c, void *arg)
Definition: mpeg12dec.c:1677
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:502
IDCTDSPContext::idct_put
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:62
MB_TYPE_CBP
#define MB_TYPE_CBP
Definition: mpegutils.h:59
val
static double val(void *priv, double ch)
Definition: aeval.c:78
Mpeg1Context::tmpgexs
int tmpgexs
Definition: mpeg12dec.c:90
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:334
mpeg12_pixfmt_list_444
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
Definition: mpeg12dec.c:875
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:633
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:460
mpeg1_decode_sequence
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1778
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
HAS_CBP
#define HAS_CBP(a)
Definition: mpegutils.h:89
AVRational::num
int num
Numerator.
Definition: rational.h:59
GOP_START_CODE
#define GOP_START_CODE
Definition: mpeg12.h:30
IPUContext
Definition: mpeg12dec.c:2721
mpeg1_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:832
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:783
mpeg12.h
mpegvideodec.h
ff_mpeg2video_decoder
const FFCodec ff_mpeg2video_decoder
Definition: mpeg12dec.c:2659
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
Mpeg1Context::frame_rate_index
unsigned frame_rate_index
Definition: mpeg12dec.c:87
ipu_decode_frame
static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mpeg12dec.c:2728
ER_DC_ERROR
#define ER_DC_ERROR
Definition: error_resilience.h:31
av_cold
#define av_cold
Definition: attributes.h:90
mpeg2_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:843
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
mpeg1_decode_picture
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1030
flush
static void flush(AVCodecContext *avctx)
Definition: mpeg12dec.c:2583
Mpeg1Context::save_progressive_seq
int save_progressive_seq
Definition: mpeg12dec.c:85
emms_c
#define emms_c()
Definition: emms.h:63
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:188
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:524
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:723
A53_MAX_CC_COUNT
#define A53_MAX_CC_COUNT
Definition: mpeg12dec.c:63
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:891
Mpeg1Context::repeat_field
int repeat_field
Definition: mpeg12dec.c:74
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:286
stereo3d.h
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:134
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_mv_vlc
VLCElem ff_mv_vlc[266]
Definition: mpeg12.c:118
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
ff_mpeg1_aspect
const float ff_mpeg1_aspect[16]
Definition: mpeg12data.c:359
s1
#define s1
Definition: regdef.h:38
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:1730
SHOW_SBITS
#define SHOW_SBITS(name, gb, num)
Definition: get_bits.h:260
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:48
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
Mpeg1Context::aspect_ratio_info
unsigned aspect_ratio_info
Definition: mpeg12dec.c:83
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
mpeg_decode_sequence_display_extension
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1120
Mpeg1Context::pan_scan
AVPanScan pan_scan
Definition: mpeg12dec.c:75
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:320
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:49
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:31
decode.h
mpeg12_pixfmt_list_422
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
Definition: mpeg12dec.c:870
SKIP_BITS
#define SKIP_BITS(name, gb, num)
Definition: get_bits.h:241
IS_INTRA
#define IS_INTRA(x, y)
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1292
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:271
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
arg
const char * arg
Definition: jacosubdec.c:67
rl_vlc
static const VLCElem * rl_vlc[2]
Definition: mobiclip.c:278
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
MB_PTYPE_VLC_BITS
#define MB_PTYPE_VLC_BITS
Definition: mpeg12vlc.h:39
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Mpeg1Context::save_width
int save_width
Definition: mpeg12dec.c:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
NULL
#define NULL
Definition: coverity.c:32
run
uint8_t run
Definition: svq3.c:204
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:695
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
ER_AC_ERROR
#define ER_AC_ERROR
Definition: error_resilience.h:30
SLICE_MIN_START_CODE
#define SLICE_MIN_START_CODE
Definition: mpeg12.h:32
hwaccel_internal.h
Mpeg1Context::sync
int sync
Definition: mpeg12dec.c:88
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:707
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCHROMA_LOC_TOPLEFT
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
Definition: pixfmt.h:709
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:495
mpeg_decode_picture_display_extension
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1144
M2V_PARAM
#define M2V_PARAM
Definition: mpeg12dec.c:2633
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:303
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
profiles.h
CC_FORMAT_A53_PART4
@ CC_FORMAT_A53_PART4
Definition: mpeg12dec.c:67
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:80
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:247
MB_TYPE_QUANT
#define MB_TYPE_QUANT
Definition: mpegutils.h:58
avpriv_find_start_code
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
lowres
static int lowres
Definition: ffplay.c:330
ff_mpeg1_rl_vlc
RL_VLC_ELEM ff_mpeg1_rl_vlc[680]
Definition: mpeg12.c:128
MB_BTYPE_VLC_BITS
#define MB_BTYPE_VLC_BITS
Definition: mpeg12vlc.h:40
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:280
CC_FORMAT_AUTO
@ CC_FORMAT_AUTO
Definition: mpeg12dec.c:66
AV_PIX_FMT_D3D12
@ AV_PIX_FMT_D3D12
Hardware surfaces for Direct3D 12.
Definition: pixfmt.h:440
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
mpeg12codecs.h
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
AVCodecContext::level
int level
Encoding level descriptor.
Definition: avcodec.h:1783
Mpeg1Context::save_height
int save_height
Definition: mpeg12dec.c:85
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::idsp
IDCTDSPContext idsp
Definition: mpegvideo.h:223
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
startcode.h
s2
#define s2
Definition: regdef.h:39
CC_FORMAT_DVD
@ CC_FORMAT_DVD
Definition: mpeg12dec.c:69
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:218
check_marker
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
Definition: mpegvideodec.h:73
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:509
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1556
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:366
mpeg2video_options
static const AVOption mpeg2video_options[]
Definition: mpeg12dec.c:2635
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
AVPacket::size
int size
Definition: packet.h:525
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:200
AV_CODEC_ID_IPU
@ AV_CODEC_ID_IPU
Definition: codec_id.h:306
AV_FRAME_DATA_PANSCAN
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:53
CC_FORMAT_SCTE20
@ CC_FORMAT_SCTE20
Definition: mpeg12dec.c:68
RL_VLC_ELEM
Definition: vlc.h:56
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:384
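A minimal usage sketch (the helper name hold_frame is hypothetical): take an extra reference to an already-decoded frame without copying its data, releasing the shell again on failure.

    #include <libavutil/error.h>
    #include <libavutil/frame.h>

    /* Hypothetical helper: duplicate a reference to src without copying pixel data. */
    static int hold_frame(AVFrame **out, const AVFrame *src)
    {
        int ret;
        *out = av_frame_alloc();
        if (!*out)
            return AVERROR(ENOMEM);
        ret = av_frame_ref(*out, src);   /* new reference to the same buffers */
        if (ret < 0)
            av_frame_free(out);          /* frees the shell and sets *out to NULL */
        return ret;
    }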
MT_FRAME
#define MT_FRAME
Definition: mpeg12dec.c:423
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:109
shift
static int shift(int a, int b)
Definition: bonk.c:261
IPUContext::flags
int flags
Definition: mpeg12dec.c:2724
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:301
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:106
ff_mpeg1video_decoder
const FFCodec ff_mpeg1video_decoder
Definition: mpeg12dec.c:2603
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:1933
AV_RB32
Read a 32-bit value in big-endian byte order (AV_RB32 macro from libavutil/intreadwrite.h, referenced here through the bytestream.h read/write template macros).
Definition: bytestream.h:96
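A small sketch (not taken from this file): AV_RB32() is convenient for checking byte-aligned MPEG start codes, which have the form 0x000001xx.

    #include <libavutil/intreadwrite.h>

    /* Return 1 if buf (at least 4 readable bytes) begins with an MPEG start-code prefix. */
    static int starts_with_startcode(const uint8_t *buf)
    {
        return (AV_RB32(buf) & 0xffffff00) == 0x00000100;
    }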
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: codec_internal.h:54
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:50
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1594
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:497
PICTURE_START_CODE
#define PICTURE_START_CODE
Definition: mpeg12.h:31
USER_START_CODE
#define USER_START_CODE
Definition: cavs.h:40
AVCodecContext::skip_bottom
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
Definition: avcodec.h:1847
AVCodecHWConfigInternal
Definition: hwconfig.h:25
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:165
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:323
MB_TYPE_INTERLACED
#define MB_TYPE_INTERLACED
Definition: mpegutils.h:46
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:505
height
#define height
Mpeg1Context::has_stereo3d
int has_stereo3d
Definition: mpeg12dec.c:77
mpeg_decode_init
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:792
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
mpegvideodata.h
attributes.h
ff_mpeg1_decode_block_intra
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, const uint8_t *scantable, int last_dc[3], int16_t *block, int index, int qscale)
Definition: mpeg12.c:172
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:270
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:413
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:336
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
mpeg2video_class
static const AVClass mpeg2video_class
Definition: mpeg12dec.c:2651
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1593
ff_mpeg2_video_profiles
const AVProfile ff_mpeg2_video_profiles[]
Definition: profiles.c:114
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
emms.h
MB_TYPE_L0L1
#define MB_TYPE_L0L1
Definition: mpegutils.h:57
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
ff_init_scantable
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: mpegvideo.c:322
MpegEncContext::block_last_index
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:78
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::chroma_inter_matrix
uint16_t chroma_inter_matrix[64]
Definition: mpegvideo.h:304
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
Definition: filter_design.txt:178
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:380
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1795
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:63
btype2mb_type
static const uint32_t btype2mb_type[11]
Definition: mpeg12dec.c:108
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:523
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
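get_bits.h is an FFmpeg-internal header; the following is only a sketch of the peek-then-consume pattern (the buffer contents and the 0xB3 value are illustrative), compilable inside the FFmpeg tree.

    #include "get_bits.h"

    static int peek_then_read(const uint8_t *buf, int size)
    {
        GetBitContext gb;
        int ret = init_get_bits8(&gb, buf, size);
        if (ret < 0)
            return ret;
        if (show_bits(&gb, 8) == 0xB3)   /* peek without advancing the read position */
            return get_bits(&gb, 8);     /* now actually consume those 8 bits */
        return -1;
    }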
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:45
mpeg_set_cc_format
static void mpeg_set_cc_format(AVCodecContext *avctx, enum Mpeg2ClosedCaptionsFormat format, const char *label)
Definition: mpeg12dec.c:1917
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
IS_QUANT
#define IS_QUANT(a)
Definition: mpegutils.h:83
ff_mpeg12_init_vlcs
av_cold void ff_mpeg12_init_vlcs(void)
Definition: mpeg12.c:164
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1404
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_d2q
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
Definition: rational.c:106
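A quick sketch (the helper name and the 255 bound are illustrative): recover a rational from a floating-point ratio with a bounded denominator.

    #include <libavutil/rational.h>

    static AVRational guess_sar(double ratio)
    {
        /* Exact small ratios round-trip: guess_sar(0.5625) yields 9/16. */
        return av_d2q(ratio, 255);
    }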
MB_PAT_VLC_BITS
#define MB_PAT_VLC_BITS
Definition: mpeg12vlc.h:38
mpeg1_decode_block_inter
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:159
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:606
ptype2mb_type
static const uint32_t ptype2mb_type[7]
Definition: mpeg12dec.c:98
IPUContext::m
MpegEncContext m
Definition: mpeg12dec.c:2722
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
MpegEncContext::intra_vlc_format
int intra_vlc_format
Definition: mpegvideo.h:450
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:702
MAX_INDEX
#define MAX_INDEX
Definition: mpeg12dec.c:149
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:669
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
Mpeg1Context::stereo3d
AVStereo3D stereo3d
Definition: mpeg12dec.c:76
idctdsp.h
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
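A tiny sketch: compare two rationals without converting to floating point; equal values in different forms compare equal.

    #include <libavutil/rational.h>

    /* Returns nonzero if a and b denote the same ratio, e.g. 1/2 and 2/4. */
    static int same_ratio(AVRational a, AVRational b)
    {
        return av_cmp_q(a, b) == 0;   /* av_cmp_q: -1, 0, 1, or INT_MIN if undefined */
    }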
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:606
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ff_mpeg12_frame_rate_tab
const AVRational ff_mpeg12_frame_rate_tab[]
Definition: mpeg12framerate.c:24
mpeg_decode_gop
static int mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2146
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
pred
static const float pred[4]
Definition: siprdata.h:259
AV_FRAME_DATA_GOP_TIMECODE
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
Definition: frame.h:125
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:71
frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
TEX_VLC_BITS
#define TEX_VLC_BITS
Definition: dvdec.c:147
ff_thread_finish_setup
The pkt_dts and pkt_pts fields in AVFrame will work as usual. Codecs whose streams don't reset across frames will not work, because their bitstreams cannot be decoded in parallel. The contents of buffers must not be read before setup is complete; move such code so it runs before the decode process starts and call ff_thread_finish_setup() afterwards.
left
Definition: snow.txt:386
mpeg_get_pixelformat
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
Definition: mpeg12dec.c:880
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:371
AV_RL32
Read a 32-bit value in little-endian byte order (AV_RL32 macro from libavutil/intreadwrite.h, referenced here through the bytestream.h read/write template macros).
Definition: bytestream.h:92
mpeg12data.h
mpeg_field_start
static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1276
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:62
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:700
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1601
av_timecode_make_mpeg_tc_string
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
Definition: timecode.c:167
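A sketch of turning a 25-bit GOP timecode into a string (the helper name is hypothetical; tc25bit would come from the GOP header).

    #include <stdio.h>
    #include <libavutil/timecode.h>

    static void print_gop_timecode(uint32_t tc25bit)
    {
        char buf[AV_TIMECODE_STR_SIZE];   /* sized for "hh:mm:ss[:;]ff" strings */
        printf("GOP timecode: %s\n", av_timecode_make_mpeg_tc_string(buf, tc25bit));
    }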
MpegEncContext::intra_dc_precision
int intra_dc_precision
Definition: mpegvideo.h:444
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1612
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:259
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
mpeg12dec.h
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:708
AVRational::den
int den
Denominator.
Definition: rational.h:60
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:171
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1639
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:692
Mpeg1Context::cc_format
enum Mpeg2ClosedCaptionsFormat cc_format
Definition: mpeg12dec.c:79
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:133
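sign_extend() lives in the internal mathops.h; below is a hedged re-implementation sketch of the same idea (shift the value up, then arithmetic-shift it back down), which is what the modulo decoding of motion vectors relies on.

    /* Interpret the low 'bits' bits of val as a two's-complement number. */
    static int sign_extend_sketch(int val, unsigned bits)
    {
        unsigned shift = 8 * sizeof(int) - bits;
        union { unsigned u; int s; } v = { (unsigned)val << shift };
        return v.s >> shift;   /* e.g. sign_extend_sketch(0x1f, 5) == -1 */
    }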
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:452
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
Mpeg1Context::slice_count
int slice_count
Definition: mpeg12dec.c:82
AVCodecContext::ticks_per_frame
attribute_deprecated int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:576
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:76
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1797
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
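A sketch of the kind of arithmetic needed when a base frame rate is scaled by an extension factor (the 2/1 example value is illustrative).

    #include <libavutil/rational.h>

    static AVRational scaled_frame_rate(AVRational base, int ext_num, int ext_den)
    {
        /* e.g. base 24000/1001 scaled by 2/1 gives 48000/1001 */
        return av_mul_q(base, (AVRational){ ext_num, ext_den });
    }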
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
av_buffer_realloc
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
Definition: buffer.c:183
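A sketch of growing a reference-counted buffer while appending data, similar in spirit to how accumulated side data (such as closed captions) is handled; the helper name is hypothetical.

    #include <string.h>
    #include <libavutil/buffer.h>

    static int append_bytes(AVBufferRef **buf, const uint8_t *src, size_t len)
    {
        size_t old_size = *buf ? (*buf)->size : 0;
        int ret = av_buffer_realloc(buf, old_size + len);  /* allocates if *buf is NULL */
        if (ret < 0)
            return ret;
        memcpy((*buf)->data + old_size, src, len);
        return 0;
    }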
ff_mb_ptype_vlc
VLCElem ff_mb_ptype_vlc[64]
Definition: mpeg12.c:124
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1396
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:633
get_dmv
static int get_dmv(MpegEncContext *s)
Definition: mpeg12dec.c:413
tc
#define tc
Definition: regdef.h:69
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mpeg_decode_end
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2594
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
MpegEncContext::inter_scantable
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
Definition: mpegvideo.h:82
IDCTDSPContext::idct_permutation
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:86
ff_ipu_decoder
const FFCodec ff_ipu_decoder
Definition: mpeg12dec.c:2859
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:34
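A sketch of attaching stereo 3D side data to a frame; the AV_STEREO3D_TOPBOTTOM choice here is only an example value.

    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libavutil/stereo3d.h>

    static int tag_top_bottom(AVFrame *frame)
    {
        AVStereo3D *s3d = av_stereo3d_create_side_data(frame);  /* adds AV_FRAME_DATA_STEREO3D */
        if (!s3d)
            return AVERROR(ENOMEM);
        s3d->type = AV_STEREO3D_TOPBOTTOM;
        return 0;
    }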
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:35
MpegEncContext::q_scale_type
int q_scale_type
Definition: mpegvideo.h:448
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:470
Mpeg1Context::mpeg_enc_ctx
MpegEncContext mpeg_enc_ctx
Definition: mpeg12dec.c:73
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:141
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:263
AVPacket
This structure stores compressed data.
Definition: packet.h:501
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
ScanTable::permutated
uint8_t permutated[64]
Definition: mpegvideo.h:58
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
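A minimal sketch of storing a key/value pair in an AVDictionary, the mechanism used to export per-frame metadata such as a timecode string; the key chosen here is illustrative.

    #include <libavutil/dict.h>

    static int set_timecode_meta(AVDictionary **meta, const char *value)
    {
        /* Both key and value are copied unless AV_DICT_DONT_STRDUP_* flags are passed. */
        return av_dict_set(meta, "timecode", value, 0);
    }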
mpeg_get_qscale
static int mpeg_get_qscale(MpegEncContext *s)
Definition: mpegvideodec.h:64
mpeg_decode_sequence_extension
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1077
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
mpeg_er.h
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:474
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
Mpeg1Context::frame_rate_ext
AVRational frame_rate_ext
Definition: mpeg12dec.c:86
mpeg_decode_motion
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
Definition: mpeg12dec.c:123
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
IPUContext::block
int16_t block[6][64]
Definition: mpeg12dec.c:2725
ff_mb_btype_vlc
VLCElem ff_mb_btype_vlc[64]
Definition: mpeg12.c:125
mpeg_decode_user_data
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2076
h
h
Definition: vp9dsp_template.c:2038
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:149
Mpeg2ClosedCaptionsFormat
Mpeg2ClosedCaptionsFormat
Definition: mpeg12dec.c:65
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:33
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:173
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
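A sketch of validating a sample aspect ratio before exporting it; falling back to 0/1 (unknown) on failure is an illustrative policy, not necessarily what this decoder does.

    #include <libavutil/imgutils.h>
    #include <libavutil/rational.h>

    static AVRational checked_sar(unsigned w, unsigned h, AVRational sar)
    {
        if (av_image_check_sar(w, h, sar) < 0)   /* returns 0 if valid, negative AVERROR otherwise */
            return (AVRational){ 0, 1 };         /* treat as unknown aspect */
        return sar;
    }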
MV_VLC_BITS
#define MV_VLC_BITS
Definition: mpeg12vlc.h:34
Mpeg1Context::timecode_frame_start
int64_t timecode_frame_start
Definition: mpeg12dec.c:93
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:148
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:215
MpegEncContext::alternate_scan
int alternate_scan
Definition: mpegvideo.h:451
DECODE_SLICE_OK
#define DECODE_SLICE_OK
Definition: mpeg12dec.c:1393
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
DECODE_SLICE_ERROR
#define DECODE_SLICE_ERROR
Definition: mpeg12dec.c:1392
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:244
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
load_matrix
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
Definition: mpeg12dec.c:1178
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:642
decode_dc
static int decode_dc(GetBitContext *gb, int component)
Definition: mpeg12dec.h:28
Mpeg1Context::afd
uint8_t afd
Definition: mpeg12dec.c:80
Mpeg1Context
Definition: mpeg12dec.c:72
MpegEncContext::chroma_intra_matrix
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:302
mpeg_decode_picture_coding_extension
static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1215
Mpeg1Context::extradata_decoded
int extradata_decoded
Definition: mpeg12dec.c:92
mpeg2_decode_block_non_intra
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:243
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:61
MBINCR_VLC_BITS
#define MBINCR_VLC_BITS
Definition: mpeg12vlc.h:37
mpeg_decode_slice
static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.
Definition: mpeg12dec.c:1401