mpeg12dec.c
1 /*
2  * MPEG-1/2 decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * MPEG-1/2 decoder
26  */
27 
28 #include "config_components.h"
29 
30 #define UNCHECKED_BITSTREAM_READER 1
31 #include <inttypes.h>
32 
33 #include "libavutil/attributes.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/internal.h"
36 #include "libavutil/mem_internal.h"
37 #include "libavutil/reverse.h"
38 #include "libavutil/stereo3d.h"
39 #include "libavutil/timecode.h"
40 
41 #include "avcodec.h"
42 #include "codec_internal.h"
43 #include "decode.h"
44 #include "error_resilience.h"
45 #include "hwconfig.h"
46 #include "idctdsp.h"
47 #include "internal.h"
48 #include "mpeg_er.h"
49 #include "mpeg12.h"
50 #include "mpeg12codecs.h"
51 #include "mpeg12data.h"
52 #include "mpeg12dec.h"
53 #include "mpegutils.h"
54 #include "mpegvideo.h"
55 #include "mpegvideodata.h"
56 #include "mpegvideodec.h"
57 #include "profiles.h"
58 #include "startcode.h"
59 #include "thread.h"
60 
61 #define A53_MAX_CC_COUNT 2000
62 
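/* Decoder-private state wrapping the shared MpegEncContext: pan-scan and
 * side-data buffers (A/53 captions, stereo 3D, AFD), sequence/display
 * extension values, and bookkeeping used to detect when coded parameters
 * change and the context must be reallocated. */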
63 typedef struct Mpeg1Context {
64  MpegEncContext mpeg_enc_ctx;
65  int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
66  int repeat_field; /* true if we must repeat the field */
67  AVPanScan pan_scan; /* some temporary storage for the panscan */
68  AVStereo3D stereo3d;
69  int has_stereo3d;
70  AVBufferRef *a53_buf_ref;
71  uint8_t afd;
72  int has_afd;
73  int slice_count;
74  unsigned aspect_ratio_info;
75  AVRational save_aspect;
76  int save_width, save_height, save_progressive_seq;
77  int rc_buffer_size;
78  AVRational frame_rate_ext; /* MPEG-2 specific framerate modifier */
79  unsigned frame_rate_index;
80  int sync; /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
81  int closed_gop;
82  int tmpgexs;
83  int first_slice;
84  int extradata_decoded;
85  int64_t timecode_frame_start; /*< GOP timecode frame start number, in non-drop-frame format */
86 } Mpeg1Context;
87 
88 #define MB_TYPE_ZERO_MV 0x20000000
89 
90 static const uint32_t ptype2mb_type[7] = {
91  MB_TYPE_INTRA,
92  MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
93  MB_TYPE_L0,
94  MB_TYPE_L0 | MB_TYPE_CBP,
95  MB_TYPE_QUANT | MB_TYPE_INTRA,
96  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
97  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
98 };
99 
100 static const uint32_t btype2mb_type[11] = {
101  MB_TYPE_INTRA,
102  MB_TYPE_L1,
103  MB_TYPE_L1 | MB_TYPE_CBP,
104  MB_TYPE_L0,
105  MB_TYPE_L0 | MB_TYPE_CBP,
106  MB_TYPE_L0L1,
107  MB_TYPE_L0L1 | MB_TYPE_CBP,
108  MB_TYPE_QUANT | MB_TYPE_INTRA,
109  MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP,
110  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
111  MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP,
112 };
113 
114 /* as H.263, but only 17 codes */
115 static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
116 {
117  int code, sign, val, shift;
118 
119  code = get_vlc2(&s->gb, ff_mv_vlc.table, MV_VLC_BITS, 2);
120  if (code == 0)
121  return pred;
122  if (code < 0)
123  return 0xffff;
124 
125  sign = get_bits1(&s->gb);
126  shift = fcode - 1;
127  val = code;
128  if (shift) {
129  val = (val - 1) << shift;
130  val |= get_bits(&s->gb, shift);
131  val++;
132  }
133  if (sign)
134  val = -val;
135  val += pred;
136 
137  /* modulo decoding */
138  return sign_extend(val, 5 + shift);
139 }
140 
141 #define MAX_INDEX (64 - 1)
142 #define check_scantable_index(ctx, x) \
143  do { \
144  if ((x) > MAX_INDEX) { \
145  av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
146  ctx->mb_x, ctx->mb_y); \
147  return AVERROR_INVALIDDATA; \
148  } \
149  } while (0)
150 
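/* MPEG-1 inter-block coefficient decoding: run/level VLC (ISO/IEC 11172-2
 * table B.14), dequantisation as ((2*level + 1) * qscale * matrix[j]) >> 5,
 * with the result forced to an odd value as the spec requires. */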
151 static inline int mpeg1_decode_block_inter(MpegEncContext *s,
152  int16_t *block, int n)
153 {
154  int level, i, j, run;
155  uint8_t *const scantable = s->intra_scantable.permutated;
156  const uint16_t *quant_matrix = s->inter_matrix;
157  const int qscale = s->qscale;
158 
159  {
160  OPEN_READER(re, &s->gb);
161  i = -1;
162  // special case for first coefficient, no need to add second VLC table
163  UPDATE_CACHE(re, &s->gb);
164  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
165  level = (3 * qscale * quant_matrix[0]) >> 5;
166  level = (level - 1) | 1;
167  if (GET_CACHE(re, &s->gb) & 0x40000000)
168  level = -level;
169  block[0] = level;
170  i++;
171  SKIP_BITS(re, &s->gb, 2);
172  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
173  goto end;
174  }
175  /* now quantify & encode AC coefficients */
176  for (;;) {
177  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
178  TEX_VLC_BITS, 2, 0);
179 
180  if (level != 0) {
181  i += run;
182  if (i > MAX_INDEX)
183  break;
184  j = scantable[i];
185  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
186  level = (level - 1) | 1;
187  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
188  SHOW_SBITS(re, &s->gb, 1);
189  SKIP_BITS(re, &s->gb, 1);
190  } else {
191  /* escape */
192  run = SHOW_UBITS(re, &s->gb, 6) + 1;
193  LAST_SKIP_BITS(re, &s->gb, 6);
194  UPDATE_CACHE(re, &s->gb);
195  level = SHOW_SBITS(re, &s->gb, 8);
196  SKIP_BITS(re, &s->gb, 8);
197  if (level == -128) {
198  level = SHOW_UBITS(re, &s->gb, 8) - 256;
199  SKIP_BITS(re, &s->gb, 8);
200  } else if (level == 0) {
201  level = SHOW_UBITS(re, &s->gb, 8);
202  SKIP_BITS(re, &s->gb, 8);
203  }
204  i += run;
205  if (i > MAX_INDEX)
206  break;
207  j = scantable[i];
208  if (level < 0) {
209  level = -level;
210  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
211  level = (level - 1) | 1;
212  level = -level;
213  } else {
214  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
215  level = (level - 1) | 1;
216  }
217  }
218 
219  block[j] = level;
220  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
221  break;
222  UPDATE_CACHE(re, &s->gb);
223  }
224 end:
225  LAST_SKIP_BITS(re, &s->gb, 2);
226  CLOSE_READER(re, &s->gb);
227  }
228 
229  check_scantable_index(s, i);
230 
231  s->block_last_index[n] = i;
232  return 0;
233 }
234 
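/* Note: the "fast" inter variant below drops the quantisation-matrix lookup;
 * its arithmetic matches the default (flat 16) non-intra matrix, so streams
 * carrying a custom inter matrix are not reconstructed bit-exactly. */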
235 /**
236  * Changing this would eat up any speed benefits it has.
237  * Do not use "fast" flag if you need the code to be robust.
238  */
239 static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s,
240  int16_t *block, int n)
241 {
242  int level, i, j, run;
243  uint8_t *const scantable = s->intra_scantable.permutated;
244  const int qscale = s->qscale;
245 
246  {
247  OPEN_READER(re, &s->gb);
248  i = -1;
249  // Special case for first coefficient, no need to add second VLC table.
250  UPDATE_CACHE(re, &s->gb);
251  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
252  level = (3 * qscale) >> 1;
253  level = (level - 1) | 1;
254  if (GET_CACHE(re, &s->gb) & 0x40000000)
255  level = -level;
256  block[0] = level;
257  i++;
258  SKIP_BITS(re, &s->gb, 2);
259  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
260  goto end;
261  }
262 
263  /* now quantify & encode AC coefficients */
264  for (;;) {
265  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
266  TEX_VLC_BITS, 2, 0);
267 
268  if (level != 0) {
269  i += run;
270  if (i > MAX_INDEX)
271  break;
272  j = scantable[i];
273  level = ((level * 2 + 1) * qscale) >> 1;
274  level = (level - 1) | 1;
275  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
276  SHOW_SBITS(re, &s->gb, 1);
277  SKIP_BITS(re, &s->gb, 1);
278  } else {
279  /* escape */
280  run = SHOW_UBITS(re, &s->gb, 6) + 1;
281  LAST_SKIP_BITS(re, &s->gb, 6);
282  UPDATE_CACHE(re, &s->gb);
283  level = SHOW_SBITS(re, &s->gb, 8);
284  SKIP_BITS(re, &s->gb, 8);
285  if (level == -128) {
286  level = SHOW_UBITS(re, &s->gb, 8) - 256;
287  SKIP_BITS(re, &s->gb, 8);
288  } else if (level == 0) {
289  level = SHOW_UBITS(re, &s->gb, 8);
290  SKIP_BITS(re, &s->gb, 8);
291  }
292  i += run;
293  if (i > MAX_INDEX)
294  break;
295  j = scantable[i];
296  if (level < 0) {
297  level = -level;
298  level = ((level * 2 + 1) * qscale) >> 1;
299  level = (level - 1) | 1;
300  level = -level;
301  } else {
302  level = ((level * 2 + 1) * qscale) >> 1;
303  level = (level - 1) | 1;
304  }
305  }
306 
307  block[j] = level;
308  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
309  break;
310  UPDATE_CACHE(re, &s->gb);
311  }
312 end:
313  LAST_SKIP_BITS(re, &s->gb, 2);
314  CLOSE_READER(re, &s->gb);
315  }
316 
317  check_scantable_index(s, i);
318 
319  s->block_last_index[n] = i;
320  return 0;
321 }
322 
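/* MPEG-2 non-intra block decoding: same run/level VLC as MPEG-1 but with
 * 12-bit escape levels and no oddification; instead, MPEG-2 mismatch control
 * toggles the LSB of coefficient 63 so the sum of coefficients stays odd. */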
323 static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
324  int16_t *block, int n)
325 {
326  int level, i, j, run;
327  uint8_t *const scantable = s->intra_scantable.permutated;
328  const uint16_t *quant_matrix;
329  const int qscale = s->qscale;
330  int mismatch;
331 
332  mismatch = 1;
333 
334  {
335  OPEN_READER(re, &s->gb);
336  i = -1;
337  if (n < 4)
338  quant_matrix = s->inter_matrix;
339  else
340  quant_matrix = s->chroma_inter_matrix;
341 
342  // Special case for first coefficient, no need to add second VLC table.
343  UPDATE_CACHE(re, &s->gb);
344  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
345  level = (3 * qscale * quant_matrix[0]) >> 5;
346  if (GET_CACHE(re, &s->gb) & 0x40000000)
347  level = -level;
348  block[0] = level;
349  mismatch ^= level;
350  i++;
351  SKIP_BITS(re, &s->gb, 2);
352  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
353  goto end;
354  }
355 
356  /* now quantify & encode AC coefficients */
357  for (;;) {
358  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
359  TEX_VLC_BITS, 2, 0);
360 
361  if (level != 0) {
362  i += run;
363  if (i > MAX_INDEX)
364  break;
365  j = scantable[i];
366  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
367  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
368  SHOW_SBITS(re, &s->gb, 1);
369  SKIP_BITS(re, &s->gb, 1);
370  } else {
371  /* escape */
372  run = SHOW_UBITS(re, &s->gb, 6) + 1;
373  LAST_SKIP_BITS(re, &s->gb, 6);
374  UPDATE_CACHE(re, &s->gb);
375  level = SHOW_SBITS(re, &s->gb, 12);
376  SKIP_BITS(re, &s->gb, 12);
377 
378  i += run;
379  if (i > MAX_INDEX)
380  break;
381  j = scantable[i];
382  if (level < 0) {
383  level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
384  level = -level;
385  } else {
386  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
387  }
388  }
389 
390  mismatch ^= level;
391  block[j] = level;
392  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
393  break;
394  UPDATE_CACHE(re, &s->gb);
395  }
396 end:
397  LAST_SKIP_BITS(re, &s->gb, 2);
398  CLOSE_READER(re, &s->gb);
399  }
400  block[63] ^= (mismatch & 1);
401 
402  check_scantable_index(s, i);
403 
404  s->block_last_index[n] = i;
405  return 0;
406 }
407 
408 /**
409  * Changing this would eat up any speed benefits it has.
410  * Do not use "fast" flag if you need the code to be robust.
411  */
412 static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,
413  int16_t *block, int n)
414 {
415  int level, i, j, run;
416  uint8_t *const scantable = s->intra_scantable.permutated;
417  const int qscale = s->qscale;
418  OPEN_READER(re, &s->gb);
419  i = -1;
420 
421  // special case for first coefficient, no need to add second VLC table
422  UPDATE_CACHE(re, &s->gb);
423  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
424  level = (3 * qscale) >> 1;
425  if (GET_CACHE(re, &s->gb) & 0x40000000)
426  level = -level;
427  block[0] = level;
428  i++;
429  SKIP_BITS(re, &s->gb, 2);
430  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
431  goto end;
432  }
433 
434  /* now quantify & encode AC coefficients */
435  for (;;) {
436  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc, TEX_VLC_BITS, 2, 0);
437 
438  if (level != 0) {
439  i += run;
440  if (i > MAX_INDEX)
441  break;
442  j = scantable[i];
443  level = ((level * 2 + 1) * qscale) >> 1;
444  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
445  SHOW_SBITS(re, &s->gb, 1);
446  SKIP_BITS(re, &s->gb, 1);
447  } else {
448  /* escape */
449  run = SHOW_UBITS(re, &s->gb, 6) + 1;
450  LAST_SKIP_BITS(re, &s->gb, 6);
451  UPDATE_CACHE(re, &s->gb);
452  level = SHOW_SBITS(re, &s->gb, 12);
453  SKIP_BITS(re, &s->gb, 12);
454 
455  i += run;
456  if (i > MAX_INDEX)
457  break;
458  j = scantable[i];
459  if (level < 0) {
460  level = ((-level * 2 + 1) * qscale) >> 1;
461  level = -level;
462  } else {
463  level = ((level * 2 + 1) * qscale) >> 1;
464  }
465  }
466 
467  block[j] = level;
468  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF || i > 63)
469  break;
470 
471  UPDATE_CACHE(re, &s->gb);
472  }
473 end:
474  LAST_SKIP_BITS(re, &s->gb, 2);
475  CLOSE_READER(re, &s->gb);
476 
477  check_scantable_index(s, i);
478 
479  s->block_last_index[n] = i;
480  return 0;
481 }
482 
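/* MPEG-2 intra block decoding: differential DC per component, scaled by
 * 1 << (3 - intra_dc_precision), then AC coefficients from either table B.14
 * or B.15 depending on intra_vlc_format, with mismatch control on block[63]. */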
483 static inline int mpeg2_decode_block_intra(MpegEncContext *s,
484  int16_t *block, int n)
485 {
486  int level, dc, diff, i, j, run;
487  int component;
488  const RL_VLC_ELEM *rl_vlc;
489  uint8_t *const scantable = s->intra_scantable.permutated;
490  const uint16_t *quant_matrix;
491  const int qscale = s->qscale;
492  int mismatch;
493 
494  /* DC coefficient */
495  if (n < 4) {
496  quant_matrix = s->intra_matrix;
497  component = 0;
498  } else {
499  quant_matrix = s->chroma_intra_matrix;
500  component = (n & 1) + 1;
501  }
502  diff = decode_dc(&s->gb, component);
503  dc = s->last_dc[component];
504  dc += diff;
505  s->last_dc[component] = dc;
506  block[0] = dc * (1 << (3 - s->intra_dc_precision));
507  ff_tlog(s->avctx, "dc=%d\n", block[0]);
508  mismatch = block[0] ^ 1;
509  i = 0;
510  if (s->intra_vlc_format)
511  rl_vlc = ff_mpeg2_rl_vlc;
512  else
513  rl_vlc = ff_mpeg1_rl_vlc;
514 
515  {
516  OPEN_READER(re, &s->gb);
517  /* now quantify & encode AC coefficients */
518  for (;;) {
519  UPDATE_CACHE(re, &s->gb);
520  GET_RL_VLC(level, run, re, &s->gb, rl_vlc,
521  TEX_VLC_BITS, 2, 0);
522 
523  if (level == 127) {
524  break;
525  } else if (level != 0) {
526  i += run;
527  if (i > MAX_INDEX)
528  break;
529  j = scantable[i];
530  level = (level * qscale * quant_matrix[j]) >> 4;
531  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
532  SHOW_SBITS(re, &s->gb, 1);
533  LAST_SKIP_BITS(re, &s->gb, 1);
534  } else {
535  /* escape */
536  run = SHOW_UBITS(re, &s->gb, 6) + 1;
537  SKIP_BITS(re, &s->gb, 6);
538  level = SHOW_SBITS(re, &s->gb, 12);
539  LAST_SKIP_BITS(re, &s->gb, 12);
540  i += run;
541  if (i > MAX_INDEX)
542  break;
543  j = scantable[i];
544  if (level < 0) {
545  level = (-level * qscale * quant_matrix[j]) >> 4;
546  level = -level;
547  } else {
548  level = (level * qscale * quant_matrix[j]) >> 4;
549  }
550  }
551 
552  mismatch ^= level;
553  block[j] = level;
554  }
555  CLOSE_READER(re, &s->gb);
556  }
557  block[63] ^= mismatch & 1;
558 
559  check_scantable_index(s, i);
560 
561  s->block_last_index[n] = i;
562  return 0;
563 }
564 
565 /**
566  * Changing this would eat up any speed benefits it has.
567  * Do not use "fast" flag if you need the code to be robust.
568  */
569 static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s,
570  int16_t *block, int n)
571 {
572  int level, dc, diff, i, j, run;
573  int component;
574  const RL_VLC_ELEM *rl_vlc;
575  uint8_t *const scantable = s->intra_scantable.permutated;
576  const uint16_t *quant_matrix;
577  const int qscale = s->qscale;
578 
579  /* DC coefficient */
580  if (n < 4) {
581  quant_matrix = s->intra_matrix;
582  component = 0;
583  } else {
584  quant_matrix = s->chroma_intra_matrix;
585  component = (n & 1) + 1;
586  }
587  diff = decode_dc(&s->gb, component);
588  dc = s->last_dc[component];
589  dc += diff;
590  s->last_dc[component] = dc;
591  block[0] = dc * (1 << (3 - s->intra_dc_precision));
592  i = 0;
593  if (s->intra_vlc_format)
594  rl_vlc = ff_mpeg2_rl_vlc;
595  else
596  rl_vlc = ff_mpeg1_rl_vlc;
597 
598  {
599  OPEN_READER(re, &s->gb);
600  /* now quantify & encode AC coefficients */
601  for (;;) {
602  UPDATE_CACHE(re, &s->gb);
603  GET_RL_VLC(level, run, re, &s->gb, rl_vlc,
604  TEX_VLC_BITS, 2, 0);
605 
606  if (level >= 64 || i > 63) {
607  break;
608  } else if (level != 0) {
609  i += run;
610  j = scantable[i];
611  level = (level * qscale * quant_matrix[j]) >> 4;
612  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
613  SHOW_SBITS(re, &s->gb, 1);
614  LAST_SKIP_BITS(re, &s->gb, 1);
615  } else {
616  /* escape */
617  run = SHOW_UBITS(re, &s->gb, 6) + 1;
618  SKIP_BITS(re, &s->gb, 6);
619  level = SHOW_SBITS(re, &s->gb, 12);
620  LAST_SKIP_BITS(re, &s->gb, 12);
621  i += run;
622  j = scantable[i];
623  if (level < 0) {
624  level = (-level * qscale * quant_matrix[j]) >> 4;
625  level = -level;
626  } else {
627  level = (level * qscale * quant_matrix[j]) >> 4;
628  }
629  }
630 
631  block[j] = level;
632  }
633  CLOSE_READER(re, &s->gb);
634  }
635 
636  check_scantable_index(s, i);
637 
638  s->block_last_index[n] = i;
639  return 0;
640 }
641 
642 /******************************************/
643 /* decoding */
644 
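/* get_dmv() reads one dual-prime differential motion component: a single '0'
 * bit means 0, while '1' followed by a sign bit yields +1 or -1. */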
645 static inline int get_dmv(MpegEncContext *s)
646 {
647  if (get_bits1(&s->gb))
648  return 1 - (get_bits1(&s->gb) << 1);
649  else
650  return 0;
651 }
652 
653 /* motion type (for MPEG-2) */
654 #define MT_FIELD 1
655 #define MT_FRAME 2
656 #define MT_16X8 2
657 #define MT_DMV 3
658 
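/* Decode one macroblock: handle the skip run, read the macroblock-type VLC
 * for the current picture type, parse motion vectors (frame, field, 16x8 or
 * dual prime), then the coded block pattern and per-block coefficients,
 * dispatching to the MPEG-1/MPEG-2 exact or "fast" block decoders. */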
659 static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
660 {
661  int i, j, k, cbp, val, mb_type, motion_type;
662  const int mb_block_count = 4 + (1 << s->chroma_format);
663  int ret;
664 
665  ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
666 
667  av_assert2(s->mb_skipped == 0);
668 
669  if (s->mb_skip_run-- != 0) {
670  if (s->pict_type == AV_PICTURE_TYPE_P) {
671  s->mb_skipped = 1;
672  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
673  MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
674  } else {
675  int mb_type;
676 
677  if (s->mb_x)
678  mb_type = s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
679  else
680  // FIXME not sure if this is allowed in MPEG at all
681  mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
682  if (IS_INTRA(mb_type)) {
683  av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
684  return AVERROR_INVALIDDATA;
685  }
686  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
687  mb_type | MB_TYPE_SKIP;
688 
689  if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
690  s->mb_skipped = 1;
691  }
692 
693  return 0;
694  }
695 
696  switch (s->pict_type) {
697  default:
698  case AV_PICTURE_TYPE_I:
699  if (get_bits1(&s->gb) == 0) {
700  if (get_bits1(&s->gb) == 0) {
701  av_log(s->avctx, AV_LOG_ERROR,
702  "Invalid mb type in I-frame at %d %d\n",
703  s->mb_x, s->mb_y);
704  return AVERROR_INVALIDDATA;
705  }
706  mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
707  } else {
708  mb_type = MB_TYPE_INTRA;
709  }
710  break;
711  case AV_PICTURE_TYPE_P:
712  mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1);
713  if (mb_type < 0) {
714  av_log(s->avctx, AV_LOG_ERROR,
715  "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
716  return AVERROR_INVALIDDATA;
717  }
718  mb_type = ptype2mb_type[mb_type];
719  break;
720  case AV_PICTURE_TYPE_B:
721  mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1);
722  if (mb_type < 0) {
723  av_log(s->avctx, AV_LOG_ERROR,
724  "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);
725  return AVERROR_INVALIDDATA;
726  }
727  mb_type = btype2mb_type[mb_type];
728  break;
729  }
730  ff_tlog(s->avctx, "mb_type=%x\n", mb_type);
731 // motion_type = 0; /* avoid warning */
732  if (IS_INTRA(mb_type)) {
733  s->bdsp.clear_blocks(s->block[0]);
734 
735  if (!s->chroma_y_shift)
736  s->bdsp.clear_blocks(s->block[6]);
737 
738  /* compute DCT type */
739  // FIXME: add an interlaced_dct coded var?
740  if (s->picture_structure == PICT_FRAME &&
741  !s->frame_pred_frame_dct)
742  s->interlaced_dct = get_bits1(&s->gb);
743 
744  if (IS_QUANT(mb_type))
745  s->qscale = mpeg_get_qscale(s);
746 
747  if (s->concealment_motion_vectors) {
748  /* just parse them */
749  if (s->picture_structure != PICT_FRAME)
750  skip_bits1(&s->gb); /* field select */
751 
752  s->mv[0][0][0] =
753  s->last_mv[0][0][0] =
754  s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0],
755  s->last_mv[0][0][0]);
756  s->mv[0][0][1] =
757  s->last_mv[0][0][1] =
758  s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1],
759  s->last_mv[0][0][1]);
760 
761  check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
762  } else {
763  /* reset mv prediction */
764  memset(s->last_mv, 0, sizeof(s->last_mv));
765  }
766  s->mb_intra = 1;
767 
768  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
769  if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
770  for (i = 0; i < 6; i++)
771  mpeg2_fast_decode_block_intra(s, *s->pblocks[i], i);
772  } else {
773  for (i = 0; i < mb_block_count; i++)
774  if ((ret = mpeg2_decode_block_intra(s, *s->pblocks[i], i)) < 0)
775  return ret;
776  }
777  } else {
778  for (i = 0; i < 6; i++) {
779  ret = ff_mpeg1_decode_block_intra(&s->gb,
780  s->intra_matrix,
781  s->intra_scantable.permutated,
782  s->last_dc, *s->pblocks[i],
783  i, s->qscale);
784  if (ret < 0) {
785  av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
786  s->mb_x, s->mb_y);
787  return ret;
788  }
789 
790  s->block_last_index[i] = ret;
791  }
792  }
793  } else {
794  if (mb_type & MB_TYPE_ZERO_MV) {
795  av_assert2(mb_type & MB_TYPE_CBP);
796 
797  s->mv_dir = MV_DIR_FORWARD;
798  if (s->picture_structure == PICT_FRAME) {
799  if (s->picture_structure == PICT_FRAME
800  && !s->frame_pred_frame_dct)
801  s->interlaced_dct = get_bits1(&s->gb);
802  s->mv_type = MV_TYPE_16X16;
803  } else {
804  s->mv_type = MV_TYPE_FIELD;
805  mb_type |= MB_TYPE_INTERLACED;
806  s->field_select[0][0] = s->picture_structure - 1;
807  }
808 
809  if (IS_QUANT(mb_type))
810  s->qscale = mpeg_get_qscale(s);
811 
812  s->last_mv[0][0][0] = 0;
813  s->last_mv[0][0][1] = 0;
814  s->last_mv[0][1][0] = 0;
815  s->last_mv[0][1][1] = 0;
816  s->mv[0][0][0] = 0;
817  s->mv[0][0][1] = 0;
818  } else {
819  av_assert2(mb_type & MB_TYPE_L0L1);
820  // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
821  /* get additional motion vector type */
822  if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
823  motion_type = MT_FRAME;
824  } else {
825  motion_type = get_bits(&s->gb, 2);
826  if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
827  s->interlaced_dct = get_bits1(&s->gb);
828  }
829 
830  if (IS_QUANT(mb_type))
831  s->qscale = mpeg_get_qscale(s);
832 
833  /* motion vectors */
834  s->mv_dir = (mb_type >> 13) & 3;
835  ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
836  switch (motion_type) {
837  case MT_FRAME: /* or MT_16X8 */
838  if (s->picture_structure == PICT_FRAME) {
839  mb_type |= MB_TYPE_16x16;
840  s->mv_type = MV_TYPE_16X16;
841  for (i = 0; i < 2; i++) {
842  if (USES_LIST(mb_type, i)) {
843  /* MT_FRAME */
844  s->mv[i][0][0] =
845  s->last_mv[i][0][0] =
846  s->last_mv[i][1][0] =
847  mpeg_decode_motion(s, s->mpeg_f_code[i][0],
848  s->last_mv[i][0][0]);
849  s->mv[i][0][1] =
850  s->last_mv[i][0][1] =
851  s->last_mv[i][1][1] =
852  mpeg_decode_motion(s, s->mpeg_f_code[i][1],
853  s->last_mv[i][0][1]);
854  /* full_pel: only for MPEG-1 */
855  if (s->full_pel[i]) {
856  s->mv[i][0][0] *= 2;
857  s->mv[i][0][1] *= 2;
858  }
859  }
860  }
861  } else {
862  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
863  s->mv_type = MV_TYPE_16X8;
864  for (i = 0; i < 2; i++) {
865  if (USES_LIST(mb_type, i)) {
866  /* MT_16X8 */
867  for (j = 0; j < 2; j++) {
868  s->field_select[i][j] = get_bits1(&s->gb);
869  for (k = 0; k < 2; k++) {
870  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
871  s->last_mv[i][j][k]);
872  s->last_mv[i][j][k] = val;
873  s->mv[i][j][k] = val;
874  }
875  }
876  }
877  }
878  }
879  break;
880  case MT_FIELD:
881  s->mv_type = MV_TYPE_FIELD;
882  if (s->picture_structure == PICT_FRAME) {
883  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
884  for (i = 0; i < 2; i++) {
885  if (USES_LIST(mb_type, i)) {
886  for (j = 0; j < 2; j++) {
887  s->field_select[i][j] = get_bits1(&s->gb);
888  val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
889  s->last_mv[i][j][0]);
890  s->last_mv[i][j][0] = val;
891  s->mv[i][j][0] = val;
892  ff_tlog(s->avctx, "fmx=%d\n", val);
893  val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
894  s->last_mv[i][j][1] >> 1);
895  s->last_mv[i][j][1] = 2 * val;
896  s->mv[i][j][1] = val;
897  ff_tlog(s->avctx, "fmy=%d\n", val);
898  }
899  }
900  }
901  } else {
902  av_assert0(!s->progressive_sequence);
903  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
904  for (i = 0; i < 2; i++) {
905  if (USES_LIST(mb_type, i)) {
906  s->field_select[i][0] = get_bits1(&s->gb);
907  for (k = 0; k < 2; k++) {
908  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
909  s->last_mv[i][0][k]);
910  s->last_mv[i][0][k] = val;
911  s->last_mv[i][1][k] = val;
912  s->mv[i][0][k] = val;
913  }
914  }
915  }
916  }
917  break;
918  case MT_DMV:
919  if (s->progressive_sequence){
920  av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
921  return AVERROR_INVALIDDATA;
922  }
923  s->mv_type = MV_TYPE_DMV;
924  for (i = 0; i < 2; i++) {
925  if (USES_LIST(mb_type, i)) {
926  int dmx, dmy, mx, my, m;
927  const int my_shift = s->picture_structure == PICT_FRAME;
928 
929  mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
930  s->last_mv[i][0][0]);
931  s->last_mv[i][0][0] = mx;
932  s->last_mv[i][1][0] = mx;
933  dmx = get_dmv(s);
934  my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
935  s->last_mv[i][0][1] >> my_shift);
936  dmy = get_dmv(s);
937 
938 
939  s->last_mv[i][0][1] = my * (1 << my_shift);
940  s->last_mv[i][1][1] = my * (1 << my_shift);
941 
942  s->mv[i][0][0] = mx;
943  s->mv[i][0][1] = my;
944  s->mv[i][1][0] = mx; // not used
945  s->mv[i][1][1] = my; // not used
946 
947  if (s->picture_structure == PICT_FRAME) {
948  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
949 
950  // m = 1 + 2 * s->top_field_first;
951  m = s->top_field_first ? 1 : 3;
952 
953  /* top -> top pred */
954  s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
955  s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
956  m = 4 - m;
957  s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
958  s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
959  } else {
960  mb_type |= MB_TYPE_16x16;
961 
962  s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
963  s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
964  if (s->picture_structure == PICT_TOP_FIELD)
965  s->mv[i][2][1]--;
966  else
967  s->mv[i][2][1]++;
968  }
969  }
970  }
971  break;
972  default:
973  av_log(s->avctx, AV_LOG_ERROR,
974  "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
975  return AVERROR_INVALIDDATA;
976  }
977  }
978 
979  s->mb_intra = 0;
980  if (HAS_CBP(mb_type)) {
981  s->bdsp.clear_blocks(s->block[0]);
982 
983  cbp = get_vlc2(&s->gb, ff_mb_pat_vlc.table, MB_PAT_VLC_BITS, 1);
984  if (mb_block_count > 6) {
985  cbp *= 1 << mb_block_count - 6;
986  cbp |= get_bits(&s->gb, mb_block_count - 6);
987  s->bdsp.clear_blocks(s->block[6]);
988  }
989  if (cbp <= 0) {
990  av_log(s->avctx, AV_LOG_ERROR,
991  "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
992  return AVERROR_INVALIDDATA;
993  }
994 
995  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
996  if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
997  for (i = 0; i < 6; i++) {
998  if (cbp & 32)
999  mpeg2_fast_decode_block_non_intra(s, *s->pblocks[i], i);
1000  else
1001  s->block_last_index[i] = -1;
1002  cbp += cbp;
1003  }
1004  } else {
1005  cbp <<= 12 - mb_block_count;
1006 
1007  for (i = 0; i < mb_block_count; i++) {
1008  if (cbp & (1 << 11)) {
1009  if ((ret = mpeg2_decode_block_non_intra(s, *s->pblocks[i], i)) < 0)
1010  return ret;
1011  } else {
1012  s->block_last_index[i] = -1;
1013  }
1014  cbp += cbp;
1015  }
1016  }
1017  } else {
1018  if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
1019  for (i = 0; i < 6; i++) {
1020  if (cbp & 32)
1021  mpeg1_fast_decode_block_inter(s, *s->pblocks[i], i);
1022  else
1023  s->block_last_index[i] = -1;
1024  cbp += cbp;
1025  }
1026  } else {
1027  for (i = 0; i < 6; i++) {
1028  if (cbp & 32) {
1029  if ((ret = mpeg1_decode_block_inter(s, *s->pblocks[i], i)) < 0)
1030  return ret;
1031  } else {
1032  s->block_last_index[i] = -1;
1033  }
1034  cbp += cbp;
1035  }
1036  }
1037  }
1038  } else {
1039  for (i = 0; i < 12; i++)
1040  s->block_last_index[i] = -1;
1041  }
1042  }
1043 
1044  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
1045 
1046  return 0;
1047 }
1048 
1049 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
1050 {
1051  Mpeg1Context *s = avctx->priv_data;
1052  MpegEncContext *s2 = &s->mpeg_enc_ctx;
1053 
1054  if ( avctx->codec_tag != AV_RL32("VCR2")
1055  && avctx->codec_tag != AV_RL32("BW10"))
1056  avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
1057  ff_mpv_decode_init(s2, avctx);
1058 
1059  /* we need some permutation to store matrices,
1060  * until the decoder sets the real permutation. */
1061  ff_mpv_idct_init(s2);
1062  ff_mpeg12_init_vlcs();
1063 
1064  s2->chroma_format = 1;
1065  s->mpeg_enc_ctx_allocated = 0;
1066  s->repeat_field = 0;
1067  avctx->color_range = AVCOL_RANGE_MPEG;
1068  return 0;
1069 }
1070 
1071 #if HAVE_THREADS
1072 static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
1073  const AVCodecContext *avctx_from)
1074 {
1075  Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
1076  MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
1077  int err;
1078 
1079  if (avctx == avctx_from ||
1080  !ctx_from->mpeg_enc_ctx_allocated ||
1081  !s1->context_initialized)
1082  return 0;
1083 
1084  err = ff_mpeg_update_thread_context(avctx, avctx_from);
1085  if (err)
1086  return err;
1087 
1088  if (!ctx->mpeg_enc_ctx_allocated)
1089  memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
1090 
1091  return 0;
1092 }
1093 #endif
1094 
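/* Re-map a quantisation matrix from one IDCT coefficient permutation to
 * another; needed because matrices can be parsed before the final IDCT
 * implementation (and therefore its permutation) is selected. */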
1095 static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm,
1096  const uint8_t *new_perm)
1097 {
1098  uint16_t temp_matrix[64];
1099  int i;
1100 
1101  memcpy(temp_matrix, matrix, 64 * sizeof(uint16_t));
1102 
1103  for (i = 0; i < 64; i++)
1104  matrix[new_perm[i]] = temp_matrix[old_perm[i]];
1105 }
1106 
1107 static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = {
1108 #if CONFIG_MPEG1_NVDEC_HWACCEL
1109  AV_PIX_FMT_CUDA,
1110 #endif
1111 #if CONFIG_MPEG1_VDPAU_HWACCEL
1112  AV_PIX_FMT_VDPAU,
1113 #endif
1114  AV_PIX_FMT_YUV420P,
1115  AV_PIX_FMT_NONE
1116 };
1117 
1118 static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
1119 #if CONFIG_MPEG2_NVDEC_HWACCEL
1120  AV_PIX_FMT_CUDA,
1121 #endif
1122 #if CONFIG_MPEG2_VDPAU_HWACCEL
1123  AV_PIX_FMT_VDPAU,
1124 #endif
1125 #if CONFIG_MPEG2_DXVA2_HWACCEL
1126  AV_PIX_FMT_DXVA2_VLD,
1127 #endif
1128 #if CONFIG_MPEG2_D3D11VA_HWACCEL
1129  AV_PIX_FMT_D3D11VA_VLD,
1130  AV_PIX_FMT_D3D11,
1131 #endif
1132 #if CONFIG_MPEG2_VAAPI_HWACCEL
1133  AV_PIX_FMT_VAAPI,
1134 #endif
1135 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
1136  AV_PIX_FMT_VIDEOTOOLBOX,
1137 #endif
1138  AV_PIX_FMT_YUV420P,
1139  AV_PIX_FMT_NONE
1140 };
1141 
1142 static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
1143  AV_PIX_FMT_YUV422P,
1144  AV_PIX_FMT_NONE
1145 };
1146 
1147 static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
1148  AV_PIX_FMT_YUV444P,
1149  AV_PIX_FMT_NONE
1150 };
1151 
1152 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
1153 {
1154  Mpeg1Context *s1 = avctx->priv_data;
1155  MpegEncContext *s = &s1->mpeg_enc_ctx;
1156  const enum AVPixelFormat *pix_fmts;
1157 
1158  if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
1159  return AV_PIX_FMT_GRAY8;
1160 
1161  if (s->chroma_format < 2)
1162  pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
1163  mpeg1_hwaccel_pixfmt_list_420 :
1164  mpeg2_hwaccel_pixfmt_list_420;
1165  else if (s->chroma_format == 2)
1166  pix_fmts = mpeg12_pixfmt_list_422;
1167  else
1168  pix_fmts = mpeg12_pixfmt_list_444;
1169 
1170  return ff_thread_get_format(avctx, pix_fmts);
1171 }
1172 
1173 /* Call this function when we know all parameters.
1174  * It may be called in different places for MPEG-1 and MPEG-2. */
1175 static int mpeg_decode_postinit(AVCodecContext *avctx)
1176 {
1177  Mpeg1Context *s1 = avctx->priv_data;
1178  MpegEncContext *s = &s1->mpeg_enc_ctx;
1179  uint8_t old_permutation[64];
1180  int ret;
1181 
1182  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
1183  // MPEG-1 aspect
1184  AVRational aspect_inv = av_d2q(ff_mpeg1_aspect[s1->aspect_ratio_info], 255);
1185  avctx->sample_aspect_ratio = (AVRational) { aspect_inv.den, aspect_inv.num };
1186  } else { // MPEG-2
1187  // MPEG-2 aspect
1188  if (s1->aspect_ratio_info > 1) {
1189  AVRational dar =
1190  av_mul_q(av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
1191  (AVRational) { s1->pan_scan.width,
1192  s1->pan_scan.height }),
1193  (AVRational) { s->width, s->height });
1194 
1195  /* We ignore the spec here and guess a bit as reality does not
1196  * match the spec, see for example res_change_ffmpeg_aspect.ts
1197  * and sequence-display-aspect.mpg.
1198  * issue1613, 621, 562 */
1199  if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
1200  (av_cmp_q(dar, (AVRational) { 4, 3 }) &&
1201  av_cmp_q(dar, (AVRational) { 16, 9 }))) {
1202  s->avctx->sample_aspect_ratio =
1203  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
1204  (AVRational) { s->width, s->height });
1205  } else {
1206  s->avctx->sample_aspect_ratio =
1207  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
1208  (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
1209 // issue1613 4/3 16/9 -> 16/9
1210 // res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
1211 // widescreen-issue562.mpg 4/3 16/9 -> 16/9
1212 // s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height});
1213  ff_dlog(avctx, "aspect A %d/%d\n",
1214  ff_mpeg2_aspect[s1->aspect_ratio_info].num,
1215  ff_mpeg2_aspect[s1->aspect_ratio_info].den);
1216  ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
1217  s->avctx->sample_aspect_ratio.den);
1218  }
1219  } else {
1220  s->avctx->sample_aspect_ratio =
1221  ff_mpeg2_aspect[s1->aspect_ratio_info];
1222  }
1223  } // MPEG-2
1224 
1225  if (av_image_check_sar(s->width, s->height,
1226  avctx->sample_aspect_ratio) < 0) {
1227  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1228  avctx->sample_aspect_ratio.num,
1229  avctx->sample_aspect_ratio.den);
1230  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
1231  }
1232 
1233  if ((s1->mpeg_enc_ctx_allocated == 0) ||
1234  avctx->coded_width != s->width ||
1235  avctx->coded_height != s->height ||
1236  s1->save_width != s->width ||
1237  s1->save_height != s->height ||
1238  av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) ||
1239  (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
1240  0) {
1241  if (s1->mpeg_enc_ctx_allocated) {
1242  ff_mpv_common_end(s);
1243  s1->mpeg_enc_ctx_allocated = 0;
1244  }
1245 
1246  ret = ff_set_dimensions(avctx, s->width, s->height);
1247  if (ret < 0)
1248  return ret;
1249 
1250  if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->bit_rate) {
1251  avctx->rc_max_rate = s->bit_rate;
1252  } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s->bit_rate &&
1253  (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) {
1254  avctx->bit_rate = s->bit_rate;
1255  }
1256  s1->save_aspect = s->avctx->sample_aspect_ratio;
1257  s1->save_width = s->width;
1258  s1->save_height = s->height;
1259  s1->save_progressive_seq = s->progressive_sequence;
1260 
1261  /* low_delay may be forced, in this case we will have B-frames
1262  * that behave like P-frames. */
1263  avctx->has_b_frames = !s->low_delay;
1264 
1265  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
1266  // MPEG-1 fps
1267  avctx->framerate = ff_mpeg12_frame_rate_tab[s1->frame_rate_index];
1268  avctx->ticks_per_frame = 1;
1269 
1271  } else { // MPEG-2
1272  // MPEG-2 fps
1273  av_reduce(&s->avctx->framerate.num,
1274  &s->avctx->framerate.den,
1275  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].num * s1->frame_rate_ext.num,
1276  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].den * s1->frame_rate_ext.den,
1277  1 << 30);
1278  avctx->ticks_per_frame = 2;
1279 
1280  switch (s->chroma_format) {
1281  case 1: avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break;
1282  case 2:
1283  case 3: avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; break;
1284  default: av_assert0(0);
1285  }
1286  } // MPEG-2
1287 
1288  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1289 
1290  /* Quantization matrices may need reordering
1291  * if DCT permutation is changed. */
1292  memcpy(old_permutation, s->idsp.idct_permutation, 64 * sizeof(uint8_t));
1293 
1294  ff_mpv_idct_init(s);
1295  if ((ret = ff_mpv_common_init(s)) < 0)
1296  return ret;
1297 
1298  quant_matrix_rebuild(s->intra_matrix, old_permutation, s->idsp.idct_permutation);
1299  quant_matrix_rebuild(s->inter_matrix, old_permutation, s->idsp.idct_permutation);
1300  quant_matrix_rebuild(s->chroma_intra_matrix, old_permutation, s->idsp.idct_permutation);
1301  quant_matrix_rebuild(s->chroma_inter_matrix, old_permutation, s->idsp.idct_permutation);
1302 
1303  s1->mpeg_enc_ctx_allocated = 1;
1304  }
1305  return 0;
1306 }
1307 
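/* Parse an MPEG-1 picture header: temporal reference, picture coding type,
 * vbv_delay, and for P/B pictures the full_pel flags and f_codes. */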
1308 static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
1309  int buf_size)
1310 {
1311  Mpeg1Context *s1 = avctx->priv_data;
1312  MpegEncContext *s = &s1->mpeg_enc_ctx;
1313  int ref, f_code, vbv_delay, ret;
1314 
1315  ret = init_get_bits8(&s->gb, buf, buf_size);
1316  if (ret < 0)
1317  return ret;
1318 
1319  ref = get_bits(&s->gb, 10); /* temporal ref */
1320  s->pict_type = get_bits(&s->gb, 3);
1321  if (s->pict_type == 0 || s->pict_type > 3)
1322  return AVERROR_INVALIDDATA;
1323 
1324  vbv_delay = get_bits(&s->gb, 16);
1325  s->vbv_delay = vbv_delay;
1326  if (s->pict_type == AV_PICTURE_TYPE_P ||
1327  s->pict_type == AV_PICTURE_TYPE_B) {
1328  s->full_pel[0] = get_bits1(&s->gb);
1329  f_code = get_bits(&s->gb, 3);
1330  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1331  return AVERROR_INVALIDDATA;
1332  f_code += !f_code;
1333  s->mpeg_f_code[0][0] = f_code;
1334  s->mpeg_f_code[0][1] = f_code;
1335  }
1336  if (s->pict_type == AV_PICTURE_TYPE_B) {
1337  s->full_pel[1] = get_bits1(&s->gb);
1338  f_code = get_bits(&s->gb, 3);
1339  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1340  return AVERROR_INVALIDDATA;
1341  f_code += !f_code;
1342  s->mpeg_f_code[1][0] = f_code;
1343  s->mpeg_f_code[1][1] = f_code;
1344  }
1345  s->current_picture.f->pict_type = s->pict_type;
1346  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1347 
1348  if (avctx->debug & FF_DEBUG_PICT_INFO)
1349  av_log(avctx, AV_LOG_DEBUG,
1350  "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
1351 
1352  s->y_dc_scale = 8;
1353  s->c_dc_scale = 8;
1354  return 0;
1355 }
1356 
1357 static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
1358 {
1359  MpegEncContext *s = &s1->mpeg_enc_ctx;
1360  int horiz_size_ext, vert_size_ext;
1361  int bit_rate_ext;
1362  AVCPBProperties *cpb_props;
1363 
1364  skip_bits(&s->gb, 1); /* profile and level esc*/
1365  s->avctx->profile = get_bits(&s->gb, 3);
1366  s->avctx->level = get_bits(&s->gb, 4);
1367  s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
1368  s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
1369 
1370  if (!s->chroma_format) {
1371  s->chroma_format = 1;
1372  av_log(s->avctx, AV_LOG_WARNING, "Chroma format invalid\n");
1373  }
1374 
1375  horiz_size_ext = get_bits(&s->gb, 2);
1376  vert_size_ext = get_bits(&s->gb, 2);
1377  s->width |= (horiz_size_ext << 12);
1378  s->height |= (vert_size_ext << 12);
1379  bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */
1380  s->bit_rate += (bit_rate_ext << 18) * 400LL;
1381  check_marker(s->avctx, &s->gb, "after bit rate extension");
1382  s1->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;
1383 
1384  s->low_delay = get_bits1(&s->gb);
1385  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1386  s->low_delay = 1;
1387 
1388  s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1;
1389  s1->frame_rate_ext.den = get_bits(&s->gb, 5) + 1;
1390 
1391  ff_dlog(s->avctx, "sequence extension\n");
1392  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1393 
1394  if (cpb_props = ff_add_cpb_side_data(s->avctx)) {
1395  cpb_props->buffer_size = s1->rc_buffer_size;
1396  if (s->bit_rate != 0x3FFFF*400)
1397  cpb_props->max_bitrate = s->bit_rate;
1398  }
1399 
1400  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1401  av_log(s->avctx, AV_LOG_DEBUG,
1402  "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
1403  s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
1404  s1->rc_buffer_size, s->bit_rate);
1405 }
1406 
1407 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
1408 {
1409  MpegEncContext *s = &s1->mpeg_enc_ctx;
1410  int color_description, w, h;
1411 
1412  skip_bits(&s->gb, 3); /* video format */
1413  color_description = get_bits1(&s->gb);
1414  if (color_description) {
1415  s->avctx->color_primaries = get_bits(&s->gb, 8);
1416  s->avctx->color_trc = get_bits(&s->gb, 8);
1417  s->avctx->colorspace = get_bits(&s->gb, 8);
1418  }
1419  w = get_bits(&s->gb, 14);
1420  skip_bits(&s->gb, 1); // marker
1421  h = get_bits(&s->gb, 14);
1422  // remaining 3 bits are zero padding
1423 
1424  s1->pan_scan.width = 16 * w;
1425  s1->pan_scan.height = 16 * h;
1426 
1427  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1428  av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
1429 }
1430 
1431 static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
1432 {
1433  MpegEncContext *s = &s1->mpeg_enc_ctx;
1434  int i, nofco;
1435 
1436  nofco = 1;
1437  if (s->progressive_sequence) {
1438  if (s->repeat_first_field) {
1439  nofco++;
1440  if (s->top_field_first)
1441  nofco++;
1442  }
1443  } else {
1444  if (s->picture_structure == PICT_FRAME) {
1445  nofco++;
1446  if (s->repeat_first_field)
1447  nofco++;
1448  }
1449  }
1450  for (i = 0; i < nofco; i++) {
1451  s1->pan_scan.position[i][0] = get_sbits(&s->gb, 16);
1452  skip_bits(&s->gb, 1); // marker
1453  s1->pan_scan.position[i][1] = get_sbits(&s->gb, 16);
1454  skip_bits(&s->gb, 1); // marker
1455  }
1456 
1457  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1458  av_log(s->avctx, AV_LOG_DEBUG,
1459  "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
1460  s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
1461  s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
1462  s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
1463 }
1464 
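/* Read a 64-entry quantisation matrix in zigzag order, rejecting zero values
 * and storing it with the current IDCT permutation applied. */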
1465 static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
1466  uint16_t matrix1[64], int intra)
1467 {
1468  int i;
1469 
1470  for (i = 0; i < 64; i++) {
1471  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
1472  int v = get_bits(&s->gb, 8);
1473  if (v == 0) {
1474  av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
1475  return AVERROR_INVALIDDATA;
1476  }
1477  if (intra && i == 0 && v != 8) {
1478  av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
1479  v = 8; // needed by pink.mpg / issue1046
1480  }
1481  matrix0[j] = v;
1482  if (matrix1)
1483  matrix1[j] = v;
1484  }
1485  return 0;
1486 }
1487 
1488 static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
1489 {
1490  ff_dlog(s->avctx, "matrix extension\n");
1491 
1492  if (get_bits1(&s->gb))
1493  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1494  if (get_bits1(&s->gb))
1495  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1496  if (get_bits1(&s->gb))
1497  load_matrix(s, s->chroma_intra_matrix, NULL, 1);
1498  if (get_bits1(&s->gb))
1499  load_matrix(s, s->chroma_inter_matrix, NULL, 0);
1500 }
1501 
1502 static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
1503 {
1504  MpegEncContext *s = &s1->mpeg_enc_ctx;
1505 
1506  s->full_pel[0] = s->full_pel[1] = 0;
1507  s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
1508  s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
1509  s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
1510  s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
1511  s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
1512  s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
1513  s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
1514  s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
1515  if (!s->pict_type && s1->mpeg_enc_ctx_allocated) {
1516  av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code\n");
1517  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1518  return AVERROR_INVALIDDATA;
1519  av_log(s->avctx, AV_LOG_WARNING, "Guessing pict_type from mpeg_f_code\n");
1520  if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
1521  if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
1522  s->pict_type = AV_PICTURE_TYPE_I;
1523  else
1524  s->pict_type = AV_PICTURE_TYPE_P;
1525  } else
1526  s->pict_type = AV_PICTURE_TYPE_B;
1527  s->current_picture.f->pict_type = s->pict_type;
1528  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1529  }
1530 
1531  s->intra_dc_precision = get_bits(&s->gb, 2);
1532  s->picture_structure = get_bits(&s->gb, 2);
1533  s->top_field_first = get_bits1(&s->gb);
1534  s->frame_pred_frame_dct = get_bits1(&s->gb);
1535  s->concealment_motion_vectors = get_bits1(&s->gb);
1536  s->q_scale_type = get_bits1(&s->gb);
1537  s->intra_vlc_format = get_bits1(&s->gb);
1538  s->alternate_scan = get_bits1(&s->gb);
1539  s->repeat_first_field = get_bits1(&s->gb);
1540  s->chroma_420_type = get_bits1(&s->gb);
1541  s->progressive_frame = get_bits1(&s->gb);
1542 
1543  if (s->alternate_scan) {
1544  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
1545  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
1546  } else {
1547  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
1548  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
1549  }
1550 
1551  /* composite display not parsed */
1552  ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
1553  ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
1554  ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
1555  ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
1556  ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
1557  ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
1558  ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
1559  ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
1560  ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
1561 
1562  return 0;
1563 }
1564 
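/* Start decoding a frame or field: on the first field, start the frame,
 * attach pan-scan, A/53 caption, stereo 3D and AFD side data and compute
 * repeat_pict; on the second field, re-point the data pointers at the
 * correct lines of the already-allocated frame. */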
1565 static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
1566 {
1567  AVCodecContext *avctx = s->avctx;
1568  Mpeg1Context *s1 = (Mpeg1Context *) s;
1569  int ret;
1570 
1571  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
1572  if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
1573  return AVERROR_INVALIDDATA;
1574  }
1575 
1576  /* start frame decoding */
1577  if (s->first_field || s->picture_structure == PICT_FRAME) {
1578  AVFrameSideData *pan_scan;
1579 
1580  if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
1581  return ret;
1582 
1583  ff_mpeg_er_frame_start(s);
1584 
1585  /* first check if we must repeat the frame */
1586  s->current_picture_ptr->f->repeat_pict = 0;
1587  if (s->repeat_first_field) {
1588  if (s->progressive_sequence) {
1589  if (s->top_field_first)
1590  s->current_picture_ptr->f->repeat_pict = 4;
1591  else
1592  s->current_picture_ptr->f->repeat_pict = 2;
1593  } else if (s->progressive_frame) {
1594  s->current_picture_ptr->f->repeat_pict = 1;
1595  }
1596  }
1597 
1598  pan_scan = av_frame_new_side_data(s->current_picture_ptr->f,
1599  AV_FRAME_DATA_PANSCAN,
1600  sizeof(s1->pan_scan));
1601  if (!pan_scan)
1602  return AVERROR(ENOMEM);
1603  memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
1604 
1605  if (s1->a53_buf_ref) {
1606  AVFrameSideData *sd = av_frame_new_side_data_from_buf(
1607  s->current_picture_ptr->f, AV_FRAME_DATA_A53_CC,
1608  s1->a53_buf_ref);
1609  if (!sd)
1610  av_buffer_unref(&s1->a53_buf_ref);
1611  s1->a53_buf_ref = NULL;
1612  }
1613 
1614  if (s1->has_stereo3d) {
1615  AVStereo3D *stereo = av_stereo3d_create_side_data(s->current_picture_ptr->f);
1616  if (!stereo)
1617  return AVERROR(ENOMEM);
1618 
1619  *stereo = s1->stereo3d;
1620  s1->has_stereo3d = 0;
1621  }
1622 
1623  if (s1->has_afd) {
1624  AVFrameSideData *sd =
1625  av_frame_new_side_data(s->current_picture_ptr->f,
1626  AV_FRAME_DATA_AFD, 1);
1627  if (!sd)
1628  return AVERROR(ENOMEM);
1629 
1630  *sd->data = s1->afd;
1631  s1->has_afd = 0;
1632  }
1633 
1634  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
1635  ff_thread_finish_setup(avctx);
1636  } else { // second field
1637  int i;
1638 
1639  if (!s->current_picture_ptr) {
1640  av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
1641  return AVERROR_INVALIDDATA;
1642  }
1643 
1644  if (s->avctx->hwaccel) {
1645  if ((ret = s->avctx->hwaccel->end_frame(s->avctx)) < 0) {
1646  av_log(avctx, AV_LOG_ERROR,
1647  "hardware accelerator failed to decode first field\n");
1648  return ret;
1649  }
1650  }
1651 
1652  for (i = 0; i < 4; i++) {
1653  s->current_picture.f->data[i] = s->current_picture_ptr->f->data[i];
1654  if (s->picture_structure == PICT_BOTTOM_FIELD)
1655  s->current_picture.f->data[i] +=
1656  s->current_picture_ptr->f->linesize[i];
1657  }
1658  }
1659 
1660  if (avctx->hwaccel) {
1661  if ((ret = avctx->hwaccel->start_frame(avctx, buf, buf_size)) < 0)
1662  return ret;
1663  }
1664 
1665  return 0;
1666 }
1667 
1668 #define DECODE_SLICE_ERROR -1
1669 #define DECODE_SLICE_OK 0
1670 
1671 /**
1672  * Decode a slice.
1673  * MpegEncContext.mb_y must be set to the MB row from the startcode.
1674  * @return DECODE_SLICE_ERROR if the slice is damaged,
1675  * DECODE_SLICE_OK if this slice is OK
1676  */
1677 static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
1678  const uint8_t **buf, int buf_size)
1679 {
1680  AVCodecContext *avctx = s->avctx;
1681  const int lowres = s->avctx->lowres;
1682  const int field_pic = s->picture_structure != PICT_FRAME;
1683  int ret;
1684 
1685  s->resync_mb_x =
1686  s->resync_mb_y = -1;
1687 
1688  av_assert0(mb_y < s->mb_height);
1689 
1690  ret = init_get_bits8(&s->gb, *buf, buf_size);
1691  if (ret < 0)
1692  return ret;
1693 
1694  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1695  skip_bits(&s->gb, 3);
1696 
1697  ff_mpeg1_clean_buffers(s);
1698  s->interlaced_dct = 0;
1699 
1700  s->qscale = mpeg_get_qscale(s);
1701 
1702  if (s->qscale == 0) {
1703  av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
1704  return AVERROR_INVALIDDATA;
1705  }
1706 
1707  /* extra slice info */
1708  if (skip_1stop_8data_bits(&s->gb) < 0)
1709  return AVERROR_INVALIDDATA;
1710 
1711  s->mb_x = 0;
1712 
1713  if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {
1714  skip_bits1(&s->gb);
1715  } else {
1716  while (get_bits_left(&s->gb) > 0) {
1717  int code = get_vlc2(&s->gb, ff_mbincr_vlc.table,
1718  MBINCR_VLC_BITS, 2);
1719  if (code < 0) {
1720  av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
1721  return AVERROR_INVALIDDATA;
1722  }
1723  if (code >= 33) {
1724  if (code == 33)
1725  s->mb_x += 33;
1726  /* otherwise, stuffing, nothing to do */
1727  } else {
1728  s->mb_x += code;
1729  break;
1730  }
1731  }
1732  }
1733 
1734  if (s->mb_x >= (unsigned) s->mb_width) {
1735  av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
1736  return AVERROR_INVALIDDATA;
1737  }
1738 
1739  if (avctx->hwaccel && avctx->hwaccel->decode_slice) {
1740  const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
1741  int start_code = -1;
1742  buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
1743  if (buf_end < *buf + buf_size)
1744  buf_end -= 4;
1745  s->mb_y = mb_y;
1746  if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_end - buf_start) < 0)
1747  return DECODE_SLICE_ERROR;
1748  *buf = buf_end;
1749  return DECODE_SLICE_OK;
1750  }
1751 
1752  s->resync_mb_x = s->mb_x;
1753  s->resync_mb_y = s->mb_y = mb_y;
1754  s->mb_skip_run = 0;
1755  ff_init_block_index(s);
1756 
1757  if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field || s->picture_structure == PICT_FRAME)) {
1758  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
1759  av_log(s->avctx, AV_LOG_DEBUG,
1760  "qp:%d fc:%2d%2d%2d%2d %c %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
1761  s->qscale,
1762  s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
1763  s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
1764  s->pict_type == AV_PICTURE_TYPE_I ? 'I' :
1765  (s->pict_type == AV_PICTURE_TYPE_P ? 'P' :
1766  (s->pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
1767  s->progressive_sequence ? "ps" : "",
1768  s->progressive_frame ? "pf" : "",
1769  s->alternate_scan ? "alt" : "",
1770  s->top_field_first ? "top" : "",
1771  s->intra_dc_precision, s->picture_structure,
1772  s->frame_pred_frame_dct, s->concealment_motion_vectors,
1773  s->q_scale_type, s->intra_vlc_format,
1774  s->repeat_first_field, s->chroma_420_type ? "420" : "");
1775  }
1776  }
1777 
1778  for (;;) {
1779  if ((ret = mpeg_decode_mb(s, s->block)) < 0)
1780  return ret;
1781 
1782  // Note motion_val is normally NULL unless we want to extract the MVs.
1783  if (s->current_picture.motion_val[0]) {
1784  const int wrap = s->b8_stride;
1785  int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
1786  int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
1787  int motion_x, motion_y, dir, i;
1788 
1789  for (i = 0; i < 2; i++) {
1790  for (dir = 0; dir < 2; dir++) {
1791  if (s->mb_intra ||
1792  (dir == 1 && s->pict_type != AV_PICTURE_TYPE_B)) {
1793  motion_x = motion_y = 0;
1794  } else if (s->mv_type == MV_TYPE_16X16 ||
1795  (s->mv_type == MV_TYPE_FIELD && field_pic)) {
1796  motion_x = s->mv[dir][0][0];
1797  motion_y = s->mv[dir][0][1];
1798  } else { /* if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8)) */
1799  motion_x = s->mv[dir][i][0];
1800  motion_y = s->mv[dir][i][1];
1801  }
1802 
1803  s->current_picture.motion_val[dir][xy][0] = motion_x;
1804  s->current_picture.motion_val[dir][xy][1] = motion_y;
1805  s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
1806  s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
1807  s->current_picture.ref_index [dir][b8_xy] =
1808  s->current_picture.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
1809  av_assert2(s->field_select[dir][i] == 0 ||
1810  s->field_select[dir][i] == 1);
1811  }
1812  xy += wrap;
1813  b8_xy += 2;
1814  }
1815  }
1816 
1817  s->dest[0] += 16 >> lowres;
1818  s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
1819  s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
1820 
1821  ff_mpv_reconstruct_mb(s, s->block);
1822 
1823  if (++s->mb_x >= s->mb_width) {
1824  const int mb_size = 16 >> s->avctx->lowres;
1825  int left;
1826 
1827  ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
1828  ff_mpv_report_decode_progress(s);
1829 
1830  s->mb_x = 0;
1831  s->mb_y += 1 << field_pic;
1832 
1833  if (s->mb_y >= s->mb_height) {
1834  int left = get_bits_left(&s->gb);
1835  int is_d10 = s->chroma_format == 2 &&
1836  s->pict_type == AV_PICTURE_TYPE_I &&
1837  avctx->profile == 0 && avctx->level == 5 &&
1838  s->intra_dc_precision == 2 &&
1839  s->q_scale_type == 1 && s->alternate_scan == 0 &&
1840  s->progressive_frame == 0
1841  /* vbv_delay == 0xBBB || 0xE10 */;
1842 
1843  if (left >= 32 && !is_d10) {
1844  GetBitContext gb = s->gb;
1845  align_get_bits(&gb);
1846  if (show_bits(&gb, 24) == 0x060E2B) {
1847  av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
1848  is_d10 = 1;
1849  }
1850  if (left > 32 && show_bits_long(&gb, 32) == 0x201) {
1851  av_log(avctx, AV_LOG_DEBUG, "skipping m704 alpha (unsupported)\n");
1852  goto eos;
1853  }
1854  }
1855 
1856  if (left < 0 ||
1857  (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
1858  ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
1859  av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n",
1860  left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0, s->mb_x, s->mb_y);
1861  return AVERROR_INVALIDDATA;
1862  } else
1863  goto eos;
1864  }
1865  // There are some files out there which are missing the last slice
1866  // in cases where the slice is completely outside the visible
1867  // area, we detect this here instead of running into the end expecting
1868  // more data
1869  left = get_bits_left(&s->gb);
1870  if (s->mb_y >= ((s->height + 15) >> 4) &&
1871  !s->progressive_sequence &&
1872  left <= 25 &&
1873  left >= 0 &&
1874  s->mb_skip_run == -1 &&
1875  (!left || show_bits(&s->gb, left) == 0))
1876  goto eos;
1877 
1878  ff_init_block_index(s);
1879  }
1880 
1881  /* skip mb handling */
1882  if (s->mb_skip_run == -1) {
1883  /* read increment again */
1884  s->mb_skip_run = 0;
1885  for (;;) {
1886  int code = get_vlc2(&s->gb, ff_mbincr_vlc.table,
1887  MBINCR_VLC_BITS, 2);
1888  if (code < 0) {
1889  av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
1890  return AVERROR_INVALIDDATA;
1891  }
1892  if (code >= 33) {
1893  if (code == 33) {
1894  s->mb_skip_run += 33;
1895  } else if (code == 35) {
1896  if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
1897  av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
1898  return AVERROR_INVALIDDATA;
1899  }
1900  goto eos; /* end of slice */
1901  }
1902  /* otherwise, stuffing, nothing to do */
1903  } else {
1904  s->mb_skip_run += code;
1905  break;
1906  }
1907  }
1908  if (s->mb_skip_run) {
1909  int i;
1910  if (s->pict_type == AV_PICTURE_TYPE_I) {
1911  av_log(s->avctx, AV_LOG_ERROR,
1912  "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);
1913  return AVERROR_INVALIDDATA;
1914  }
1915 
1916  /* skip mb */
1917  s->mb_intra = 0;
1918  for (i = 0; i < 12; i++)
1919  s->block_last_index[i] = -1;
1920  if (s->picture_structure == PICT_FRAME)
1921  s->mv_type = MV_TYPE_16X16;
1922  else
1923  s->mv_type = MV_TYPE_FIELD;
1924  if (s->pict_type == AV_PICTURE_TYPE_P) {
1925  /* if P type, zero motion vector is implied */
1926  s->mv_dir = MV_DIR_FORWARD;
1927  s->mv[0][0][0] = s->mv[0][0][1] = 0;
1928  s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
1929  s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
1930  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1931  } else {
1932  /* if B type, reuse previous vectors and directions */
1933  s->mv[0][0][0] = s->last_mv[0][0][0];
1934  s->mv[0][0][1] = s->last_mv[0][0][1];
1935  s->mv[1][0][0] = s->last_mv[1][0][0];
1936  s->mv[1][0][1] = s->last_mv[1][0][1];
1937  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1938  s->field_select[1][0] = (s->picture_structure - 1) & 1;
1939  }
1940  }
1941  }
1942  }
1943 eos: // end of slice
1944  if (get_bits_left(&s->gb) < 0) {
1945  av_log(s, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb));
1946  return AVERROR_INVALIDDATA;
1947  }
1948  *buf += (get_bits_count(&s->gb) - 1) / 8;
1949  ff_dlog(s, "Slice start:%d %d end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
1950  return 0;
1951 }
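Editor's note: the skip-run branch above applies one simple rule per picture type to every skipped macroblock. The sketch below restates that rule in isolation (illustrative only; the names are hypothetical and not FFmpeg API): P-pictures imply a zero forward motion vector, B-pictures reuse the previous macroblock's vectors and prediction direction.

typedef struct { int x, y; } mv_sketch;

/* Motion vector assigned to a skipped macroblock (sketch). */
static mv_sketch skipped_mb_mv(int is_p_picture, mv_sketch prev_mv)
{
    if (is_p_picture)
        return (mv_sketch){ 0, 0 };  /* zero MV, forward prediction only  */
    return prev_mv;                  /* B-picture: repeat previous vectors */
}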
1952 
1953 static int slice_decode_thread(AVCodecContext *c, void *arg)
1954 {
1955  MpegEncContext *s = *(void **) arg;
1956  const uint8_t *buf = s->gb.buffer;
1957  int mb_y = s->start_mb_y;
1958  const int field_pic = s->picture_structure != PICT_FRAME;
1959 
1960  s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
1961 
1962  for (;;) {
1963  uint32_t start_code;
1964  int ret;
1965 
1966  ret = mpeg_decode_slice(s, mb_y, &buf, s->gb.buffer_end - buf);
1967  emms_c();
1968  ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
1969  ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
1970  s->start_mb_y, s->end_mb_y, s->er.error_count);
1971  if (ret < 0) {
1972  if (c->err_recognition & AV_EF_EXPLODE)
1973  return ret;
1974  if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
1975  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1976  s->mb_x, s->mb_y,
1977  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
1978  } else {
1979  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1980  s->mb_x - 1, s->mb_y,
1981  ER_AC_END | ER_DC_END | ER_MV_END);
1982  }
1983 
1984  if (s->mb_y == s->end_mb_y)
1985  return 0;
1986 
1987  start_code = -1;
1988  buf = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code);
1989  if (start_code < SLICE_MIN_START_CODE || start_code > SLICE_MAX_START_CODE)
1990  return AVERROR_INVALIDDATA;
1991  mb_y = start_code - SLICE_MIN_START_CODE;
1992  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1993  mb_y += (*buf&0xE0)<<2;
1994  mb_y <<= field_pic;
1995  if (s->picture_structure == PICT_BOTTOM_FIELD)
1996  mb_y++;
1997  if (mb_y >= s->end_mb_y)
1998  return AVERROR_INVALIDDATA;
1999  }
2000 }
2001 
2002 /**
2003  * Handle slice ends.
2004  * @return 1 if it seems to be the last slice
2005  */
2006 static int slice_end(AVCodecContext *avctx, AVFrame *pict)
2007 {
2008  Mpeg1Context *s1 = avctx->priv_data;
2009  MpegEncContext *s = &s1->mpeg_enc_ctx;
2010 
2011  if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr)
2012  return 0;
2013 
2014  if (s->avctx->hwaccel) {
2015  int ret = s->avctx->hwaccel->end_frame(s->avctx);
2016  if (ret < 0) {
2017  av_log(avctx, AV_LOG_ERROR,
2018  "hardware accelerator failed to decode picture\n");
2019  return ret;
2020  }
2021  }
2022 
2023  /* end of slice reached */
2024  if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field && !s1->first_slice) {
2025  /* end of image */
2026 
2027  ff_er_frame_end(&s->er);
2028 
2029  ff_mpv_frame_end(s);
2030 
2031  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
2032  int ret = av_frame_ref(pict, s->current_picture_ptr->f);
2033  if (ret < 0)
2034  return ret;
2035  ff_print_debug_info(s, s->current_picture_ptr, pict);
2036  ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
2037  } else {
2038  /* latency of 1 frame for I- and P-frames */
2039  if (s->last_picture_ptr) {
2040  int ret = av_frame_ref(pict, s->last_picture_ptr->f);
2041  if (ret < 0)
2042  return ret;
2043  ff_print_debug_info(s, s->last_picture_ptr, pict);
2044  ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
2045  }
2046  }
2047 
2048  return 1;
2049  } else {
2050  return 0;
2051  }
2052 }
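Editor's note: slice_end() implements the MPEG-1/2 output order: B-pictures (and any picture in low-delay mode) are returned immediately, while I- and P-pictures are held back one frame and emitted when the next reference arrives. A minimal sketch of that rule follows (hypothetical names, not FFmpeg API):

enum pic_kind { PIC_I, PIC_P, PIC_B };

/* Which decoded picture should be handed to the caller (sketch)? */
static const char *picture_to_output(enum pic_kind cur, int low_delay)
{
    if (cur == PIC_B || low_delay)
        return "current picture";    /* no reordering delay             */
    return "previous reference";     /* one-frame latency for I and P   */
}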
2053 
2054 static int mpeg1_decode_sequence(AVCodecContext *avctx,
2055  const uint8_t *buf, int buf_size)
2056 {
2057  Mpeg1Context *s1 = avctx->priv_data;
2058  MpegEncContext *s = &s1->mpeg_enc_ctx;
2059  int width, height;
2060  int i, v, j;
2061 
2062  int ret = init_get_bits8(&s->gb, buf, buf_size);
2063  if (ret < 0)
2064  return ret;
2065 
2066  width = get_bits(&s->gb, 12);
2067  height = get_bits(&s->gb, 12);
2068  if (width == 0 || height == 0) {
2069  av_log(avctx, AV_LOG_WARNING,
2070  "Invalid horizontal or vertical size value.\n");
2071  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
2072  return AVERROR_INVALIDDATA;
2073  }
2074  s1->aspect_ratio_info = get_bits(&s->gb, 4);
2075  if (s1->aspect_ratio_info == 0) {
2076  av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
2077  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
2078  return AVERROR_INVALIDDATA;
2079  }
2080  s1->frame_rate_index = get_bits(&s->gb, 4);
2081  if (s1->frame_rate_index == 0 || s1->frame_rate_index > 13) {
2082  av_log(avctx, AV_LOG_WARNING,
2083  "frame_rate_index %d is invalid\n", s1->frame_rate_index);
2084  s1->frame_rate_index = 1;
2085  }
2086  s->bit_rate = get_bits(&s->gb, 18) * 400LL;
2087  if (check_marker(s->avctx, &s->gb, "in sequence header") == 0) {
2088  return AVERROR_INVALIDDATA;
2089  }
2090 
2091  s1->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;
2092  skip_bits(&s->gb, 1);
2093 
2094  /* get matrix */
2095  if (get_bits1(&s->gb)) {
2096  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
2097  } else {
2098  for (i = 0; i < 64; i++) {
2099  j = s->idsp.idct_permutation[i];
2100  v = ff_mpeg1_default_intra_matrix[i];
2101  s->intra_matrix[j] = v;
2102  s->chroma_intra_matrix[j] = v;
2103  }
2104  }
2105  if (get_bits1(&s->gb)) {
2106  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
2107  } else {
2108  for (i = 0; i < 64; i++) {
2109  int j = s->idsp.idct_permutation[i];
2110  v = ff_mpeg1_default_non_intra_matrix[i];
2111  s->inter_matrix[j] = v;
2112  s->chroma_inter_matrix[j] = v;
2113  }
2114  }
2115 
2116  if (show_bits(&s->gb, 23) != 0) {
2117  av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
2118  return AVERROR_INVALIDDATA;
2119  }
2120 
2121  s->width = width;
2122  s->height = height;
2123 
2124  /* We set MPEG-2 parameters so that it emulates MPEG-1. */
2125  s->progressive_sequence = 1;
2126  s->progressive_frame = 1;
2127  s->picture_structure = PICT_FRAME;
2128  s->first_field = 0;
2129  s->frame_pred_frame_dct = 1;
2130  s->chroma_format = 1;
2131  s->codec_id =
2132  s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
2133  s->out_format = FMT_MPEG1;
2134  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
2135  s->low_delay = 1;
2136 
2137  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2138  av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n",
2139  s1->rc_buffer_size, s->bit_rate, s1->aspect_ratio_info);
2140 
2141  return 0;
2142 }
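Editor's note: the sequence header parsed above packs its fields as 12+12 bits of size, 4 bits of aspect_ratio_information, 4 bits of frame_rate_code, an 18-bit bit_rate in units of 400 bit/s, a marker bit and a 10-bit vbv_buffer_size in units of 16 kbit. Below is a small sketch of the scaling applied to the raw fields; the struct and function names are illustrative, not FFmpeg API.

#include <stdint.h>

struct seq_header_sketch {
    unsigned width, height;       /* 12 bits each, 0 is invalid            */
    unsigned aspect_ratio_info;   /* 4 bits, 0 is forbidden                */
    unsigned frame_rate_code;     /* 4 bits, 0 is invalid                  */
    uint64_t bit_rate;            /* scaled from the 18-bit field          */
    unsigned vbv_buffer_size;     /* scaled from the 10-bit field, in bits */
};

static void scale_seq_header(struct seq_header_sketch *h,
                             unsigned bit_rate_field, unsigned vbv_field)
{
    h->bit_rate        = (uint64_t)bit_rate_field * 400;  /* bit/s         */
    h->vbv_buffer_size = vbv_field * 16 * 1024;           /* 16 kbit units */
}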
2143 
2144 static int vcr2_init_sequence(AVCodecContext *avctx)
2145 {
2146  Mpeg1Context *s1 = avctx->priv_data;
2147  MpegEncContext *s = &s1->mpeg_enc_ctx;
2148  int i, v, ret;
2149 
2150  /* start new MPEG-1 context decoding */
2151  s->out_format = FMT_MPEG1;
2152  if (s1->mpeg_enc_ctx_allocated) {
2153  ff_mpv_common_end(s);
2154  s1->mpeg_enc_ctx_allocated = 0;
2155  }
2156  s->width = avctx->coded_width;
2157  s->height = avctx->coded_height;
2158  avctx->has_b_frames = 0; // true?
2159  s->low_delay = 1;
2160 
2161  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
2162 
2164  if ((ret = ff_mpv_common_init(s)) < 0)
2165  return ret;
2166  s1->mpeg_enc_ctx_allocated = 1;
2167 
2168  for (i = 0; i < 64; i++) {
2169  int j = s->idsp.idct_permutation[i];
2170  v = ff_mpeg1_default_intra_matrix[i];
2171  s->intra_matrix[j] = v;
2172  s->chroma_intra_matrix[j] = v;
2173 
2174  v = ff_mpeg1_default_non_intra_matrix[i];
2175  s->inter_matrix[j] = v;
2176  s->chroma_inter_matrix[j] = v;
2177  }
2178 
2179  s->progressive_sequence = 1;
2180  s->progressive_frame = 1;
2181  s->picture_structure = PICT_FRAME;
2182  s->first_field = 0;
2183  s->frame_pred_frame_dct = 1;
2184  s->chroma_format = 1;
2185  if (s->codec_tag == AV_RL32("BW10")) {
2186  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
2187  } else {
2188  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
2189  }
2190  s1->save_width = s->width;
2191  s1->save_height = s->height;
2192  s1->save_progressive_seq = s->progressive_sequence;
2193  return 0;
2194 }
2195 
2196 static int mpeg_decode_a53_cc(AVCodecContext *avctx,
2197  const uint8_t *p, int buf_size)
2198 {
2199  Mpeg1Context *s1 = avctx->priv_data;
2200 
2201  if (buf_size >= 6 &&
2202  p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
2203  p[4] == 3 && (p[5] & 0x40)) {
2204  /* extract A53 Part 4 CC data */
2205  int cc_count = p[5] & 0x1f;
2206  if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
2207  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2208  const uint64_t new_size = (old_size + cc_count
2209  * UINT64_C(3));
2210  int ret;
2211 
2212  if (new_size > 3*A53_MAX_CC_COUNT)
2213  return AVERROR(EINVAL);
2214 
2215  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2216  if (ret >= 0)
2217  memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
2218 
2220  }
2221  return 1;
2222  } else if (buf_size >= 2 &&
2223  p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
2224  /* extract SCTE-20 CC data */
2225  GetBitContext gb;
2226  int cc_count = 0;
2227  int i, ret;
2228 
2229  ret = init_get_bits8(&gb, p + 2, buf_size - 2);
2230  if (ret < 0)
2231  return ret;
2232  cc_count = get_bits(&gb, 5);
2233  if (cc_count > 0) {
2234  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2235  const uint64_t new_size = (old_size + cc_count
2236  * UINT64_C(3));
2237  if (new_size > 3*A53_MAX_CC_COUNT)
2238  return AVERROR(EINVAL);
2239 
2240  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2241  if (ret >= 0) {
2242  uint8_t field, cc1, cc2;
2243  uint8_t *cap = s1->a53_buf_ref->data;
2244 
2245  memset(s1->a53_buf_ref->data + old_size, 0, cc_count * 3);
2246  for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
2247  skip_bits(&gb, 2); // priority
2248  field = get_bits(&gb, 2);
2249  skip_bits(&gb, 5); // line_offset
2250  cc1 = get_bits(&gb, 8);
2251  cc2 = get_bits(&gb, 8);
2252  skip_bits(&gb, 1); // marker
2253 
2254  if (!field) { // forbidden
2255  cap[0] = cap[1] = cap[2] = 0x00;
2256  } else {
2257  field = (field == 2 ? 1 : 0);
2258  if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
2259  cap[0] = 0x04 | field;
2260  cap[1] = ff_reverse[cc1];
2261  cap[2] = ff_reverse[cc2];
2262  }
2263  cap += 3;
2264  }
2265  }
2267  }
2268  return 1;
2269  } else if (buf_size >= 11 &&
2270  p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
2271  /* extract DVD CC data
2272  *
2273  * uint32_t user_data_start_code 0x000001B2 (big endian)
2274  * uint16_t user_identifier 0x4343 "CC"
2275  * uint8_t user_data_type_code 0x01
2276  * uint8_t caption_block_size 0xF8
2277  * uint8_t
2278  * bit 7 caption_odd_field_first 1=odd field (CC1/CC2) first 0=even field (CC3/CC4) first
2279  * bit 6 caption_filler 0
2280  * bit 5:1 caption_block_count number of caption blocks (pairs of caption words = frames). Most DVDs use 15 per start of GOP.
2281  * bit 0 caption_extra_field_added 1=one additional caption word
2282  *
2283  * struct caption_field_block {
2284  * uint8_t
2285  * bit 7:1 caption_filler 0x7F (all 1s)
2286  * bit 0 caption_field_odd 1=odd field (this is CC1/CC2) 0=even field (this is CC3/CC4)
2287  * uint8_t caption_first_byte
2288  * uint8_t caption_second_byte
2289  * } caption_block[(caption_block_count * 2) + caption_extra_field_added];
2290  *
2291  * Some DVDs encode caption data for both fields with caption_field_odd=1. The only way to decode the fields
2292  * correctly is to start on the field indicated by caption_odd_field_first and count between odd/even fields.
2293  * Don't assume that the first caption word is the odd field. There do exist MPEG files in the wild that start
2294  * on the even field. There also exist DVDs in the wild that encode an odd field count and the
2295  * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */
2296  int cc_count = 0;
2297  int i, ret;
2298  // There is a caption count field in the data, but it is often
2299  // incorrect. So count the number of captions present.
2300  for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
2301  cc_count++;
2302  // Transform the DVD format into A53 Part 4 format
2303  if (cc_count > 0) {
2304  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2305  const uint64_t new_size = (old_size + cc_count
2306  * UINT64_C(6));
2307  if (new_size > 3*A53_MAX_CC_COUNT)
2308  return AVERROR(EINVAL);
2309 
2310  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2311  if (ret >= 0) {
2312  uint8_t field1 = !!(p[4] & 0x80);
2313  uint8_t *cap = s1->a53_buf_ref->data;
2314  p += 5;
2315  for (i = 0; i < cc_count; i++) {
2316  cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
2317  cap[1] = p[1];
2318  cap[2] = p[2];
2319  cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
2320  cap[4] = p[4];
2321  cap[5] = p[5];
2322  cap += 6;
2323  p += 6;
2324  }
2325  }
2327  }
2328  return 1;
2329  }
2330  return 0;
2331 }
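Editor's note: all three branches above normalize captions into A53 Part 4 cc_data triplets: a flag byte carrying cc_valid (0x04) and cc_type, followed by the two caption data bytes. A hedged sketch of that packing as produced by the DVD branch (the helper name is hypothetical):

#include <stdint.h>

/* Pack one caption byte pair into the 3-byte A53 Part 4 form (sketch). */
static void pack_a53_triplet(uint8_t out[3], int cc_type, int cc_valid,
                             uint8_t data1, uint8_t data2)
{
    out[0] = 0xf8 | (cc_valid ? 0x04 : 0) | (cc_type & 3); /* 0xfc / 0xfd above */
    out[1] = data1;
    out[2] = data2;
}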
2332 
2333 static void mpeg_decode_user_data(AVCodecContext *avctx,
2334  const uint8_t *p, int buf_size)
2335 {
2336  Mpeg1Context *s = avctx->priv_data;
2337  const uint8_t *buf_end = p + buf_size;
2338  Mpeg1Context *s1 = avctx->priv_data;
2339 
2340 #if 0
2341  int i;
2342  for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){
2343  av_log(avctx, AV_LOG_ERROR, "%c", p[i]);
2344  }
2345  av_log(avctx, AV_LOG_ERROR, "\n");
2346 #endif
2347 
2348  if (buf_size > 29){
2349  int i;
2350  for(i=0; i<20; i++)
2351  if (!memcmp(p+i, "\0TMPGEXS\0", 9)){
2352  s->tmpgexs= 1;
2353  }
2354  }
2355  /* we parse the DTG active format information */
2356  if (buf_end - p >= 5 &&
2357  p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
2358  int flags = p[4];
2359  p += 5;
2360  if (flags & 0x80) {
2361  /* skip event id */
2362  p += 2;
2363  }
2364  if (flags & 0x40) {
2365  if (buf_end - p < 1)
2366  return;
2367  s1->has_afd = 1;
2368  s1->afd = p[0] & 0x0f;
2369  }
2370  } else if (buf_end - p >= 6 &&
2371  p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
2372  p[4] == 0x03) { // S3D_video_format_length
2373  // the 0x7F mask ignores the reserved_bit value
2374  const uint8_t S3D_video_format_type = p[5] & 0x7F;
2375 
2376  if (S3D_video_format_type == 0x03 ||
2377  S3D_video_format_type == 0x04 ||
2378  S3D_video_format_type == 0x08 ||
2379  S3D_video_format_type == 0x23) {
2380 
2381  s1->has_stereo3d = 1;
2382 
2383  switch (S3D_video_format_type) {
2384  case 0x03:
2385  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE;
2386  break;
2387  case 0x04:
2388  s1->stereo3d.type = AV_STEREO3D_TOPBOTTOM;
2389  break;
2390  case 0x08:
2391  s1->stereo3d.type = AV_STEREO3D_2D;
2392  break;
2393  case 0x23:
2394  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2395  break;
2396  }
2397  }
2398  } else if (mpeg_decode_a53_cc(avctx, p, buf_size)) {
2399  return;
2400  }
2401 }
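Editor's note: the DTG1 branch above skips an optional 16-bit event id before reading the 4-bit active format description. A self-contained sketch of that scan (function name hypothetical, not FFmpeg API):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Return the 4-bit AFD value from a "DTG1" user_data payload, or -1 (sketch). */
static int find_afd_sketch(const uint8_t *p, size_t size)
{
    size_t pos = 5;

    if (size < 5 || memcmp(p, "DTG1", 4))
        return -1;
    if (p[4] & 0x80)            /* optional 16-bit event id present */
        pos += 2;
    if (!(p[4] & 0x40) || pos >= size)
        return -1;
    return p[pos] & 0x0f;       /* active_format */
}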
2402 
2403 static int mpeg_decode_gop(AVCodecContext *avctx,
2404  const uint8_t *buf, int buf_size)
2405 {
2406  Mpeg1Context *s1 = avctx->priv_data;
2407  MpegEncContext *s = &s1->mpeg_enc_ctx;
2408  int broken_link;
2409  int64_t tc;
2410 
2411  int ret = init_get_bits8(&s->gb, buf, buf_size);
2412  if (ret < 0)
2413  return ret;
2414 
2415  tc = s1->timecode_frame_start = get_bits(&s->gb, 25);
2416 
2417  s1->closed_gop = get_bits1(&s->gb);
2418  /* broken_link indicates that, after editing, the reference
2419  * frames of the first B-frames following the GOP I-frame
2420  * are missing (open GOP) */
2421  broken_link = get_bits1(&s->gb);
2422 
2423  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
2424  char tcbuf[AV_TIMECODE_STR_SIZE];
2425  av_timecode_make_mpeg_tc_string(tcbuf, tc);
2426  av_log(s->avctx, AV_LOG_DEBUG,
2427  "GOP (%s) closed_gop=%d broken_link=%d\n",
2428  tcbuf, s1->closed_gop, broken_link);
2429  }
2430 
2431  return 0;
2432 }
2433 
2434 static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
2435  int *got_output, const uint8_t *buf, int buf_size)
2436 {
2437  Mpeg1Context *s = avctx->priv_data;
2438  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2439  const uint8_t *buf_ptr = buf;
2440  const uint8_t *buf_end = buf + buf_size;
2441  int ret, input_size;
2442  int last_code = 0, skip_frame = 0;
2443  int picture_start_code_seen = 0;
2444 
2445  for (;;) {
2446  /* find next start code */
2447  uint32_t start_code = -1;
2448  buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code);
2449  if (start_code > 0x1ff) {
2450  if (!skip_frame) {
2451  if (HAVE_THREADS &&
2452  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2453  !avctx->hwaccel) {
2454  int i;
2455  av_assert0(avctx->thread_count > 1);
2456 
2457  avctx->execute(avctx, slice_decode_thread,
2458  &s2->thread_context[0], NULL,
2459  s->slice_count, sizeof(void *));
2460  for (i = 0; i < s->slice_count; i++)
2461  s2->er.error_count += s2->thread_context[i]->er.error_count;
2462  }
2463 
2464  ret = slice_end(avctx, picture);
2465  if (ret < 0)
2466  return ret;
2467  else if (ret) {
2468  // FIXME: merge with the stuff in mpeg_decode_slice
2469  if (s2->last_picture_ptr || s2->low_delay || s2->pict_type == AV_PICTURE_TYPE_B)
2470  *got_output = 1;
2471  }
2472  }
2473  s2->pict_type = 0;
2474 
2475  if (avctx->err_recognition & AV_EF_EXPLODE && s2->er.error_count)
2476  return AVERROR_INVALIDDATA;
2477 
2478  return FFMAX(0, buf_ptr - buf);
2479  }
2480 
2481  input_size = buf_end - buf_ptr;
2482 
2483  if (avctx->debug & FF_DEBUG_STARTCODE)
2484  av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %"PTRDIFF_SPECIFIER" left %d\n",
2485  start_code, buf_ptr - buf, input_size);
2486 
2487  /* prepare data for next start code */
2488  switch (start_code) {
2489  case SEQ_START_CODE:
2490  if (last_code == 0) {
2491  mpeg1_decode_sequence(avctx, buf_ptr, input_size);
2492  if (buf != avctx->extradata)
2493  s->sync = 1;
2494  } else {
2495  av_log(avctx, AV_LOG_ERROR,
2496  "ignoring SEQ_START_CODE after %X\n", last_code);
2497  if (avctx->err_recognition & AV_EF_EXPLODE)
2498  return AVERROR_INVALIDDATA;
2499  }
2500  break;
2501 
2502  case PICTURE_START_CODE:
2503  if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
2504  /* If it's a frame picture, there can't be more than one picture header.
2505  Yet, it does happen and we need to handle it. */
2506  av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n");
2507  break;
2508  }
2509  picture_start_code_seen = 1;
2510 
2511  if (s2->width <= 0 || s2->height <= 0) {
2512  av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n",
2513  s2->width, s2->height);
2514  return AVERROR_INVALIDDATA;
2515  }
2516 
2517  if (s->tmpgexs){
2518  s2->intra_dc_precision= 3;
2519  s2->intra_matrix[0]= 1;
2520  }
2521  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
2522  !avctx->hwaccel && s->slice_count) {
2523  int i;
2524 
2525  avctx->execute(avctx, slice_decode_thread,
2526  s2->thread_context, NULL,
2527  s->slice_count, sizeof(void *));
2528  for (i = 0; i < s->slice_count; i++)
2529  s2->er.error_count += s2->thread_context[i]->er.error_count;
2530  s->slice_count = 0;
2531  }
2532  if (last_code == 0 || last_code == SLICE_MIN_START_CODE) {
2533  ret = mpeg_decode_postinit(avctx);
2534  if (ret < 0) {
2535  av_log(avctx, AV_LOG_ERROR,
2536  "mpeg_decode_postinit() failure\n");
2537  return ret;
2538  }
2539 
2540  /* We have a complete image: we try to decompress it. */
2541  if (mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0)
2542  s2->pict_type = 0;
2543  s->first_slice = 1;
2544  last_code = PICTURE_START_CODE;
2545  } else {
2546  av_log(avctx, AV_LOG_ERROR,
2547  "ignoring pic after %X\n", last_code);
2548  if (avctx->err_recognition & AV_EF_EXPLODE)
2549  return AVERROR_INVALIDDATA;
2550  }
2551  break;
2552  case EXT_START_CODE:
2553  ret = init_get_bits8(&s2->gb, buf_ptr, input_size);
2554  if (ret < 0)
2555  return ret;
2556 
2557  switch (get_bits(&s2->gb, 4)) {
2558  case 0x1:
2559  if (last_code == 0) {
2560  mpeg_decode_sequence_extension(s);
2561  } else {
2562  av_log(avctx, AV_LOG_ERROR,
2563  "ignoring seq ext after %X\n", last_code);
2564  if (avctx->err_recognition & AV_EF_EXPLODE)
2565  return AVERROR_INVALIDDATA;
2566  }
2567  break;
2568  case 0x2:
2569  mpeg_decode_sequence_display_extension(s);
2570  break;
2571  case 0x3:
2572  mpeg_decode_quant_matrix_extension(s2);
2573  break;
2574  case 0x7:
2575  mpeg_decode_picture_display_extension(s);
2576  break;
2577  case 0x8:
2578  if (last_code == PICTURE_START_CODE) {
2579  ret = mpeg_decode_picture_coding_extension(s);
2580  if (ret < 0)
2581  return ret;
2582  } else {
2583  av_log(avctx, AV_LOG_ERROR,
2584  "ignoring pic cod ext after %X\n", last_code);
2585  if (avctx->err_recognition & AV_EF_EXPLODE)
2586  return AVERROR_INVALIDDATA;
2587  }
2588  break;
2589  }
2590  break;
2591  case USER_START_CODE:
2592  mpeg_decode_user_data(avctx, buf_ptr, input_size);
2593  break;
2594  case GOP_START_CODE:
2595  if (last_code == 0) {
2596  s2->first_field = 0;
2597  ret = mpeg_decode_gop(avctx, buf_ptr, input_size);
2598  if (ret < 0)
2599  return ret;
2600  s->sync = 1;
2601  } else {
2602  av_log(avctx, AV_LOG_ERROR,
2603  "ignoring GOP_START_CODE after %X\n", last_code);
2604  if (avctx->err_recognition & AV_EF_EXPLODE)
2605  return AVERROR_INVALIDDATA;
2606  }
2607  break;
2608  default:
2609  if (start_code >= SLICE_MIN_START_CODE &&
2610  start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) {
2611  if (s2->progressive_sequence && !s2->progressive_frame) {
2612  s2->progressive_frame = 1;
2613  av_log(s2->avctx, AV_LOG_ERROR,
2614  "interlaced frame in progressive sequence, ignoring\n");
2615  }
2616 
2617  if (s2->picture_structure == 0 ||
2618  (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) {
2619  av_log(s2->avctx, AV_LOG_ERROR,
2620  "picture_structure %d invalid, ignoring\n",
2621  s2->picture_structure);
2622  s2->picture_structure = PICT_FRAME;
2623  }
2624 
2625  if (s2->progressive_sequence && !s2->frame_pred_frame_dct)
2626  av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
2627 
2628  if (s2->picture_structure == PICT_FRAME) {
2629  s2->first_field = 0;
2630  s2->v_edge_pos = 16 * s2->mb_height;
2631  } else {
2632  s2->first_field ^= 1;
2633  s2->v_edge_pos = 8 * s2->mb_height;
2634  memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
2635  }
2636  }
2637  if (start_code >= SLICE_MIN_START_CODE &&
2638  start_code <= SLICE_MAX_START_CODE && last_code != 0) {
2639  const int field_pic = s2->picture_structure != PICT_FRAME;
2640  int mb_y = start_code - SLICE_MIN_START_CODE;
2641  last_code = SLICE_MIN_START_CODE;
2642  if (s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16)
2643  mb_y += (*buf_ptr&0xE0)<<2;
2644 
2645  mb_y <<= field_pic;
2646  if (s2->picture_structure == PICT_BOTTOM_FIELD)
2647  mb_y++;
2648 
2649  if (buf_end - buf_ptr < 2) {
2650  av_log(s2->avctx, AV_LOG_ERROR, "slice too small\n");
2651  return AVERROR_INVALIDDATA;
2652  }
2653 
2654  if (mb_y >= s2->mb_height) {
2655  av_log(s2->avctx, AV_LOG_ERROR,
2656  "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
2657  return AVERROR_INVALIDDATA;
2658  }
2659 
2660  if (!s2->last_picture_ptr) {
2661  /* Skip B-frames if we do not have reference frames and
2662  * the GOP is not closed. */
2663  if (s2->pict_type == AV_PICTURE_TYPE_B) {
2664  if (!s->closed_gop) {
2665  skip_frame = 1;
2666  av_log(s2->avctx, AV_LOG_DEBUG,
2667  "Skipping B slice due to open GOP\n");
2668  break;
2669  }
2670  }
2671  }
2672  if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
2673  s->sync = 1;
2674  if (!s2->next_picture_ptr) {
2675  /* Skip P-frames if we do not have a reference frame or
2676  * we have an invalid header. */
2677  if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) {
2678  skip_frame = 1;
2679  av_log(s2->avctx, AV_LOG_DEBUG,
2680  "Skipping P slice due to !sync\n");
2681  break;
2682  }
2683  }
2684  if ((avctx->skip_frame >= AVDISCARD_NONREF &&
2685  s2->pict_type == AV_PICTURE_TYPE_B) ||
2686  (avctx->skip_frame >= AVDISCARD_NONKEY &&
2687  s2->pict_type != AV_PICTURE_TYPE_I) ||
2688  avctx->skip_frame >= AVDISCARD_ALL) {
2689  skip_frame = 1;
2690  break;
2691  }
2692 
2693  if (!s->mpeg_enc_ctx_allocated)
2694  break;
2695 
2696  if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
2697  if (mb_y < avctx->skip_top ||
2698  mb_y >= s2->mb_height - avctx->skip_bottom)
2699  break;
2700  }
2701 
2702  if (!s2->pict_type) {
2703  av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
2704  if (avctx->err_recognition & AV_EF_EXPLODE)
2705  return AVERROR_INVALIDDATA;
2706  break;
2707  }
2708 
2709  if (s->first_slice) {
2710  skip_frame = 0;
2711  s->first_slice = 0;
2712  if ((ret = mpeg_field_start(s2, buf, buf_size)) < 0)
2713  return ret;
2714  }
2715  if (!s2->current_picture_ptr) {
2716  av_log(avctx, AV_LOG_ERROR,
2717  "current_picture not initialized\n");
2718  return AVERROR_INVALIDDATA;
2719  }
2720 
2721  if (HAVE_THREADS &&
2722  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2723  !avctx->hwaccel) {
2724  int threshold = (s2->mb_height * s->slice_count +
2725  s2->slice_context_count / 2) /
2726  s2->slice_context_count;
2727  av_assert0(avctx->thread_count > 1);
2728  if (threshold <= mb_y) {
2729  MpegEncContext *thread_context = s2->thread_context[s->slice_count];
2730 
2731  thread_context->start_mb_y = mb_y;
2732  thread_context->end_mb_y = s2->mb_height;
2733  if (s->slice_count) {
2734  s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
2735  ret = ff_update_duplicate_context(thread_context, s2);
2736  if (ret < 0)
2737  return ret;
2738  }
2739  ret = init_get_bits8(&thread_context->gb, buf_ptr, input_size);
2740  if (ret < 0)
2741  return ret;
2742  s->slice_count++;
2743  }
2744  buf_ptr += 2; // FIXME add minimum number of bytes per slice
2745  } else {
2746  ret = mpeg_decode_slice(s2, mb_y, &buf_ptr, input_size);
2747  emms_c();
2748 
2749  if (ret < 0) {
2750  if (avctx->err_recognition & AV_EF_EXPLODE)
2751  return ret;
2752  if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
2753  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2754  s2->resync_mb_y, s2->mb_x, s2->mb_y,
2755  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
2756  } else {
2757  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2758  s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
2759  ER_AC_END | ER_DC_END | ER_MV_END);
2760  }
2761  }
2762  }
2763  break;
2764  }
2765  }
2766 }
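Editor's note: decode_chunks() walks the buffer from start code to start code; MPEG start codes are the byte pattern 00 00 01 followed by one code byte (0xB3 sequence header, 0xB8 GOP, 0x00 picture, 0x01-0xAF slices). The scan below is equivalent in spirit to the one used above (a sketch only, not the actual avpriv_find_start_code implementation):

#include <stdint.h>

static const uint8_t *find_next_start_code_sketch(const uint8_t *p,
                                                  const uint8_t *end,
                                                  uint32_t *code)
{
    for (; end - p >= 4; p++) {
        if (p[0] == 0 && p[1] == 0 && p[2] == 1) {
            *code = 0x100 | p[3];
            return p + 4;          /* position just past the start code */
        }
    }
    *code = (uint32_t)-1;
    return end;
}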
2767 
2768 static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
2769  int *got_output, AVPacket *avpkt)
2770 {
2771  const uint8_t *buf = avpkt->data;
2772  int ret;
2773  int buf_size = avpkt->size;
2774  Mpeg1Context *s = avctx->priv_data;
2775  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2776 
2777  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
2778  /* special case for last picture */
2779  if (s2->low_delay == 0 && s2->next_picture_ptr) {
2780  int ret = av_frame_ref(picture, s2->next_picture_ptr->f);
2781  if (ret < 0)
2782  return ret;
2783 
2784  s2->next_picture_ptr = NULL;
2785 
2786  *got_output = 1;
2787  }
2788  return buf_size;
2789  }
2790 
2791  if (s->mpeg_enc_ctx_allocated == 0 && ( s2->codec_tag == AV_RL32("VCR2")
2792  || s2->codec_tag == AV_RL32("BW10")
2793  ))
2794  vcr2_init_sequence(avctx);
2795 
2796  s->slice_count = 0;
2797 
2798  if (avctx->extradata && !s->extradata_decoded) {
2799  ret = decode_chunks(avctx, picture, got_output,
2800  avctx->extradata, avctx->extradata_size);
2801  if (*got_output) {
2802  av_log(avctx, AV_LOG_ERROR, "picture in extradata\n");
2803  av_frame_unref(picture);
2804  *got_output = 0;
2805  }
2806  s->extradata_decoded = 1;
2807  if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
2808  s2->current_picture_ptr = NULL;
2809  return ret;
2810  }
2811  }
2812 
2813  ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
2814  if (ret<0 || *got_output) {
2815  s2->current_picture_ptr = NULL;
2816 
2817  if (s->timecode_frame_start != -1 && *got_output) {
2818  char tcbuf[AV_TIMECODE_STR_SIZE];
2819  AVFrameSideData *tcside = av_frame_new_side_data(picture,
2820  AV_FRAME_DATA_GOP_TIMECODE,
2821  sizeof(int64_t));
2822  if (!tcside)
2823  return AVERROR(ENOMEM);
2824  memcpy(tcside->data, &s->timecode_frame_start, sizeof(int64_t));
2825 
2826  av_timecode_make_mpeg_tc_string(tcbuf, s->timecode_frame_start);
2827  av_dict_set(&picture->metadata, "timecode", tcbuf, 0);
2828 
2829  s->timecode_frame_start = -1;
2830  }
2831  }
2832 
2833  return ret;
2834 }
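Editor's note: because I- and P-pictures are emitted with one frame of latency (see slice_end() above), callers have to drain the decoder at end of stream to obtain the final reference picture; the empty or SEQ_END_CODE packet handled at the top of mpeg_decode_frame() serves exactly that purpose. A usage sketch with the public libavcodec API:

#include <libavcodec/avcodec.h>

static int drain_decoder_sketch(AVCodecContext *dec, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec, NULL);   /* signal end of stream */
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_frame(dec, frame)) >= 0) {
        /* ... consume the delayed frame ... */
        av_frame_unref(frame);
    }
    return ret == AVERROR_EOF ? 0 : ret;
}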
2835 
2836 static void flush(AVCodecContext *avctx)
2837 {
2838  Mpeg1Context *s = avctx->priv_data;
2839 
2840  s->sync = 0;
2841  s->closed_gop = 0;
2842 
2843  av_buffer_unref(&s->a53_buf_ref);
2844  ff_mpeg_flush(avctx);
2845 }
2846 
2847 static av_cold int mpeg_decode_end(AVCodecContext *avctx)
2848 {
2849  Mpeg1Context *s = avctx->priv_data;
2850 
2851  if (s->mpeg_enc_ctx_allocated)
2852  ff_mpv_common_end(&s->mpeg_enc_ctx);
2853  av_buffer_unref(&s->a53_buf_ref);
2854  return 0;
2855 }
2856 
2857 const FFCodec ff_mpeg1video_decoder = {
2858  .p.name = "mpeg1video",
2859  CODEC_LONG_NAME("MPEG-1 video"),
2860  .p.type = AVMEDIA_TYPE_VIDEO,
2861  .p.id = AV_CODEC_ID_MPEG1VIDEO,
2862  .priv_data_size = sizeof(Mpeg1Context),
2864  .close = mpeg_decode_end,
2866  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2868  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2869  .flush = flush,
2870  .p.max_lowres = 3,
2871  UPDATE_THREAD_CONTEXT(mpeg_decode_update_thread_context),
2872  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2873 #if CONFIG_MPEG1_NVDEC_HWACCEL
2874  HWACCEL_NVDEC(mpeg1),
2875 #endif
2876 #if CONFIG_MPEG1_VDPAU_HWACCEL
2877  HWACCEL_VDPAU(mpeg1),
2878 #endif
2879 #if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL
2880  HWACCEL_VIDEOTOOLBOX(mpeg1),
2881 #endif
2882  NULL
2883  },
2884 };
2885 
2886 const FFCodec ff_mpeg2video_decoder = {
2887  .p.name = "mpeg2video",
2888  CODEC_LONG_NAME("MPEG-2 video"),
2889  .p.type = AVMEDIA_TYPE_VIDEO,
2890  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2891  .priv_data_size = sizeof(Mpeg1Context),
2893  .close = mpeg_decode_end,
2895  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2897  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2898  .flush = flush,
2899  .p.max_lowres = 3,
2901  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2902 #if CONFIG_MPEG2_DXVA2_HWACCEL
2903  HWACCEL_DXVA2(mpeg2),
2904 #endif
2905 #if CONFIG_MPEG2_D3D11VA_HWACCEL
2906  HWACCEL_D3D11VA(mpeg2),
2907 #endif
2908 #if CONFIG_MPEG2_D3D11VA2_HWACCEL
2909  HWACCEL_D3D11VA2(mpeg2),
2910 #endif
2911 #if CONFIG_MPEG2_NVDEC_HWACCEL
2912  HWACCEL_NVDEC(mpeg2),
2913 #endif
2914 #if CONFIG_MPEG2_VAAPI_HWACCEL
2915  HWACCEL_VAAPI(mpeg2),
2916 #endif
2917 #if CONFIG_MPEG2_VDPAU_HWACCEL
2918  HWACCEL_VDPAU(mpeg2),
2919 #endif
2920 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
2921  HWACCEL_VIDEOTOOLBOX(mpeg2),
2922 #endif
2923  NULL
2924  },
2925 };
2926 
2927 // legacy decoder
2928 const FFCodec ff_mpegvideo_decoder = {
2929  .p.name = "mpegvideo",
2930  CODEC_LONG_NAME("MPEG-1 video"),
2931  .p.type = AVMEDIA_TYPE_VIDEO,
2932  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2933  .priv_data_size = sizeof(Mpeg1Context),
2935  .close = mpeg_decode_end,
2937  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2939  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2940  .flush = flush,
2941  .p.max_lowres = 3,
2942 };
2943 
2944 typedef struct IPUContext {
2945  MpegEncContext m;
2946 
2947  int flags;
2948  DECLARE_ALIGNED(32, int16_t, block)[6][64];
2949 } IPUContext;
2950 
2951 static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2952  int *got_frame, AVPacket *avpkt)
2953 {
2954  IPUContext *s = avctx->priv_data;
2955  MpegEncContext *m = &s->m;
2956  GetBitContext *gb = &m->gb;
2957  int ret;
2958 
2959  ret = ff_get_buffer(avctx, frame, 0);
2960  if (ret < 0)
2961  return ret;
2962 
2963  ret = init_get_bits8(gb, avpkt->data, avpkt->size);
2964  if (ret < 0)
2965  return ret;
2966 
2967  s->flags = get_bits(gb, 8);
2968  m->intra_dc_precision = s->flags & 3;
2969  m->q_scale_type = !!(s->flags & 0x40);
2970  m->intra_vlc_format = !!(s->flags & 0x20);
2971  m->alternate_scan = !!(s->flags & 0x10);
2972 
2973  if (s->flags & 0x10) {
2976  } else {
2979  }
2980 
2981  m->last_dc[0] = m->last_dc[1] = m->last_dc[2] = 1 << (7 + (s->flags & 3));
2982  m->qscale = 1;
2983 
2984  for (int y = 0; y < avctx->height; y += 16) {
2985  int intraquant;
2986 
2987  for (int x = 0; x < avctx->width; x += 16) {
2988  if (x || y) {
2989  if (!get_bits1(gb))
2990  return AVERROR_INVALIDDATA;
2991  }
2992  if (get_bits1(gb)) {
2993  intraquant = 0;
2994  } else {
2995  if (!get_bits1(gb))
2996  return AVERROR_INVALIDDATA;
2997  intraquant = 1;
2998  }
2999 
3000  if (s->flags & 4)
3001  skip_bits1(gb);
3002 
3003  if (intraquant)
3004  m->qscale = mpeg_get_qscale(m);
3005 
3006  memset(s->block, 0, sizeof(s->block));
3007 
3008  for (int n = 0; n < 6; n++) {
3009  if (s->flags & 0x80) {
3011  m->intra_matrix,
3013  m->last_dc, s->block[n],
3014  n, m->qscale);
3015  if (ret >= 0)
3016  m->block_last_index[n] = ret;
3017  } else {
3018  ret = mpeg2_decode_block_intra(m, s->block[n], n);
3019  }
3020 
3021  if (ret < 0)
3022  return ret;
3023  }
3024 
3025  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
3026  frame->linesize[0], s->block[0]);
3027  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
3028  frame->linesize[0], s->block[1]);
3029  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
3030  frame->linesize[0], s->block[2]);
3031  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
3032  frame->linesize[0], s->block[3]);
3033  m->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
3034  frame->linesize[1], s->block[4]);
3035  m->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
3036  frame->linesize[2], s->block[5]);
3037  }
3038  }
3039 
3040  align_get_bits(gb);
3041  if (get_bits_left(gb) != 32)
3042  return AVERROR_INVALIDDATA;
3043 
3044  frame->pict_type = AV_PICTURE_TYPE_I;
3045  frame->key_frame = 1;
3046  *got_frame = 1;
3047 
3048  return avpkt->size;
3049 }
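Editor's note: ipu_decode_frame() drives everything off a single per-frame flags byte; the layout below is inferred from that code and is not a documented format description. A sketch of the decoded flag fields (names are illustrative):

#include <stdint.h>

struct ipu_flags_sketch {
    int intra_dc_precision;  /* bits 1:0                              */
    int extra_bit_per_mb;    /* 0x04: one stuffing bit per macroblock */
    int alternate_scan;      /* 0x10                                  */
    int intra_vlc_format;    /* 0x20                                  */
    int q_scale_type;        /* 0x40                                  */
    int mpeg1_blocks;        /* 0x80: MPEG-1 style intra block coding */
};

static struct ipu_flags_sketch parse_ipu_flags(uint8_t flags)
{
    return (struct ipu_flags_sketch){
        .intra_dc_precision = flags & 3,
        .extra_bit_per_mb   = !!(flags & 0x04),
        .alternate_scan     = !!(flags & 0x10),
        .intra_vlc_format   = !!(flags & 0x20),
        .q_scale_type       = !!(flags & 0x40),
        .mpeg1_blocks       = !!(flags & 0x80),
    };
}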
3050 
3051 static av_cold int ipu_decode_init(AVCodecContext *avctx)
3052 {
3053  IPUContext *s = avctx->priv_data;
3054  MpegEncContext *m = &s->m;
3055 
3056  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
3057 
3058  ff_mpv_decode_init(m, avctx);
3059  ff_mpv_idct_init(m);
3061 
3062  for (int i = 0; i < 64; i++) {
3063  int j = m->idsp.idct_permutation[i];
3064  int v = ff_mpeg1_default_intra_matrix[i];
3065  m->intra_matrix[j] = v;
3066  m->chroma_intra_matrix[j] = v;
3067  }
3068 
3069  for (int i = 0; i < 64; i++) {
3070  int j = m->idsp.idct_permutation[i];
3071  int v = ff_mpeg1_default_non_intra_matrix[i];
3072  m->inter_matrix[j] = v;
3073  m->chroma_inter_matrix[j] = v;
3074  }
3075 
3076  return 0;
3077 }
3078 
3079 static av_cold int ipu_decode_end(AVCodecContext *avctx)
3080 {
3081  IPUContext *s = avctx->priv_data;
3082 
3083  ff_mpv_common_end(&s->m);
3084 
3085  return 0;
3086 }
3087 
3088 const FFCodec ff_ipu_decoder = {
3089  .p.name = "ipu",
3090  CODEC_LONG_NAME("IPU Video"),
3091  .p.type = AVMEDIA_TYPE_VIDEO,
3092  .p.id = AV_CODEC_ID_IPU,
3093  .priv_data_size = sizeof(IPUContext),
3094  .init = ipu_decode_init,
3096  .close = ipu_decode_end,
3097  .p.capabilities = AV_CODEC_CAP_DR1,
3098  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
3099 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
vcr2_init_sequence
static int vcr2_init_sequence(AVCodecContext *avctx)
Definition: mpeg12dec.c:2144
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:682
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1405
MB_TYPE_L0
#define MB_TYPE_L0
Definition: mpegutils.h:60
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:262
Mpeg1Context::has_afd
int has_afd
Definition: mpeg12dec.c:72
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
level
uint8_t level
Definition: svq3.c:204
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
Mpeg1Context::a53_buf_ref
AVBufferRef * a53_buf_ref
Definition: mpeg12dec.c:70
ff_mpeg2_aspect
const AVRational ff_mpeg2_aspect[16]
Definition: mpeg12data.c:380
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:474
mpeg_decode_a53_cc
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2196
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:664
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:506
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:114
FF_MPV_QSCALE_TYPE_MPEG2
#define FF_MPV_QSCALE_TYPE_MPEG2
Definition: mpegvideodec.h:41
mem_internal.h
mpeg_decode_frame
static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_output, AVPacket *avpkt)
Definition: mpeg12dec.c:2768
MpegEncContext::gb
GetBitContext gb
Definition: mpegvideo.h:426
AV_EF_COMPLIANT
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
Definition: defs.h:55
ff_thread_get_format
#define ff_thread_get_format
Definition: thread.h:65
SEQ_END_CODE
#define SEQ_END_CODE
Definition: mpeg12.h:28
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:670
check_scantable_index
#define check_scantable_index(ctx, x)
Definition: mpeg12dec.c:142
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
MT_FIELD
#define MT_FIELD
Definition: mpeg12dec.c:654
EXT_START_CODE
#define EXT_START_CODE
Definition: cavs.h:39
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:264
ff_mbincr_vlc
VLC ff_mbincr_vlc
Definition: mpeg12.c:123
av_div_q
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
matrix
Definition: vc1dsp.c:42
AVPanScan
Pan Scan area.
Definition: defs.h:97
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1382
SLICE_MAX_START_CODE
#define SLICE_MAX_START_CODE
Definition: cavs.h:38
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:48
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:256
ipu_decode_init
static av_cold int ipu_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:3051
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:490
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:198
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:565
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
ff_mpegvideo_decoder
const FFCodec ff_mpegvideo_decoder
Definition: mpeg12dec.c:2928
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:374
ipu_decode_end
static av_cold int ipu_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:3079
mpeg_decode_mb
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpeg12dec.c:659
Mpeg1Context::closed_gop
int closed_gop
Definition: mpeg12dec.c:81
mpeg2_decode_block_intra
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:483
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:176
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:47
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:247
FFCodec
Definition: codec_internal.h:127
mpeg2_fast_decode_block_intra
static int mpeg2_fast_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
Definition: mpeg12dec.c:569
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:822
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:848
reverse.h
mpegvideo.h
MB_TYPE_L1
#define MB_TYPE_L1
Definition: mpegutils.h:61
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:216
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Mpeg1Context::first_slice
int first_slice
Definition: mpeg12dec.c:83
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:35
mpeg_decode_postinit
static int mpeg_decode_postinit(AVCodecContext *avctx)
Definition: mpeg12dec.c:1175
ff_add_cpb_side_data
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:1028
mpegutils.h
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
ER_MV_ERROR
#define ER_MV_ERROR
Definition: error_resilience.h:33
thread.h
ff_mb_pat_vlc
VLC ff_mb_pat_vlc
Definition: mpeg12.c:126
SEQ_START_CODE
#define SEQ_START_CODE
Definition: mpeg12.h:29
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1359
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:266
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:253
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:371
ff_mpeg2_rl_vlc
RL_VLC_ELEM ff_mpeg2_rl_vlc[674]
Definition: mpeg12.c:129
Mpeg1Context::save_aspect
AVRational save_aspect
Definition: mpeg12dec.c:75
MpegEncContext::intra_scantable
ScanTable intra_scantable
Definition: mpegvideo.h:81
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1750
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:325
MB_TYPE_ZERO_MV
#define MB_TYPE_ZERO_MV
Definition: mpeg12dec.c:88
MT_DMV
#define MT_DMV
Definition: mpeg12dec.c:657
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:1006
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
decode_chunks
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2434
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1713
mpeg_decode_quant_matrix_extension
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
Definition: mpeg12dec.c:1488
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1502
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
wrap
#define wrap(func)
Definition: neontest.h:65
timecode.h
GetBitContext
Definition: get_bits.h:107
AV_EF_BITSTREAM
#define AV_EF_BITSTREAM
detect bitstream specification deviations
Definition: defs.h:49
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:92
slice_decode_thread
static int slice_decode_thread(AVCodecContext *c, void *arg)
Definition: mpeg12dec.c:1953
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:506
IDCTDSPContext::idct_put
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:63
MB_TYPE_CBP
#define MB_TYPE_CBP
Definition: mpegutils.h:64
val
static double val(void *priv, double ch)
Definition: aeval.c:77
Mpeg1Context::tmpgexs
int tmpgexs
Definition: mpeg12dec.c:82
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:313
mpeg12_pixfmt_list_444
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
Definition: mpeg12dec.c:1147
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:613
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:499
mpeg1_decode_sequence
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2054
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
HAS_CBP
#define HAS_CBP(a)
Definition: mpegutils.h:94
AVRational::num
int num
Numerator.
Definition: rational.h:59
GOP_START_CODE
#define GOP_START_CODE
Definition: mpeg12.h:30
IPUContext
Definition: mpeg12dec.c:2944
mpeg1_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:1107
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:784
mpeg12.h
mpegvideodec.h
ff_mpeg2video_decoder
const FFCodec ff_mpeg2video_decoder
Definition: mpeg12dec.c:2886
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
Mpeg1Context::frame_rate_index
unsigned frame_rate_index
Definition: mpeg12dec.c:79
ipu_decode_frame
static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mpeg12dec.c:2951
ER_DC_ERROR
#define ER_DC_ERROR
Definition: error_resilience.h:32
av_cold
#define av_cold
Definition: attributes.h:90
mpeg2_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:1118
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:524
mpeg1_decode_picture
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1308
flush
static void flush(AVCodecContext *avctx)
Definition: mpeg12dec.c:2836
Mpeg1Context::save_progressive_seq
int save_progressive_seq
Definition: mpeg12dec.c:76
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:187
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:528
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:721
A53_MAX_CC_COUNT
#define A53_MAX_CC_COUNT
Definition: mpeg12dec.c:61
Mpeg1Context::repeat_field
int repeat_field
Definition: mpeg12dec.c:66
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
stereo3d.h
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:256
ff_mpeg1_aspect
const float ff_mpeg1_aspect[16]
Definition: mpeg12data.c:359
AVCodecContext::ticks_per_frame
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:557
s1
#define s1
Definition: regdef.h:38
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2006
Mpeg1Context::mpeg_enc_ctx_allocated
int mpeg_enc_ctx_allocated
Definition: mpeg12dec.c:65
SHOW_SBITS
#define SHOW_SBITS(name, gb, num)
Definition: get_bits.h:250
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:365
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:47
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
Mpeg1Context::aspect_ratio_info
unsigned aspect_ratio_info
Definition: mpeg12dec.c:74
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
mpeg_decode_sequence_display_extension
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1407
Mpeg1Context::pan_scan
AVPanScan pan_scan
Definition: mpeg12dec.c:67
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:310
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
decode.h
mpeg12_pixfmt_list_422
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
Definition: mpeg12dec.c:1142
SKIP_BITS
#define SKIP_BITS(name, gb, num)
Definition: get_bits.h:231
IS_INTRA
#define IS_INTRA(x, y)
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1254
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:126
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:436
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
Mpeg1Context::rc_buffer_size
int rc_buffer_size
Definition: mpeg12dec.c:77
MB_PTYPE_VLC_BITS
#define MB_PTYPE_VLC_BITS
Definition: mpeg12vlc.h:39
Mpeg1Context::save_width
int save_width
Definition: mpeg12dec.c:76
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:149
NULL
#define NULL
Definition: coverity.c:32
run
uint8_t run
Definition: svq3.c:203
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1009
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
ER_AC_ERROR
#define ER_AC_ERROR
Definition: error_resilience.h:31
SLICE_MIN_START_CODE
#define SLICE_MIN_START_CODE
Definition: mpeg12.h:32
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:342
Mpeg1Context::sync
int sync
Definition: mpeg12dec.c:80
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:682
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCHROMA_LOC_TOPLEFT
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
Definition: pixfmt.h:684
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:476
mpeg_decode_picture_display_extension
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1431
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:298
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:336
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:378
profiles.h
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:237
MB_TYPE_QUANT
#define MB_TYPE_QUANT
Definition: mpegutils.h:63
avpriv_find_start_code
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
lowres
static int lowres
Definition: ffplay.c:335
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:638
ff_mpeg1_rl_vlc
RL_VLC_ELEM ff_mpeg1_rl_vlc[680]
Definition: mpeg12.c:128
MB_BTYPE_VLC_BITS
#define MB_BTYPE_VLC_BITS
Definition: mpeg12vlc.h:40
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:281
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
mpeg12codecs.h
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:631
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
AVCodecContext::level
int level
level
Definition: avcodec.h:1691
Mpeg1Context::save_height
int save_height
Definition: mpeg12dec.c:76
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::idsp
IDCTDSPContext idsp
Definition: mpegvideo.h:218
ff_mb_ptype_vlc
VLC ff_mb_ptype_vlc
Definition: mpeg12.c:124
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
quant_matrix_rebuild
static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm, const uint8_t *new_perm)
Definition: mpeg12dec.c:1095
startcode.h
s2
#define s2
Definition: regdef.h:39
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:75
check_marker
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
Definition: mpegvideodec.h:73
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:513
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1473
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:301
AVPacket::size
int size
Definition: packet.h:375
dc
Intra DC Prediction block[y][x] dc[] (see the Snow bitstream description in snow.txt)
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:115
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:195
AV_CODEC_ID_IPU
@ AV_CODEC_ID_IPU
Definition: codec_id.h:309
AV_FRAME_DATA_PANSCAN
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:53
RL_VLC_ELEM
Definition: vlc.h:37
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:344
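A minimal usage sketch (not taken from this file): pairing av_frame_ref() with av_frame_unref(); src is assumed to be an already-filled frame.
    #include <libavutil/frame.h>

    static int borrow_frame(AVFrame *dst, const AVFrame *src)
    {
        int ret = av_frame_ref(dst, src); /* dst now shares src's data buffers */
        if (ret < 0)
            return ret;
        /* ... read from dst ... */
        av_frame_unref(dst);              /* drop the reference; src is untouched */
        return 0;
    }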
MT_FRAME
#define MT_FRAME
Definition: mpeg12dec.c:655
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:87
shift
static int shift(int a, int b)
Definition: bonk.c:257
IPUContext::flags
int flags
Definition: mpeg12dec.c:2947
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:296
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:106
ff_mpeg1video_decoder
const FFCodec ff_mpeg1video_decoder
Definition: mpeg12dec.c:2857
AV_RB32
#define AV_RB32(x)
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: codec_internal.h:54
AVFrameSideData::data
uint8_t * data
Definition: frame.h:238
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:55
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1514
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:536
PICTURE_START_CODE
#define PICTURE_START_CODE
Definition: mpeg12.h:31
USER_START_CODE
#define USER_START_CODE
Definition: cavs.h:40
AVCodecContext::skip_bottom
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
Definition: avcodec.h:940
AVCodecHWConfigInternal
Definition: hwconfig.h:29
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:162
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:271
MB_TYPE_INTERLACED
#define MB_TYPE_INTERLACED
Definition: mpegutils.h:51
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:176
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:544
height
#define height
Mpeg1Context::has_stereo3d
int has_stereo3d
Definition: mpeg12dec.c:69
mpeg_decode_init
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:1049
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:111
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
mpegvideodata.h
attributes.h
ff_mpeg1_decode_block_intra
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, const uint8_t *scantable, int last_dc[3], int16_t *block, int index, int qscale)
Definition: mpeg12.c:172
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:265
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:403
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:333
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1513
ff_mpeg2_video_profiles
const AVProfile ff_mpeg2_video_profiles[]
Definition: profiles.c:100
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
MB_TYPE_L0L1
#define MB_TYPE_L0L1
Definition: mpegutils.h:62
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:302
ff_init_scantable
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: mpegvideo.c:321
MpegEncContext::block_last_index
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:72
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
MpegEncContext::chroma_inter_matrix
uint16_t chroma_inter_matrix[64]
Definition: mpegvideo.h:299
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
Example code from the filter design notes on forwarding output status and returning FFERROR_NOT_READY (see filter_design.txt)
Definition: filter_design.txt:178
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:359
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1851
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:63
btype2mb_type
static const uint32_t btype2mb_type[11]
Definition: mpeg12dec.c:100
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:527
AVHWAccel::decode_slice
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2165
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:361
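An illustrative sketch (gb is an assumed GetBitContext): show_bits() peeks without consuming bits, so a caller typically follows it with skip_bits() once the value has been checked.
    unsigned v = show_bits(&gb, 8);  /* peek the next 8 bits */
    if (v == 0xB3)                   /* e.g. the last byte of a sequence header start code */
        skip_bits(&gb, 8);           /* only now advance the reader */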
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:42
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:131
IS_QUANT
#define IS_QUANT(a)
Definition: mpegutils.h:88
ff_mpeg12_init_vlcs
av_cold void ff_mpeg12_init_vlcs(void)
Definition: mpeg12.c:164
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1366
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_d2q
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
Definition: rational.c:106
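A hedged example: approximating a floating-point frame rate as an AVRational, with the second argument bounding the numerator and denominator of the result.
    #include <libavutil/rational.h>

    AVRational fps = av_d2q(23.976, 100000); /* rational approximation of 23.976 */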
MB_PAT_VLC_BITS
#define MB_PAT_VLC_BITS
Definition: mpeg12vlc.h:38
mpeg1_decode_block_inter
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:151
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:478
ptype2mb_type
static const uint32_t ptype2mb_type[7]
Definition: mpeg12dec.c:90
IPUContext::m
MpegEncContext m
Definition: mpeg12dec.c:2945
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:191
MpegEncContext::intra_vlc_format
int intra_vlc_format
Definition: mpegvideo.h:445
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1016
MAX_INDEX
#define MAX_INDEX
Definition: mpeg12dec.c:141
AVCodecContext::height
int height
Definition: avcodec.h:598
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:635
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:644
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:77
Mpeg1Context::stereo3d
AVStereo3D stereo3d
Definition: mpeg12dec.c:68
idctdsp.h
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
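For illustration: av_cmp_q() returns 1, 0 or -1 (and INT_MIN when a value is of the form 0/0).
    AVRational a = { 1, 25 }, b = { 1, 30 };
    int cmp = av_cmp_q(a, b);   /* 1 here, since 1/25 > 1/30; 0 if equal, -1 if smaller */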
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:585
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ff_mpeg12_frame_rate_tab
const AVRational ff_mpeg12_frame_rate_tab[]
Definition: mpeg12framerate.c:24
mpeg_decode_gop
static int mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2403
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
pred
static const float pred[4]
Definition: siprdata.h:259
AV_FRAME_DATA_GOP_TIMECODE
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
Definition: frame.h:125
frame
A frame, in the sense of the filter design notes on flushing buffered frames and the request_frame method (see filter_design.txt)
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:147
mpeg1_fast_decode_block_inter
static int mpeg1_fast_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
Definition: mpeg12dec.c:239
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:540
TEX_VLC_BITS
#define TEX_VLC_BITS
Definition: dvdec.c:146
ff_thread_finish_setup
void ff_thread_finish_setup(AVCodecContext *avctx)
Called by a frame-threaded decoder once all setup that must complete before the next decoding thread starts has been done.
left
Left neighbouring block in Snow motion vector prediction; the median of the scaled left, top and top-right vectors is used as the predictor (see snow.txt)
Definition: snow.txt:386
mpeg_get_pixelformat
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
Definition: mpeg12dec.c:1152
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:350
AV_RL32
#define AV_RL32(x)
Definition: bytestream.h:92
mpeg12data.h
mpeg2_fast_decode_block_non_intra
static int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
Definition: mpeg12dec.c:412
mpeg_field_start
static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1565
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:58
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:669
AVCodecContext
main external API structure.
Definition: avcodec.h:426
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1521
av_timecode_make_mpeg_tc_string
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
Definition: timecode.c:167
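A hedged sketch of the typical pairing with frame metadata (tc25bit and frame are assumed variables): format the 25-bit GOP timecode and attach it to the output frame as a string.
    #include <libavutil/dict.h>
    #include <libavutil/timecode.h>

    char tcbuf[AV_TIMECODE_STR_SIZE];
    av_timecode_make_mpeg_tc_string(tcbuf, tc25bit);      /* e.g. "01:00:00:00" */
    av_dict_set(&frame->metadata, "timecode", tcbuf, 0);  /* expose it to the caller */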
MpegEncContext::intra_dc_precision
int intra_dc_precision
Definition: mpegvideo.h:439
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1532
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:249
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
mpeg12dec.h
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:683
AVRational::den
int den
Denominator.
Definition: rational.h:60
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1565
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:639
ff_mb_btype_vlc
VLC ff_mb_btype_vlc
Definition: mpeg12.c:125
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:133
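A small illustration of the helper: sign_extend(v, b) reinterprets the low b bits of v as a signed value, which is how the modulo wrap-around of MPEG motion deltas is implemented.
    int a = sign_extend(0x1F, 5);   /* 0b11111 -> -1 */
    int b = sign_extend(0x0F, 5);   /* 0b01111 -> 15 */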
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:491
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
Mpeg1Context::slice_count
int slice_count
Definition: mpeg12dec.c:73
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:76
VLC::table
VLCElem * table
Definition: vlc.h:33
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1853
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
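A hedged sketch of how an MPEG-2 frame-rate extension can scale a table-derived base rate (the values below are illustrative, not taken from a real stream).
    #include <libavutil/rational.h>

    AVRational base = { 24000, 1001 };     /* index-derived base rate */
    AVRational ext  = { 2, 1 };            /* (frame_rate_ext_n+1)/(frame_rate_ext_d+1) */
    AVRational rate = av_mul_q(base, ext); /* 48000/1001 */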
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
av_buffer_realloc
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
Definition: buffer.c:183
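Illustrative only: growing (or creating, when *pbuf is NULL) a reference-counted buffer; on failure the existing buffer should be left untouched.
    #include <libavutil/buffer.h>

    AVBufferRef *buf = NULL;              /* NULL means a fresh buffer is allocated */
    int ret = av_buffer_realloc(&buf, 1024);
    if (ret < 0)
        return ret;                       /* buf is still NULL / unchanged here */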
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1358
AVHWAccel::start_frame
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2138
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:613
get_dmv
static int get_dmv(MpegEncContext *s)
Definition: mpeg12dec.c:645
tc
#define tc
Definition: regdef.h:69
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mpeg_decode_end
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2847
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
MpegEncContext::inter_scantable
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
Definition: mpegvideo.h:76
IDCTDSPContext::idct_permutation
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:87
ff_ipu_decoder
const FFCodec ff_ipu_decoder
Definition: mpeg12dec.c:3088
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:34
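A minimal sketch (frame is an assumed AVFrame pointer): allocate the stereo side data and fill in a packing type.
    #include <libavutil/error.h>
    #include <libavutil/stereo3d.h>

    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type = AV_STEREO3D_TOPBOTTOM;   /* views stacked top/bottom */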
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:236
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:36
ff_mv_vlc
VLC ff_mv_vlc
Definition: mpeg12.c:118
MpegEncContext::q_scale_type
int q_scale_type
Definition: mpegvideo.h:443
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:451
Mpeg1Context::mpeg_enc_ctx
MpegEncContext mpeg_enc_ctx
Definition: mpeg12dec.c:64
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:162
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:258
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:453
ScanTable::permutated
uint8_t permutated[64]
Definition: mpegvideo.h:60
ff_er_frame_end
void ff_er_frame_end(ERContext *s)
Definition: error_resilience.c:892
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
mpeg_get_qscale
static int mpeg_get_qscale(MpegEncContext *s)
Definition: mpegvideodec.h:64
mpeg_decode_sequence_extension
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1357
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
mpeg_er.h
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:598
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
Mpeg1Context::frame_rate_ext
AVRational frame_rate_ext
Definition: mpeg12dec.c:78
mpeg_decode_motion
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
Definition: mpeg12dec.c:115
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
IPUContext::block
int16_t block[6][64]
Definition: mpeg12dec.c:2948
mpeg_decode_user_data
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2333
h
h
Definition: vp9dsp_template.c:2038
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:144
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:34
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface, with additional information as needed.
Definition: stereo3d.h:173
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
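An illustrative pattern (width, height, sar and ctx are assumed variables): reject an implausible sample aspect ratio instead of propagating it.
    #include <libavutil/imgutils.h>
    #include <libavutil/log.h>

    if (av_image_check_sar(width, height, sar) < 0) {
        av_log(ctx, AV_LOG_WARNING, "ignoring invalid SAR %d/%d\n", sar.num, sar.den);
        sar = (AVRational){ 0, 1 };
    }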
MV_VLC_BITS
#define MV_VLC_BITS
Definition: mpeg12vlc.h:34
Mpeg1Context::timecode_frame_start
int64_t timecode_frame_start
Definition: mpeg12dec.c:85
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:143
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:72
MpegEncContext::alternate_scan
int alternate_scan
Definition: mpegvideo.h:446
DECODE_SLICE_OK
#define DECODE_SLICE_OK
Definition: mpeg12dec.c:1669
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
DECODE_SLICE_ERROR
#define DECODE_SLICE_ERROR
Definition: mpeg12dec.c:1668
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
load_matrix
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
Definition: mpeg12dec.c:1465
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:795
decode_dc
static int decode_dc(GetBitContext *gb, int component)
Definition: mpeg12dec.h:28
Mpeg1Context::afd
uint8_t afd
Definition: mpeg12dec.c:71
Mpeg1Context
Definition: mpeg12dec.c:63
MpegEncContext::chroma_intra_matrix
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:297
mpeg_decode_picture_coding_extension
static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1502
Mpeg1Context::extradata_decoded
int extradata_decoded
Definition: mpeg12dec.c:84
mpeg2_decode_block_non_intra
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:323
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:66
MBINCR_VLC_BITS
#define MBINCR_VLC_BITS
Definition: mpeg12vlc.h:37
mpeg_decode_slice
static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.
Definition: mpeg12dec.c:1677
re
float re
Definition: fft.c:79
rl_vlc
static VLC rl_vlc[2]
Definition: mobiclip.c:277