mpeg12dec.c
1 /*
2  * MPEG-1/2 decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * MPEG-1/2 decoder
26  */
27 
28 #include "config_components.h"
29 
30 #define UNCHECKED_BITSTREAM_READER 1
31 #include <inttypes.h>
32 
33 #include "libavutil/attributes.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/internal.h"
36 #include "libavutil/mem_internal.h"
37 #include "libavutil/reverse.h"
38 #include "libavutil/stereo3d.h"
39 #include "libavutil/timecode.h"
40 
41 #include "avcodec.h"
42 #include "codec_internal.h"
43 #include "decode.h"
44 #include "error_resilience.h"
45 #include "hwconfig.h"
46 #include "idctdsp.h"
47 #include "internal.h"
48 #include "mpeg_er.h"
49 #include "mpeg12.h"
50 #include "mpeg12codecs.h"
51 #include "mpeg12data.h"
52 #include "mpeg12dec.h"
53 #include "mpegutils.h"
54 #include "mpegvideo.h"
55 #include "mpegvideodata.h"
56 #include "mpegvideodec.h"
57 #include "profiles.h"
58 #include "startcode.h"
59 #include "thread.h"
60 
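/* Arbitrary sanity cap on the number of buffered A.53 closed-caption constructs,
 * so a damaged stream cannot grow the caption reconstruction buffer without bound. */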
61 #define A53_MAX_CC_COUNT 2000
62 
63 typedef struct Mpeg1Context {
64  MpegEncContext mpeg_enc_ctx;
65  int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
66  int repeat_field; /* true if we must repeat the field */
67  AVPanScan pan_scan; /* some temporary storage for the panscan */
68  AVStereo3D stereo3d;
69  int has_stereo3d;
70  AVBufferRef *a53_buf_ref;
71  uint8_t afd;
72  int has_afd;
73  int slice_count;
74  unsigned aspect_ratio_info;
75  AVRational save_aspect;
76  int save_width, save_height, save_progressive_seq;
77  int rc_buffer_size;
78  AVRational frame_rate_ext; /* MPEG-2 specific frame rate modifier */
79  unsigned frame_rate_index;
80  int sync; /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
81  int closed_gop;
82  int tmpgexs;
83  int first_slice;
84  int extradata_decoded;
85  int64_t timecode_frame_start; /*< GOP timecode frame start number, in non drop frame format */
86 } Mpeg1Context;
87 
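/* Decoder-local addition to the shared MB_TYPE_* flags: marks P-picture macroblocks
 * coded without a motion vector, for which a zero forward vector is implied. */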
88 #define MB_TYPE_ZERO_MV 0x20000000
89 
90 static const uint32_t ptype2mb_type[7] = {
91  MB_TYPE_INTRA,
92  MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV,
93  MB_TYPE_L0,
94  MB_TYPE_L0 | MB_TYPE_CBP,
95  MB_TYPE_QUANT | MB_TYPE_INTRA,
96  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV,
97  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
98 };
99 
100 static const uint32_t btype2mb_type[11] = {
101  MB_TYPE_INTRA,
102  MB_TYPE_L1,
103  MB_TYPE_L1 | MB_TYPE_CBP,
104  MB_TYPE_L0,
105  MB_TYPE_L0 | MB_TYPE_CBP,
106  MB_TYPE_L0L1,
107  MB_TYPE_L0L1 | MB_TYPE_CBP,
108  MB_TYPE_QUANT | MB_TYPE_INTRA,
109  MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP,
110  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
111  MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP,
112 };
113 
114 /* as H.263, but only 17 codes */
115 static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
116 {
117  int code, sign, val, shift;
118 
119  code = get_vlc2(&s->gb, ff_mv_vlc.table, MV_VLC_BITS, 2);
120  if (code == 0)
121  return pred;
122  if (code < 0)
123  return 0xffff;
124 
125  sign = get_bits1(&s->gb);
126  shift = fcode - 1;
127  val = code;
128  if (shift) {
129  val = (val - 1) << shift;
130  val |= get_bits(&s->gb, shift);
131  val++;
132  }
133  if (sign)
134  val = -val;
135  val += pred;
136 
137  /* modulo decoding */
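 /* Vector deltas are coded modulo a range that depends on fcode; fold the
  * prediction + delta back into [-16 << shift, (16 << shift) - 1]. */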
138  return sign_extend(val, 5 + shift);
139 }
140 
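/* MAX_INDEX is the last valid position in the 8x8 scan; check_scantable_index()
 * bails out when a coefficient run pushes the index past it, i.e. the AC data is corrupt. */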
141 #define MAX_INDEX (64 - 1)
142 #define check_scantable_index(ctx, x) \
143  do { \
144  if ((x) > MAX_INDEX) { \
145  av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
146  ctx->mb_x, ctx->mb_y); \
147  return AVERROR_INVALIDDATA; \
148  } \
149  } while (0)
150 
151 static inline int mpeg1_decode_block_inter(MpegEncContext *s,
152  int16_t *block, int n)
153 {
154  int level, i, j, run;
155  uint8_t *const scantable = s->intra_scantable.permutated;
156  const uint16_t *quant_matrix = s->inter_matrix;
157  const int qscale = s->qscale;
158 
159  {
160  OPEN_READER(re, &s->gb);
161  i = -1;
162  // special case for first coefficient, no need to add second VLC table
163  UPDATE_CACHE(re, &s->gb);
164  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
165  level = (3 * qscale * quant_matrix[0]) >> 5;
166  level = (level - 1) | 1;
167  if (GET_CACHE(re, &s->gb) & 0x40000000)
168  level = -level;
169  block[0] = level;
170  i++;
171  SKIP_BITS(re, &s->gb, 2);
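 /* A bit cache whose two MSBs are '10' means the next code is end-of-block, so the
  * signed comparison against 0xBFFFFFFF detects EOB without another table lookup. */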
172  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
173  goto end;
174  }
175  /* now decode the AC coefficients */
176  for (;;) {
177  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
178  TEX_VLC_BITS, 2, 0);
179 
180  if (level != 0) {
181  i += run;
182  if (i > MAX_INDEX)
183  break;
184  j = scantable[i];
185  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
186  level = (level - 1) | 1;
187  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
188  SHOW_SBITS(re, &s->gb, 1);
189  SKIP_BITS(re, &s->gb, 1);
190  } else {
191  /* escape */
192  run = SHOW_UBITS(re, &s->gb, 6) + 1;
193  LAST_SKIP_BITS(re, &s->gb, 6);
194  UPDATE_CACHE(re, &s->gb);
195  level = SHOW_SBITS(re, &s->gb, 8);
196  SKIP_BITS(re, &s->gb, 8);
197  if (level == -128) {
198  level = SHOW_UBITS(re, &s->gb, 8) - 256;
199  SKIP_BITS(re, &s->gb, 8);
200  } else if (level == 0) {
201  level = SHOW_UBITS(re, &s->gb, 8);
202  SKIP_BITS(re, &s->gb, 8);
203  }
204  i += run;
205  if (i > MAX_INDEX)
206  break;
207  j = scantable[i];
208  if (level < 0) {
209  level = -level;
210  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
211  level = (level - 1) | 1;
212  level = -level;
213  } else {
214  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
215  level = (level - 1) | 1;
216  }
217  }
218 
219  block[j] = level;
220  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
221  break;
222  UPDATE_CACHE(re, &s->gb);
223  }
224 end:
225  LAST_SKIP_BITS(re, &s->gb, 2);
226  CLOSE_READER(re, &s->gb);
227  }
228 
229  check_scantable_index(s, i);
230 
231  s->block_last_index[n] = i;
232  return 0;
233 }
234 
235 /**
236  * Changing this would eat up any speed benefits it has.
237  * Do not use "fast" flag if you need the code to be robust.
238  */
239 static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s,
240  int16_t *block, int n)
241 {
242  int level, i, j, run;
243  uint8_t *const scantable = s->intra_scantable.permutated;
244  const int qscale = s->qscale;
245 
246  {
247  OPEN_READER(re, &s->gb);
248  i = -1;
249  // Special case for first coefficient, no need to add second VLC table.
250  UPDATE_CACHE(re, &s->gb);
251  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
252  level = (3 * qscale) >> 1;
253  level = (level - 1) | 1;
254  if (GET_CACHE(re, &s->gb) & 0x40000000)
255  level = -level;
256  block[0] = level;
257  i++;
258  SKIP_BITS(re, &s->gb, 2);
259  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
260  goto end;
261  }
262 
263  /* now decode the AC coefficients */
264  for (;;) {
265  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
266  TEX_VLC_BITS, 2, 0);
267 
268  if (level != 0) {
269  i += run;
270  if (i > MAX_INDEX)
271  break;
272  j = scantable[i];
273  level = ((level * 2 + 1) * qscale) >> 1;
274  level = (level - 1) | 1;
275  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
276  SHOW_SBITS(re, &s->gb, 1);
277  SKIP_BITS(re, &s->gb, 1);
278  } else {
279  /* escape */
280  run = SHOW_UBITS(re, &s->gb, 6) + 1;
281  LAST_SKIP_BITS(re, &s->gb, 6);
282  UPDATE_CACHE(re, &s->gb);
283  level = SHOW_SBITS(re, &s->gb, 8);
284  SKIP_BITS(re, &s->gb, 8);
285  if (level == -128) {
286  level = SHOW_UBITS(re, &s->gb, 8) - 256;
287  SKIP_BITS(re, &s->gb, 8);
288  } else if (level == 0) {
289  level = SHOW_UBITS(re, &s->gb, 8);
290  SKIP_BITS(re, &s->gb, 8);
291  }
292  i += run;
293  if (i > MAX_INDEX)
294  break;
295  j = scantable[i];
296  if (level < 0) {
297  level = -level;
298  level = ((level * 2 + 1) * qscale) >> 1;
299  level = (level - 1) | 1;
300  level = -level;
301  } else {
302  level = ((level * 2 + 1) * qscale) >> 1;
303  level = (level - 1) | 1;
304  }
305  }
306 
307  block[j] = level;
308  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
309  break;
310  UPDATE_CACHE(re, &s->gb);
311  }
312 end:
313  LAST_SKIP_BITS(re, &s->gb, 2);
314  CLOSE_READER(re, &s->gb);
315  }
316 
317  check_scantable_index(s, i);
318 
319  s->block_last_index[n] = i;
320  return 0;
321 }
322 
323 static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
324  int16_t *block, int n)
325 {
326  int level, i, j, run;
327  uint8_t *const scantable = s->intra_scantable.permutated;
328  const uint16_t *quant_matrix;
329  const int qscale = s->qscale;
330  int mismatch;
331 
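 /* MPEG-2 mismatch control: track the parity of the sum of the reconstructed
  * coefficients; block[63] gets its LSB toggled at the end so the sum is always odd. */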
332  mismatch = 1;
333 
334  {
335  OPEN_READER(re, &s->gb);
336  i = -1;
337  if (n < 4)
338  quant_matrix = s->inter_matrix;
339  else
340  quant_matrix = s->chroma_inter_matrix;
341 
342  // Special case for first coefficient, no need to add second VLC table.
343  UPDATE_CACHE(re, &s->gb);
344  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
345  level = (3 * qscale * quant_matrix[0]) >> 5;
346  if (GET_CACHE(re, &s->gb) & 0x40000000)
347  level = -level;
348  block[0] = level;
349  mismatch ^= level;
350  i++;
351  SKIP_BITS(re, &s->gb, 2);
352  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
353  goto end;
354  }
355 
356  /* now decode the AC coefficients */
357  for (;;) {
358  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
359  TEX_VLC_BITS, 2, 0);
360 
361  if (level != 0) {
362  i += run;
363  if (i > MAX_INDEX)
364  break;
365  j = scantable[i];
366  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
367  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
368  SHOW_SBITS(re, &s->gb, 1);
369  SKIP_BITS(re, &s->gb, 1);
370  } else {
371  /* escape */
372  run = SHOW_UBITS(re, &s->gb, 6) + 1;
373  LAST_SKIP_BITS(re, &s->gb, 6);
374  UPDATE_CACHE(re, &s->gb);
375  level = SHOW_SBITS(re, &s->gb, 12);
376  SKIP_BITS(re, &s->gb, 12);
377 
378  i += run;
379  if (i > MAX_INDEX)
380  break;
381  j = scantable[i];
382  if (level < 0) {
383  level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
384  level = -level;
385  } else {
386  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
387  }
388  }
389 
390  mismatch ^= level;
391  block[j] = level;
392  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
393  break;
394  UPDATE_CACHE(re, &s->gb);
395  }
396 end:
397  LAST_SKIP_BITS(re, &s->gb, 2);
398  CLOSE_READER(re, &s->gb);
399  }
400  block[63] ^= (mismatch & 1);
401 
402  check_scantable_index(s, i);
403 
404  s->block_last_index[n] = i;
405  return 0;
406 }
407 
408 /**
409  * Changing this would eat up any speed benefits it has.
410  * Do not use "fast" flag if you need the code to be robust.
411  */
412 static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,
413  int16_t *block, int n)
414 {
415  int level, i, j, run;
416  uint8_t *const scantable = s->intra_scantable.permutated;
417  const int qscale = s->qscale;
418  OPEN_READER(re, &s->gb);
419  i = -1;
420 
421  // special case for first coefficient, no need to add second VLC table
422  UPDATE_CACHE(re, &s->gb);
423  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
424  level = (3 * qscale) >> 1;
425  if (GET_CACHE(re, &s->gb) & 0x40000000)
426  level = -level;
427  block[0] = level;
428  i++;
429  SKIP_BITS(re, &s->gb, 2);
430  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
431  goto end;
432  }
433 
434  /* now decode the AC coefficients */
435  for (;;) {
436  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc, TEX_VLC_BITS, 2, 0);
437 
438  if (level != 0) {
439  i += run;
440  if (i > MAX_INDEX)
441  break;
442  j = scantable[i];
443  level = ((level * 2 + 1) * qscale) >> 1;
444  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
445  SHOW_SBITS(re, &s->gb, 1);
446  SKIP_BITS(re, &s->gb, 1);
447  } else {
448  /* escape */
449  run = SHOW_UBITS(re, &s->gb, 6) + 1;
450  LAST_SKIP_BITS(re, &s->gb, 6);
451  UPDATE_CACHE(re, &s->gb);
452  level = SHOW_SBITS(re, &s->gb, 12);
453  SKIP_BITS(re, &s->gb, 12);
454 
455  i += run;
456  if (i > MAX_INDEX)
457  break;
458  j = scantable[i];
459  if (level < 0) {
460  level = ((-level * 2 + 1) * qscale) >> 1;
461  level = -level;
462  } else {
463  level = ((level * 2 + 1) * qscale) >> 1;
464  }
465  }
466 
467  block[j] = level;
468  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF || i > 63)
469  break;
470 
471  UPDATE_CACHE(re, &s->gb);
472  }
473 end:
474  LAST_SKIP_BITS(re, &s->gb, 2);
475  CLOSE_READER(re, &s->gb);
476 
477  check_scantable_index(s, i);
478 
479  s->block_last_index[n] = i;
480  return 0;
481 }
482 
483 static inline int mpeg2_decode_block_intra(MpegEncContext *s,
484  int16_t *block, int n)
485 {
486  int level, dc, diff, i, j, run;
487  int component;
488  const RL_VLC_ELEM *rl_vlc;
489  uint8_t *const scantable = s->intra_scantable.permutated;
490  const uint16_t *quant_matrix;
491  const int qscale = s->qscale;
492  int mismatch;
493 
494  /* DC coefficient */
495  if (n < 4) {
496  quant_matrix = s->intra_matrix;
497  component = 0;
498  } else {
499  quant_matrix = s->chroma_intra_matrix;
500  component = (n & 1) + 1;
501  }
502  diff = decode_dc(&s->gb, component);
503  dc = s->last_dc[component];
504  dc += diff;
505  s->last_dc[component] = dc;
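 /* intra_dc_precision is 0..3 for 8..11-bit DC values; scale the reconstructed DC
  * so block[0] ends up at the same amplitude regardless of the coded precision. */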
506  block[0] = dc * (1 << (3 - s->intra_dc_precision));
507  ff_tlog(s->avctx, "dc=%d\n", block[0]);
508  mismatch = block[0] ^ 1;
509  i = 0;
510  if (s->intra_vlc_format)
511  rl_vlc = ff_mpeg2_rl_vlc;
512  else
513  rl_vlc = ff_mpeg1_rl_vlc;
514 
515  {
516  OPEN_READER(re, &s->gb);
517  /* now decode the AC coefficients */
518  for (;;) {
519  UPDATE_CACHE(re, &s->gb);
520  GET_RL_VLC(level, run, re, &s->gb, rl_vlc,
521  TEX_VLC_BITS, 2, 0);
522 
523  if (level == 127) {
524  break;
525  } else if (level != 0) {
526  i += run;
527  if (i > MAX_INDEX)
528  break;
529  j = scantable[i];
530  level = (level * qscale * quant_matrix[j]) >> 4;
531  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
532  SHOW_SBITS(re, &s->gb, 1);
533  LAST_SKIP_BITS(re, &s->gb, 1);
534  } else {
535  /* escape */
536  run = SHOW_UBITS(re, &s->gb, 6) + 1;
537  SKIP_BITS(re, &s->gb, 6);
538  level = SHOW_SBITS(re, &s->gb, 12);
539  LAST_SKIP_BITS(re, &s->gb, 12);
540  i += run;
541  if (i > MAX_INDEX)
542  break;
543  j = scantable[i];
544  if (level < 0) {
545  level = (-level * qscale * quant_matrix[j]) >> 4;
546  level = -level;
547  } else {
548  level = (level * qscale * quant_matrix[j]) >> 4;
549  }
550  }
551 
552  mismatch ^= level;
553  block[j] = level;
554  }
555  CLOSE_READER(re, &s->gb);
556  }
557  block[63] ^= mismatch & 1;
558 
559  check_scantable_index(s, i);
560 
561  s->block_last_index[n] = i;
562  return 0;
563 }
564 
565 /**
566  * Changing this would eat up any speed benefits it has.
567  * Do not use "fast" flag if you need the code to be robust.
568  */
569 static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s,
570  int16_t *block, int n)
571 {
572  int level, dc, diff, i, j, run;
573  int component;
574  const RL_VLC_ELEM *rl_vlc;
575  uint8_t *const scantable = s->intra_scantable.permutated;
576  const uint16_t *quant_matrix;
577  const int qscale = s->qscale;
578 
579  /* DC coefficient */
580  if (n < 4) {
581  quant_matrix = s->intra_matrix;
582  component = 0;
583  } else {
584  quant_matrix = s->chroma_intra_matrix;
585  component = (n & 1) + 1;
586  }
587  diff = decode_dc(&s->gb, component);
588  dc = s->last_dc[component];
589  dc += diff;
590  s->last_dc[component] = dc;
591  block[0] = dc * (1 << (3 - s->intra_dc_precision));
592  i = 0;
593  if (s->intra_vlc_format)
594  rl_vlc = ff_mpeg2_rl_vlc;
595  else
596  rl_vlc = ff_mpeg1_rl_vlc;
597 
598  {
599  OPEN_READER(re, &s->gb);
600  /* now decode the AC coefficients */
601  for (;;) {
602  UPDATE_CACHE(re, &s->gb);
603  GET_RL_VLC(level, run, re, &s->gb, rl_vlc,
604  TEX_VLC_BITS, 2, 0);
605 
606  if (level >= 64 || i > 63) {
607  break;
608  } else if (level != 0) {
609  i += run;
610  j = scantable[i];
611  level = (level * qscale * quant_matrix[j]) >> 4;
612  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
613  SHOW_SBITS(re, &s->gb, 1);
614  LAST_SKIP_BITS(re, &s->gb, 1);
615  } else {
616  /* escape */
617  run = SHOW_UBITS(re, &s->gb, 6) + 1;
618  SKIP_BITS(re, &s->gb, 6);
619  level = SHOW_SBITS(re, &s->gb, 12);
620  LAST_SKIP_BITS(re, &s->gb, 12);
621  i += run;
622  j = scantable[i];
623  if (level < 0) {
624  level = (-level * qscale * quant_matrix[j]) >> 4;
625  level = -level;
626  } else {
627  level = (level * qscale * quant_matrix[j]) >> 4;
628  }
629  }
630 
631  block[j] = level;
632  }
633  CLOSE_READER(re, &s->gb);
634  }
635 
636  check_scantable_index(s, i);
637 
638  s->block_last_index[n] = i;
639  return 0;
640 }
641 
642 /******************************************/
643 /* decoding */
644 
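/* Read one dual-prime differential component: a '1' bit followed by a sign bit
 * yields +1 or -1, a single '0' bit yields 0 (MPEG-2 dmvector[] syntax). */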
645 static inline int get_dmv(MpegEncContext *s)
646 {
647  if (get_bits1(&s->gb))
648  return 1 - (get_bits1(&s->gb) << 1);
649  else
650  return 0;
651 }
652 
653 /* motion type (for MPEG-2) */
654 #define MT_FIELD 1
655 #define MT_FRAME 2
656 #define MT_16X8 2
657 #define MT_DMV 3
658 
659 static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
660 {
661  int i, j, k, cbp, val, mb_type, motion_type;
662  const int mb_block_count = 4 + (1 << s->chroma_format);
663  int ret;
664 
665  ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
666 
667  av_assert2(s->mb_skipped == 0);
668 
669  if (s->mb_skip_run-- != 0) {
670  if (s->pict_type == AV_PICTURE_TYPE_P) {
671  s->mb_skipped = 1;
672  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
673  MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
674  } else {
675  int mb_type;
676 
677  if (s->mb_x)
678  mb_type = s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
679  else
680  // FIXME not sure if this is allowed in MPEG at all
681  mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
682  if (IS_INTRA(mb_type)) {
683  av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
684  return AVERROR_INVALIDDATA;
685  }
686  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
687  mb_type | MB_TYPE_SKIP;
688 
689  if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
690  s->mb_skipped = 1;
691  }
692 
693  return 0;
694  }
695 
696  switch (s->pict_type) {
697  default:
698  case AV_PICTURE_TYPE_I:
699  if (get_bits1(&s->gb) == 0) {
700  if (get_bits1(&s->gb) == 0) {
701  av_log(s->avctx, AV_LOG_ERROR,
702  "Invalid mb type in I-frame at %d %d\n",
703  s->mb_x, s->mb_y);
704  return AVERROR_INVALIDDATA;
705  }
706  mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
707  } else {
708  mb_type = MB_TYPE_INTRA;
709  }
710  break;
711  case AV_PICTURE_TYPE_P:
712  mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1);
713  if (mb_type < 0) {
714  av_log(s->avctx, AV_LOG_ERROR,
715  "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
716  return AVERROR_INVALIDDATA;
717  }
718  mb_type = ptype2mb_type[mb_type];
719  break;
720  case AV_PICTURE_TYPE_B:
721  mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1);
722  if (mb_type < 0) {
723  av_log(s->avctx, AV_LOG_ERROR,
724  "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);
725  return AVERROR_INVALIDDATA;
726  }
727  mb_type = btype2mb_type[mb_type];
728  break;
729  }
730  ff_tlog(s->avctx, "mb_type=%x\n", mb_type);
731 // motion_type = 0; /* avoid warning */
732  if (IS_INTRA(mb_type)) {
733  s->bdsp.clear_blocks(s->block[0]);
734 
735  if (!s->chroma_y_shift)
736  s->bdsp.clear_blocks(s->block[6]);
737 
738  /* compute DCT type */
739  // FIXME: add an interlaced_dct coded var?
740  if (s->picture_structure == PICT_FRAME &&
741  !s->frame_pred_frame_dct)
742  s->interlaced_dct = get_bits1(&s->gb);
743 
744  if (IS_QUANT(mb_type))
745  s->qscale = mpeg_get_qscale(s);
746 
747  if (s->concealment_motion_vectors) {
748  /* just parse them */
749  if (s->picture_structure != PICT_FRAME)
750  skip_bits1(&s->gb); /* field select */
751 
752  s->mv[0][0][0] =
753  s->last_mv[0][0][0] =
754  s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0],
755  s->last_mv[0][0][0]);
756  s->mv[0][0][1] =
757  s->last_mv[0][0][1] =
758  s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1],
759  s->last_mv[0][0][1]);
760 
761  check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
762  } else {
763  /* reset mv prediction */
764  memset(s->last_mv, 0, sizeof(s->last_mv));
765  }
766  s->mb_intra = 1;
767 
768  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
769  if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
770  for (i = 0; i < 6; i++)
771  mpeg2_fast_decode_block_intra(s, *s->pblocks[i], i);
772  } else {
773  for (i = 0; i < mb_block_count; i++)
774  if ((ret = mpeg2_decode_block_intra(s, *s->pblocks[i], i)) < 0)
775  return ret;
776  }
777  } else {
778  for (i = 0; i < 6; i++) {
779  ret = ff_mpeg1_decode_block_intra(&s->gb,
780  s->intra_matrix,
781  s->intra_scantable.permutated,
782  s->last_dc, *s->pblocks[i],
783  i, s->qscale);
784  if (ret < 0) {
785  av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
786  s->mb_x, s->mb_y);
787  return ret;
788  }
789 
790  s->block_last_index[i] = ret;
791  }
792  }
793  } else {
794  if (mb_type & MB_TYPE_ZERO_MV) {
795  av_assert2(mb_type & MB_TYPE_CBP);
796 
797  s->mv_dir = MV_DIR_FORWARD;
798  if (s->picture_structure == PICT_FRAME) {
799  if (s->picture_structure == PICT_FRAME
800  && !s->frame_pred_frame_dct)
801  s->interlaced_dct = get_bits1(&s->gb);
802  s->mv_type = MV_TYPE_16X16;
803  } else {
804  s->mv_type = MV_TYPE_FIELD;
805  mb_type |= MB_TYPE_INTERLACED;
806  s->field_select[0][0] = s->picture_structure - 1;
807  }
808 
809  if (IS_QUANT(mb_type))
810  s->qscale = mpeg_get_qscale(s);
811 
812  s->last_mv[0][0][0] = 0;
813  s->last_mv[0][0][1] = 0;
814  s->last_mv[0][1][0] = 0;
815  s->last_mv[0][1][1] = 0;
816  s->mv[0][0][0] = 0;
817  s->mv[0][0][1] = 0;
818  } else {
819  av_assert2(mb_type & MB_TYPE_L0L1);
820  // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
821  /* get additional motion vector type */
822  if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
823  motion_type = MT_FRAME;
824  } else {
825  motion_type = get_bits(&s->gb, 2);
826  if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
827  s->interlaced_dct = get_bits1(&s->gb);
828  }
829 
830  if (IS_QUANT(mb_type))
831  s->qscale = mpeg_get_qscale(s);
832 
833  /* motion vectors */
834  s->mv_dir = (mb_type >> 13) & 3;
835  ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
836  switch (motion_type) {
837  case MT_FRAME: /* or MT_16X8 */
838  if (s->picture_structure == PICT_FRAME) {
839  mb_type |= MB_TYPE_16x16;
840  s->mv_type = MV_TYPE_16X16;
841  for (i = 0; i < 2; i++) {
842  if (USES_LIST(mb_type, i)) {
843  /* MT_FRAME */
844  s->mv[i][0][0] =
845  s->last_mv[i][0][0] =
846  s->last_mv[i][1][0] =
847  mpeg_decode_motion(s, s->mpeg_f_code[i][0],
848  s->last_mv[i][0][0]);
849  s->mv[i][0][1] =
850  s->last_mv[i][0][1] =
851  s->last_mv[i][1][1] =
852  mpeg_decode_motion(s, s->mpeg_f_code[i][1],
853  s->last_mv[i][0][1]);
854  /* full_pel: only for MPEG-1 */
855  if (s->full_pel[i]) {
856  s->mv[i][0][0] *= 2;
857  s->mv[i][0][1] *= 2;
858  }
859  }
860  }
861  } else {
862  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
863  s->mv_type = MV_TYPE_16X8;
864  for (i = 0; i < 2; i++) {
865  if (USES_LIST(mb_type, i)) {
866  /* MT_16X8 */
867  for (j = 0; j < 2; j++) {
868  s->field_select[i][j] = get_bits1(&s->gb);
869  for (k = 0; k < 2; k++) {
870  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
871  s->last_mv[i][j][k]);
872  s->last_mv[i][j][k] = val;
873  s->mv[i][j][k] = val;
874  }
875  }
876  }
877  }
878  }
879  break;
880  case MT_FIELD:
881  s->mv_type = MV_TYPE_FIELD;
882  if (s->picture_structure == PICT_FRAME) {
883  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
884  for (i = 0; i < 2; i++) {
885  if (USES_LIST(mb_type, i)) {
886  for (j = 0; j < 2; j++) {
887  s->field_select[i][j] = get_bits1(&s->gb);
888  val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
889  s->last_mv[i][j][0]);
890  s->last_mv[i][j][0] = val;
891  s->mv[i][j][0] = val;
892  ff_tlog(s->avctx, "fmx=%d\n", val);
893  val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
894  s->last_mv[i][j][1] >> 1);
895  s->last_mv[i][j][1] = 2 * val;
896  s->mv[i][j][1] = val;
897  ff_tlog(s->avctx, "fmy=%d\n", val);
898  }
899  }
900  }
901  } else {
902  av_assert0(!s->progressive_sequence);
903  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
904  for (i = 0; i < 2; i++) {
905  if (USES_LIST(mb_type, i)) {
906  s->field_select[i][0] = get_bits1(&s->gb);
907  for (k = 0; k < 2; k++) {
908  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
909  s->last_mv[i][0][k]);
910  s->last_mv[i][0][k] = val;
911  s->last_mv[i][1][k] = val;
912  s->mv[i][0][k] = val;
913  }
914  }
915  }
916  }
917  break;
918  case MT_DMV:
919  if (s->progressive_sequence){
920  av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
921  return AVERROR_INVALIDDATA;
922  }
923  s->mv_type = MV_TYPE_DMV;
924  for (i = 0; i < 2; i++) {
925  if (USES_LIST(mb_type, i)) {
926  int dmx, dmy, mx, my, m;
927  const int my_shift = s->picture_structure == PICT_FRAME;
928 
929  mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
930  s->last_mv[i][0][0]);
931  s->last_mv[i][0][0] = mx;
932  s->last_mv[i][1][0] = mx;
933  dmx = get_dmv(s);
934  my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
935  s->last_mv[i][0][1] >> my_shift);
936  dmy = get_dmv(s);
937 
938 
939  s->last_mv[i][0][1] = my * (1 << my_shift);
940  s->last_mv[i][1][1] = my * (1 << my_shift);
941 
942  s->mv[i][0][0] = mx;
943  s->mv[i][0][1] = my;
944  s->mv[i][1][0] = mx; // not used
945  s->mv[i][1][1] = my; // not used
946 
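 /* Dual prime: the transmitted vector predicts from the same-parity field; the
  * opposite-parity prediction below is derived by scaling it to the field distance
  * (m or 4 - m), adding the small differential (dmx, dmy) and a +/-1 vertical
  * correction for the line offset between the two fields. */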
947  if (s->picture_structure == PICT_FRAME) {
948  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
949 
950  // m = 1 + 2 * s->top_field_first;
951  m = s->top_field_first ? 1 : 3;
952 
953  /* top -> top pred */
954  s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
955  s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
956  m = 4 - m;
957  s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
958  s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
959  } else {
960  mb_type |= MB_TYPE_16x16;
961 
962  s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
963  s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
964  if (s->picture_structure == PICT_TOP_FIELD)
965  s->mv[i][2][1]--;
966  else
967  s->mv[i][2][1]++;
968  }
969  }
970  }
971  break;
972  default:
973  av_log(s->avctx, AV_LOG_ERROR,
974  "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
975  return AVERROR_INVALIDDATA;
976  }
977  }
978 
979  s->mb_intra = 0;
980  if (HAS_CBP(mb_type)) {
981  s->bdsp.clear_blocks(s->block[0]);
982 
983  cbp = get_vlc2(&s->gb, ff_mb_pat_vlc.table, MB_PAT_VLC_BITS, 1);
984  if (mb_block_count > 6) {
985  cbp *= 1 << mb_block_count - 6;
986  cbp |= get_bits(&s->gb, mb_block_count - 6);
987  s->bdsp.clear_blocks(s->block[6]);
988  }
989  if (cbp <= 0) {
990  av_log(s->avctx, AV_LOG_ERROR,
991  "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
992  return AVERROR_INVALIDDATA;
993  }
994 
995  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
996  if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
997  for (i = 0; i < 6; i++) {
998  if (cbp & 32)
999  mpeg2_fast_decode_block_non_intra(s, *s->pblocks[i], i);
1000  else
1001  s->block_last_index[i] = -1;
1002  cbp += cbp;
1003  }
1004  } else {
1005  cbp <<= 12 - mb_block_count;
1006 
1007  for (i = 0; i < mb_block_count; i++) {
1008  if (cbp & (1 << 11)) {
1009  if ((ret = mpeg2_decode_block_non_intra(s, *s->pblocks[i], i)) < 0)
1010  return ret;
1011  } else {
1012  s->block_last_index[i] = -1;
1013  }
1014  cbp += cbp;
1015  }
1016  }
1017  } else {
1018  if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
1019  for (i = 0; i < 6; i++) {
1020  if (cbp & 32)
1021  mpeg1_fast_decode_block_inter(s, *s->pblocks[i], i);
1022  else
1023  s->block_last_index[i] = -1;
1024  cbp += cbp;
1025  }
1026  } else {
1027  for (i = 0; i < 6; i++) {
1028  if (cbp & 32) {
1029  if ((ret = mpeg1_decode_block_inter(s, *s->pblocks[i], i)) < 0)
1030  return ret;
1031  } else {
1032  s->block_last_index[i] = -1;
1033  }
1034  cbp += cbp;
1035  }
1036  }
1037  }
1038  } else {
1039  for (i = 0; i < 12; i++)
1040  s->block_last_index[i] = -1;
1041  }
1042  }
1043 
1044  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
1045 
1046  return 0;
1047 }
1048 
1049 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
1050 {
1051  Mpeg1Context *s = avctx->priv_data;
1052  MpegEncContext *s2 = &s->mpeg_enc_ctx;
1053 
1054  if ( avctx->codec_tag != AV_RL32("VCR2")
1055  && avctx->codec_tag != AV_RL32("BW10"))
1056  avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
1057  ff_mpv_decode_init(s2, avctx);
1058 
1059  /* we need some permutation to store matrices,
1060  * until the decoder sets the real permutation. */
1061  ff_mpv_idct_init(s2);
1062  ff_mpeg12_init_vlcs();
1063 
1064  s2->chroma_format = 1;
1065  s->mpeg_enc_ctx_allocated = 0;
1066  s->repeat_field = 0;
1067  avctx->color_range = AVCOL_RANGE_MPEG;
1068  return 0;
1069 }
1070 
1071 #if HAVE_THREADS
1072 static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
1073  const AVCodecContext *avctx_from)
1074 {
1075  Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
1076  MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
1077  int err;
1078 
1079  if (avctx == avctx_from ||
1080  !ctx_from->mpeg_enc_ctx_allocated ||
1081  !s1->context_initialized)
1082  return 0;
1083 
1084  err = ff_mpeg_update_thread_context(avctx, avctx_from);
1085  if (err)
1086  return err;
1087 
1088  if (!ctx->mpeg_enc_ctx_allocated)
1089  memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
1090 
1091  return 0;
1092 }
1093 #endif
1094 
1095 static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm,
1096  const uint8_t *new_perm)
1097 {
1098  uint16_t temp_matrix[64];
1099  int i;
1100 
1101  memcpy(temp_matrix, matrix, 64 * sizeof(uint16_t));
1102 
1103  for (i = 0; i < 64; i++)
1104  matrix[new_perm[i]] = temp_matrix[old_perm[i]];
1105 }
1106 
1107 static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = {
1108 #if CONFIG_MPEG1_NVDEC_HWACCEL
1109  AV_PIX_FMT_CUDA,
1110 #endif
1111 #if CONFIG_MPEG1_VDPAU_HWACCEL
1112  AV_PIX_FMT_VDPAU,
1113 #endif
1114  AV_PIX_FMT_YUV420P,
1115  AV_PIX_FMT_NONE
1116 };
1117 
1118 static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
1119 #if CONFIG_MPEG2_NVDEC_HWACCEL
1120  AV_PIX_FMT_CUDA,
1121 #endif
1122 #if CONFIG_MPEG2_VDPAU_HWACCEL
1123  AV_PIX_FMT_VDPAU,
1124 #endif
1125 #if CONFIG_MPEG2_DXVA2_HWACCEL
1126  AV_PIX_FMT_DXVA2_VLD,
1127 #endif
1128 #if CONFIG_MPEG2_D3D11VA_HWACCEL
1129  AV_PIX_FMT_D3D11VA_VLD,
1130  AV_PIX_FMT_D3D11,
1131 #endif
1132 #if CONFIG_MPEG2_VAAPI_HWACCEL
1133  AV_PIX_FMT_VAAPI,
1134 #endif
1135 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
1136  AV_PIX_FMT_VIDEOTOOLBOX,
1137 #endif
1138  AV_PIX_FMT_YUV420P,
1139  AV_PIX_FMT_NONE
1140 };
1141 
1142 static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
1143  AV_PIX_FMT_YUV422P,
1144  AV_PIX_FMT_NONE
1145 };
1146 
1147 static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
1148  AV_PIX_FMT_YUV444P,
1149  AV_PIX_FMT_NONE
1150 };
1151 
1152 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
1153 {
1154  Mpeg1Context *s1 = avctx->priv_data;
1155  MpegEncContext *s = &s1->mpeg_enc_ctx;
1156  const enum AVPixelFormat *pix_fmts;
1157 
1158  if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
1159  return AV_PIX_FMT_GRAY8;
1160 
1161  if (s->chroma_format < 2)
1162  pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
1163  mpeg1_hwaccel_pixfmt_list_420 :
1164  mpeg2_hwaccel_pixfmt_list_420;
1165  else if (s->chroma_format == 2)
1166  pix_fmts = mpeg12_pixfmt_list_422;
1167  else
1168  pix_fmts = mpeg12_pixfmt_list_444;
1169 
1170  return ff_thread_get_format(avctx, pix_fmts);
1171 }
1172 
1173 /* Call this function when we know all parameters.
1174  * It may be called in different places for MPEG-1 and MPEG-2. */
1175 static int mpeg_decode_postinit(AVCodecContext *avctx)
1176 {
1177  Mpeg1Context *s1 = avctx->priv_data;
1178  MpegEncContext *s = &s1->mpeg_enc_ctx;
1179  uint8_t old_permutation[64];
1180  int ret;
1181 
1182  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
1183  // MPEG-1 aspect
1184  AVRational aspect_inv = av_d2q(ff_mpeg1_aspect[s1->aspect_ratio_info], 255);
1185  avctx->sample_aspect_ratio = (AVRational) { aspect_inv.den, aspect_inv.num };
1186  } else { // MPEG-2
1187  // MPEG-2 aspect
1188  if (s1->aspect_ratio_info > 1) {
1189  AVRational dar =
1190  av_mul_q(av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
1191  (AVRational) { s1->pan_scan.width,
1192  s1->pan_scan.height }),
1193  (AVRational) { s->width, s->height });
1194 
1195  /* We ignore the spec here and guess a bit as reality does not
1196  * match the spec, see for example res_change_ffmpeg_aspect.ts
1197  * and sequence-display-aspect.mpg.
1198  * issue1613, 621, 562 */
1199  if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
1200  (av_cmp_q(dar, (AVRational) { 4, 3 }) &&
1201  av_cmp_q(dar, (AVRational) { 16, 9 }))) {
1202  s->avctx->sample_aspect_ratio =
1203  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
1204  (AVRational) { s->width, s->height });
1205  } else {
1206  s->avctx->sample_aspect_ratio =
1207  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
1208  (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
1209 // issue1613 4/3 16/9 -> 16/9
1210 // res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
1211 // widescreen-issue562.mpg 4/3 16/9 -> 16/9
1212 // s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height});
1213  ff_dlog(avctx, "aspect A %d/%d\n",
1214  ff_mpeg2_aspect[s1->aspect_ratio_info].num,
1215  ff_mpeg2_aspect[s1->aspect_ratio_info].den);
1216  ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
1217  s->avctx->sample_aspect_ratio.den);
1218  }
1219  } else {
1220  s->avctx->sample_aspect_ratio =
1221  ff_mpeg2_aspect[s1->aspect_ratio_info];
1222  }
1223  } // MPEG-2
1224 
1225  if (av_image_check_sar(s->width, s->height,
1226  avctx->sample_aspect_ratio) < 0) {
1227  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1228  avctx->sample_aspect_ratio.num,
1229  avctx->sample_aspect_ratio.den);
1230  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
1231  }
1232 
1233  if ((s1->mpeg_enc_ctx_allocated == 0) ||
1234  avctx->coded_width != s->width ||
1235  avctx->coded_height != s->height ||
1236  s1->save_width != s->width ||
1237  s1->save_height != s->height ||
1238  av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) ||
1239  (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
1240  0) {
1241  if (s1->mpeg_enc_ctx_allocated) {
1242 #if FF_API_FLAG_TRUNCATED
1243  ParseContext pc = s->parse_context;
1244  s->parse_context.buffer = 0;
1245  ff_mpv_common_end(s);
1246  s->parse_context = pc;
1247 #else
1248  ff_mpv_common_end(s);
1249 #endif
1250  s1->mpeg_enc_ctx_allocated = 0;
1251  }
1252 
1253  ret = ff_set_dimensions(avctx, s->width, s->height);
1254  if (ret < 0)
1255  return ret;
1256 
1257  if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->bit_rate) {
1258  avctx->rc_max_rate = s->bit_rate;
1259  } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s->bit_rate &&
1260  (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) {
1261  avctx->bit_rate = s->bit_rate;
1262  }
1263  s1->save_aspect = s->avctx->sample_aspect_ratio;
1264  s1->save_width = s->width;
1265  s1->save_height = s->height;
1266  s1->save_progressive_seq = s->progressive_sequence;
1267 
1268  /* low_delay may be forced, in this case we will have B-frames
1269  * that behave like P-frames. */
1270  avctx->has_b_frames = !s->low_delay;
1271 
1272  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
1273  // MPEG-1 fps
1274  avctx->framerate = ff_mpeg12_frame_rate_tab[s1->frame_rate_index];
1275  avctx->ticks_per_frame = 1;
1276 
1277  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
1278  } else { // MPEG-2
1279  // MPEG-2 fps
1280  av_reduce(&s->avctx->framerate.num,
1281  &s->avctx->framerate.den,
1282  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].num * s1->frame_rate_ext.num,
1283  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].den * s1->frame_rate_ext.den,
1284  1 << 30);
1285  avctx->ticks_per_frame = 2;
1286 
1287  switch (s->chroma_format) {
1288  case 1: avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break;
1289  case 2:
1290  case 3: avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; break;
1291  default: av_assert0(0);
1292  }
1293  } // MPEG-2
1294 
1295  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1296 
1297  /* Quantization matrices may need reordering
1298  * if DCT permutation is changed. */
1299  memcpy(old_permutation, s->idsp.idct_permutation, 64 * sizeof(uint8_t));
1300 
1301  ff_mpv_idct_init(s);
1302  if ((ret = ff_mpv_common_init(s)) < 0)
1303  return ret;
1304 
1305  quant_matrix_rebuild(s->intra_matrix, old_permutation, s->idsp.idct_permutation);
1306  quant_matrix_rebuild(s->inter_matrix, old_permutation, s->idsp.idct_permutation);
1307  quant_matrix_rebuild(s->chroma_intra_matrix, old_permutation, s->idsp.idct_permutation);
1308  quant_matrix_rebuild(s->chroma_inter_matrix, old_permutation, s->idsp.idct_permutation);
1309 
1310  s1->mpeg_enc_ctx_allocated = 1;
1311  }
1312  return 0;
1313 }
1314 
1315 static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
1316  int buf_size)
1317 {
1318  Mpeg1Context *s1 = avctx->priv_data;
1319  MpegEncContext *s = &s1->mpeg_enc_ctx;
1320  int ref, f_code, vbv_delay, ret;
1321 
1322  ret = init_get_bits8(&s->gb, buf, buf_size);
1323  if (ret < 0)
1324  return ret;
1325 
1326  ref = get_bits(&s->gb, 10); /* temporal ref */
1327  s->pict_type = get_bits(&s->gb, 3);
1328  if (s->pict_type == 0 || s->pict_type > 3)
1329  return AVERROR_INVALIDDATA;
1330 
1331  vbv_delay = get_bits(&s->gb, 16);
1332  s->vbv_delay = vbv_delay;
1333  if (s->pict_type == AV_PICTURE_TYPE_P ||
1334  s->pict_type == AV_PICTURE_TYPE_B) {
1335  s->full_pel[0] = get_bits1(&s->gb);
1336  f_code = get_bits(&s->gb, 3);
1337  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1338  return AVERROR_INVALIDDATA;
1339  f_code += !f_code;
1340  s->mpeg_f_code[0][0] = f_code;
1341  s->mpeg_f_code[0][1] = f_code;
1342  }
1343  if (s->pict_type == AV_PICTURE_TYPE_B) {
1344  s->full_pel[1] = get_bits1(&s->gb);
1345  f_code = get_bits(&s->gb, 3);
1346  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1347  return AVERROR_INVALIDDATA;
1348  f_code += !f_code;
1349  s->mpeg_f_code[1][0] = f_code;
1350  s->mpeg_f_code[1][1] = f_code;
1351  }
1352  s->current_picture.f->pict_type = s->pict_type;
1353  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1354 
1355  if (avctx->debug & FF_DEBUG_PICT_INFO)
1356  av_log(avctx, AV_LOG_DEBUG,
1357  "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
1358 
1359  s->y_dc_scale = 8;
1360  s->c_dc_scale = 8;
1361  return 0;
1362 }
1363 
1364 static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
1365 {
1366  MpegEncContext *s = &s1->mpeg_enc_ctx;
1367  int horiz_size_ext, vert_size_ext;
1368  int bit_rate_ext;
1369  AVCPBProperties *cpb_props;
1370 
1371  skip_bits(&s->gb, 1); /* profile and level esc*/
1372  s->avctx->profile = get_bits(&s->gb, 3);
1373  s->avctx->level = get_bits(&s->gb, 4);
1374  s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
1375  s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
1376 
1377  if (!s->chroma_format) {
1378  s->chroma_format = 1;
1379  av_log(s->avctx, AV_LOG_WARNING, "Chroma format invalid\n");
1380  }
1381 
1382  horiz_size_ext = get_bits(&s->gb, 2);
1383  vert_size_ext = get_bits(&s->gb, 2);
1384  s->width |= (horiz_size_ext << 12);
1385  s->height |= (vert_size_ext << 12);
1386  bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */
1387  s->bit_rate += (bit_rate_ext << 18) * 400LL;
1388  check_marker(s->avctx, &s->gb, "after bit rate extension");
1389  s1->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;
1390 
1391  s->low_delay = get_bits1(&s->gb);
1392  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1393  s->low_delay = 1;
1394 
1395  s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1;
1396  s1->frame_rate_ext.den = get_bits(&s->gb, 5) + 1;
1397 
1398  ff_dlog(s->avctx, "sequence extension\n");
1399  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1400 
1401  if (cpb_props = ff_add_cpb_side_data(s->avctx)) {
1402  cpb_props->buffer_size = s1->rc_buffer_size;
1403  if (s->bit_rate != 0x3FFFF*400)
1404  cpb_props->max_bitrate = s->bit_rate;
1405  }
1406 
1407  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1408  av_log(s->avctx, AV_LOG_DEBUG,
1409  "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
1410  s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
1411  s1->rc_buffer_size, s->bit_rate);
1412 }
1413 
1414 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
1415 {
1416  MpegEncContext *s = &s1->mpeg_enc_ctx;
1417  int color_description, w, h;
1418 
1419  skip_bits(&s->gb, 3); /* video format */
1420  color_description = get_bits1(&s->gb);
1421  if (color_description) {
1422  s->avctx->color_primaries = get_bits(&s->gb, 8);
1423  s->avctx->color_trc = get_bits(&s->gb, 8);
1424  s->avctx->colorspace = get_bits(&s->gb, 8);
1425  }
1426  w = get_bits(&s->gb, 14);
1427  skip_bits(&s->gb, 1); // marker
1428  h = get_bits(&s->gb, 14);
1429  // remaining 3 bits are zero padding
1430 
1431  s1->pan_scan.width = 16 * w;
1432  s1->pan_scan.height = 16 * h;
1433 
1434  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1435  av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
1436 }
1437 
1438 static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
1439 {
1440  MpegEncContext *s = &s1->mpeg_enc_ctx;
1441  int i, nofco;
1442 
1443  nofco = 1;
1444  if (s->progressive_sequence) {
1445  if (s->repeat_first_field) {
1446  nofco++;
1447  if (s->top_field_first)
1448  nofco++;
1449  }
1450  } else {
1451  if (s->picture_structure == PICT_FRAME) {
1452  nofco++;
1453  if (s->repeat_first_field)
1454  nofco++;
1455  }
1456  }
1457  for (i = 0; i < nofco; i++) {
1458  s1->pan_scan.position[i][0] = get_sbits(&s->gb, 16);
1459  skip_bits(&s->gb, 1); // marker
1460  s1->pan_scan.position[i][1] = get_sbits(&s->gb, 16);
1461  skip_bits(&s->gb, 1); // marker
1462  }
1463 
1464  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1465  av_log(s->avctx, AV_LOG_DEBUG,
1466  "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
1467  s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
1468  s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
1469  s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
1470 }
1471 
1472 static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
1473  uint16_t matrix1[64], int intra)
1474 {
1475  int i;
1476 
1477  for (i = 0; i < 64; i++) {
1478  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
1479  int v = get_bits(&s->gb, 8);
1480  if (v == 0) {
1481  av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
1482  return AVERROR_INVALIDDATA;
1483  }
1484  if (intra && i == 0 && v != 8) {
1485  av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
1486  v = 8; // needed by pink.mpg / issue1046
1487  }
1488  matrix0[j] = v;
1489  if (matrix1)
1490  matrix1[j] = v;
1491  }
1492  return 0;
1493 }
1494 
1495 static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
1496 {
1497  ff_dlog(s->avctx, "matrix extension\n");
1498 
1499  if (get_bits1(&s->gb))
1500  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1501  if (get_bits1(&s->gb))
1502  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1503  if (get_bits1(&s->gb))
1504  load_matrix(s, s->chroma_intra_matrix, NULL, 1);
1505  if (get_bits1(&s->gb))
1506  load_matrix(s, s->chroma_inter_matrix, NULL, 0);
1507 }
1508 
1509 static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
1510 {
1511  MpegEncContext *s = &s1->mpeg_enc_ctx;
1512 
1513  s->full_pel[0] = s->full_pel[1] = 0;
1514  s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
1515  s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
1516  s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
1517  s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
1518  s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
1519  s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
1520  s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
1521  s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
1522  if (!s->pict_type && s1->mpeg_enc_ctx_allocated) {
1523  av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code\n");
1524  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1525  return AVERROR_INVALIDDATA;
1526  av_log(s->avctx, AV_LOG_WARNING, "Guessing pict_type from mpeg_f_code\n");
1527  if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
1528  if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
1529  s->pict_type = AV_PICTURE_TYPE_I;
1530  else
1531  s->pict_type = AV_PICTURE_TYPE_P;
1532  } else
1533  s->pict_type = AV_PICTURE_TYPE_B;
1534  s->current_picture.f->pict_type = s->pict_type;
1535  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1536  }
1537 
1538  s->intra_dc_precision = get_bits(&s->gb, 2);
1539  s->picture_structure = get_bits(&s->gb, 2);
1540  s->top_field_first = get_bits1(&s->gb);
1541  s->frame_pred_frame_dct = get_bits1(&s->gb);
1542  s->concealment_motion_vectors = get_bits1(&s->gb);
1543  s->q_scale_type = get_bits1(&s->gb);
1544  s->intra_vlc_format = get_bits1(&s->gb);
1545  s->alternate_scan = get_bits1(&s->gb);
1546  s->repeat_first_field = get_bits1(&s->gb);
1547  s->chroma_420_type = get_bits1(&s->gb);
1548  s->progressive_frame = get_bits1(&s->gb);
1549 
1550  if (s->alternate_scan) {
1551  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
1552  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
1553  } else {
1554  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
1555  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
1556  }
1557 
1558  /* composite display not parsed */
1559  ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
1560  ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
1561  ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
1562  ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
1563  ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
1564  ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
1565  ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
1566  ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
1567  ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
1568 
1569  return 0;
1570 }
1571 
1572 static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
1573 {
1574  AVCodecContext *avctx = s->avctx;
1575  Mpeg1Context *s1 = (Mpeg1Context *) s;
1576  int ret;
1577 
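 /* Cheap plausibility check: a coded picture needs roughly 11 bits per 33 macroblocks
  * at an absolute minimum, so a buffer far smaller than that cannot contain the frame
  * (skipped when the caller feeds arbitrary chunks). */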
1578  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
1579  if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
1580  return AVERROR_INVALIDDATA;
1581  }
1582 
1583  /* start frame decoding */
1584  if (s->first_field || s->picture_structure == PICT_FRAME) {
1585  AVFrameSideData *pan_scan;
1586 
1587  if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
1588  return ret;
1589 
1590  ff_mpeg_er_frame_start(s);
1591 
1592  /* first check if we must repeat the frame */
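 /* repeat_pict counts extra fields: 4 and 2 mean a progressive frame displayed three
  * times or twice, 1 means three fields are shown (soft telecine). */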
1593  s->current_picture_ptr->f->repeat_pict = 0;
1594  if (s->repeat_first_field) {
1595  if (s->progressive_sequence) {
1596  if (s->top_field_first)
1597  s->current_picture_ptr->f->repeat_pict = 4;
1598  else
1599  s->current_picture_ptr->f->repeat_pict = 2;
1600  } else if (s->progressive_frame) {
1601  s->current_picture_ptr->f->repeat_pict = 1;
1602  }
1603  }
1604 
1605  pan_scan = av_frame_new_side_data(s->current_picture_ptr->f,
1606  AV_FRAME_DATA_PANSCAN,
1607  sizeof(s1->pan_scan));
1608  if (!pan_scan)
1609  return AVERROR(ENOMEM);
1610  memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
1611 
1612  if (s1->a53_buf_ref) {
1613  AVFrameSideData *sd = av_frame_new_side_data_from_buf(
1614  s->current_picture_ptr->f, AV_FRAME_DATA_A53_CC,
1615  s1->a53_buf_ref);
1616  if (!sd)
1617  av_buffer_unref(&s1->a53_buf_ref);
1618  s1->a53_buf_ref = NULL;
1619  }
1620 
1621  if (s1->has_stereo3d) {
1622  AVStereo3D *stereo = av_stereo3d_create_side_data(s->current_picture_ptr->f);
1623  if (!stereo)
1624  return AVERROR(ENOMEM);
1625 
1626  *stereo = s1->stereo3d;
1627  s1->has_stereo3d = 0;
1628  }
1629 
1630  if (s1->has_afd) {
1631  AVFrameSideData *sd =
1632  av_frame_new_side_data(s->current_picture_ptr->f,
1633  AV_FRAME_DATA_AFD, 1);
1634  if (!sd)
1635  return AVERROR(ENOMEM);
1636 
1637  *sd->data = s1->afd;
1638  s1->has_afd = 0;
1639  }
1640 
1641  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
1642  ff_thread_finish_setup(avctx);
1643  } else { // second field
1644  int i;
1645 
1646  if (!s->current_picture_ptr) {
1647  av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
1648  return AVERROR_INVALIDDATA;
1649  }
1650 
1651  if (s->avctx->hwaccel) {
1652  if ((ret = s->avctx->hwaccel->end_frame(s->avctx)) < 0) {
1653  av_log(avctx, AV_LOG_ERROR,
1654  "hardware accelerator failed to decode first field\n");
1655  return ret;
1656  }
1657  }
1658 
1659  for (i = 0; i < 4; i++) {
1660  s->current_picture.f->data[i] = s->current_picture_ptr->f->data[i];
1661  if (s->picture_structure == PICT_BOTTOM_FIELD)
1662  s->current_picture.f->data[i] +=
1663  s->current_picture_ptr->f->linesize[i];
1664  }
1665  }
1666 
1667  if (avctx->hwaccel) {
1668  if ((ret = avctx->hwaccel->start_frame(avctx, buf, buf_size)) < 0)
1669  return ret;
1670  }
1671 
1672  return 0;
1673 }
1674 
1675 #define DECODE_SLICE_ERROR -1
1676 #define DECODE_SLICE_OK 0
1677 
1678 /**
1679  * Decode a slice.
1680  * MpegEncContext.mb_y must be set to the MB row from the startcode.
1681  * @return DECODE_SLICE_ERROR if the slice is damaged,
1682  * DECODE_SLICE_OK if this slice is OK
1683  */
1684 static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
1685  const uint8_t **buf, int buf_size)
1686 {
1687  AVCodecContext *avctx = s->avctx;
1688  const int lowres = s->avctx->lowres;
1689  const int field_pic = s->picture_structure != PICT_FRAME;
1690  int ret;
1691 
1692  s->resync_mb_x =
1693  s->resync_mb_y = -1;
1694 
1695  av_assert0(mb_y < s->mb_height);
1696 
1697  init_get_bits(&s->gb, *buf, buf_size * 8);
1698  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1699  skip_bits(&s->gb, 3);
1700 
1701  ff_mpeg1_clean_buffers(s);
1702  s->interlaced_dct = 0;
1703 
1704  s->qscale = mpeg_get_qscale(s);
1705 
1706  if (s->qscale == 0) {
1707  av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
1708  return AVERROR_INVALIDDATA;
1709  }
1710 
1711  /* extra slice info */
1712  if (skip_1stop_8data_bits(&s->gb) < 0)
1713  return AVERROR_INVALIDDATA;
1714 
1715  s->mb_x = 0;
1716 
1717  if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {
1718  skip_bits1(&s->gb);
1719  } else {
1720  while (get_bits_left(&s->gb) > 0) {
1721  int code = get_vlc2(&s->gb, ff_mbincr_vlc.table,
1722  MBINCR_VLC_BITS, 2);
1723  if (code < 0) {
1724  av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
1725  return AVERROR_INVALIDDATA;
1726  }
1727  if (code >= 33) {
1728  if (code == 33)
1729  s->mb_x += 33;
1730  /* otherwise, stuffing, nothing to do */
1731  } else {
1732  s->mb_x += code;
1733  break;
1734  }
1735  }
1736  }
1737 
1738  if (s->mb_x >= (unsigned) s->mb_width) {
1739  av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
1740  return AVERROR_INVALIDDATA;
1741  }
1742 
1743  if (avctx->hwaccel && avctx->hwaccel->decode_slice) {
1744  const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
1745  int start_code = -1;
1746  buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
1747  if (buf_end < *buf + buf_size)
1748  buf_end -= 4;
1749  s->mb_y = mb_y;
1750  if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_end - buf_start) < 0)
1751  return DECODE_SLICE_ERROR;
1752  *buf = buf_end;
1753  return DECODE_SLICE_OK;
1754  }
1755 
1756  s->resync_mb_x = s->mb_x;
1757  s->resync_mb_y = s->mb_y = mb_y;
1758  s->mb_skip_run = 0;
1759  ff_init_block_index(s);
1760 
1761  if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field || s->picture_structure == PICT_FRAME)) {
1762  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
1763  av_log(s->avctx, AV_LOG_DEBUG,
1764  "qp:%d fc:%2d%2d%2d%2d %c %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
1765  s->qscale,
1766  s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
1767  s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
1768  s->pict_type == AV_PICTURE_TYPE_I ? 'I' :
1769  (s->pict_type == AV_PICTURE_TYPE_P ? 'P' :
1770  (s->pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
1771  s->progressive_sequence ? "ps" : "",
1772  s->progressive_frame ? "pf" : "",
1773  s->alternate_scan ? "alt" : "",
1774  s->top_field_first ? "top" : "",
1775  s->intra_dc_precision, s->picture_structure,
1776  s->frame_pred_frame_dct, s->concealment_motion_vectors,
1777  s->q_scale_type, s->intra_vlc_format,
1778  s->repeat_first_field, s->chroma_420_type ? "420" : "");
1779  }
1780  }
1781 
1782  for (;;) {
1783  if ((ret = mpeg_decode_mb(s, s->block)) < 0)
1784  return ret;
1785 
1786  // Note motion_val is normally NULL unless we want to extract the MVs.
1787  if (s->current_picture.motion_val[0]) {
1788  const int wrap = s->b8_stride;
1789  int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
1790  int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
1791  int motion_x, motion_y, dir, i;
1792 
1793  for (i = 0; i < 2; i++) {
1794  for (dir = 0; dir < 2; dir++) {
1795  if (s->mb_intra ||
1796  (dir == 1 && s->pict_type != AV_PICTURE_TYPE_B)) {
1797  motion_x = motion_y = 0;
1798  } else if (s->mv_type == MV_TYPE_16X16 ||
1799  (s->mv_type == MV_TYPE_FIELD && field_pic)) {
1800  motion_x = s->mv[dir][0][0];
1801  motion_y = s->mv[dir][0][1];
1802  } else { /* if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8)) */
1803  motion_x = s->mv[dir][i][0];
1804  motion_y = s->mv[dir][i][1];
1805  }
1806 
1807  s->current_picture.motion_val[dir][xy][0] = motion_x;
1808  s->current_picture.motion_val[dir][xy][1] = motion_y;
1809  s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
1810  s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
1811  s->current_picture.ref_index [dir][b8_xy] =
1812  s->current_picture.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
1813  av_assert2(s->field_select[dir][i] == 0 ||
1814  s->field_select[dir][i] == 1);
1815  }
1816  xy += wrap;
1817  b8_xy += 2;
1818  }
1819  }
1820 
1821  s->dest[0] += 16 >> lowres;
1822  s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
1823  s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
1824 
1825  ff_mpv_reconstruct_mb(s, s->block);
1826 
1827  if (++s->mb_x >= s->mb_width) {
1828  const int mb_size = 16 >> s->avctx->lowres;
1829  int left;
1830 
1831  ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
1832  ff_mpv_report_decode_progress(s);
1833 
1834  s->mb_x = 0;
1835  s->mb_y += 1 << field_pic;
1836 
1837  if (s->mb_y >= s->mb_height) {
1838  int left = get_bits_left(&s->gb);
1839  int is_d10 = s->chroma_format == 2 &&
1840  s->pict_type == AV_PICTURE_TYPE_I &&
1841  avctx->profile == 0 && avctx->level == 5 &&
1842  s->intra_dc_precision == 2 &&
1843  s->q_scale_type == 1 && s->alternate_scan == 0 &&
1844  s->progressive_frame == 0
1845  /* vbv_delay == 0xBBB || 0xE10 */;
1846 
1847  if (left >= 32 && !is_d10) {
1848  GetBitContext gb = s->gb;
1849  align_get_bits(&gb);
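 /* 0x06 0x0E 0x2B is the start of a SMPTE Universal Label, i.e. raw MXF KLV data
  * has ended up inside the video stream (common with D-10/IMX essence). */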
1850  if (show_bits(&gb, 24) == 0x060E2B) {
1851  av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
1852  is_d10 = 1;
1853  }
1854  if (left > 32 && show_bits_long(&gb, 32) == 0x201) {
1855  av_log(avctx, AV_LOG_DEBUG, "skipping m704 alpha (unsupported)\n");
1856  goto eos;
1857  }
1858  }
1859 
1860  if (left < 0 ||
1861  (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
1862  ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
1863  av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n",
1864  left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0, s->mb_x, s->mb_y);
1865  return AVERROR_INVALIDDATA;
1866  } else
1867  goto eos;
1868  }
1869  // There are some files out there which are missing the last slice
1870  // in cases where the slice is completely outside the visible
1871  // area, we detect this here instead of running into the end expecting
1872  // more data
1873  left = get_bits_left(&s->gb);
1874  if (s->mb_y >= ((s->height + 15) >> 4) &&
1875  !s->progressive_sequence &&
1876  left <= 25 &&
1877  left >= 0 &&
1878  s->mb_skip_run == -1 &&
1879  (!left || show_bits(&s->gb, left) == 0))
1880  goto eos;
1881 
1882  ff_init_block_index(s);
1883  }
1884 
1885  /* skip mb handling */
1886  if (s->mb_skip_run == -1) {
1887  /* read increment again */
1888  s->mb_skip_run = 0;
1889  for (;;) {
1890  int code = get_vlc2(&s->gb, ff_mbincr_vlc.table,
1891  MBINCR_VLC_BITS, 2);
1892  if (code < 0) {
1893  av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
1894  return AVERROR_INVALIDDATA;
1895  }
1896  if (code >= 33) {
1897  if (code == 33) {
1898  s->mb_skip_run += 33;
1899  } else if (code == 35) {
1900  if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
1901  av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
1902  return AVERROR_INVALIDDATA;
1903  }
1904  goto eos; /* end of slice */
1905  }
1906  /* otherwise, stuffing, nothing to do */
1907  } else {
1908  s->mb_skip_run += code;
1909  break;
1910  }
1911  }
1912  if (s->mb_skip_run) {
1913  int i;
1914  if (s->pict_type == AV_PICTURE_TYPE_I) {
1915  av_log(s->avctx, AV_LOG_ERROR,
1916  "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);
1917  return AVERROR_INVALIDDATA;
1918  }
1919 
1920  /* skip mb */
1921  s->mb_intra = 0;
1922  for (i = 0; i < 12; i++)
1923  s->block_last_index[i] = -1;
1924  if (s->picture_structure == PICT_FRAME)
1925  s->mv_type = MV_TYPE_16X16;
1926  else
1927  s->mv_type = MV_TYPE_FIELD;
1928  if (s->pict_type == AV_PICTURE_TYPE_P) {
1929  /* if P type, zero motion vector is implied */
1930  s->mv_dir = MV_DIR_FORWARD;
1931  s->mv[0][0][0] = s->mv[0][0][1] = 0;
1932  s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
1933  s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
1934  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1935  } else {
1936  /* if B type, reuse previous vectors and directions */
1937  s->mv[0][0][0] = s->last_mv[0][0][0];
1938  s->mv[0][0][1] = s->last_mv[0][0][1];
1939  s->mv[1][0][0] = s->last_mv[1][0][0];
1940  s->mv[1][0][1] = s->last_mv[1][0][1];
1941  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1942  s->field_select[1][0] = (s->picture_structure - 1) & 1;
1943  }
1944  }
1945  }
1946  }
1947 eos: // end of slice
1948  if (get_bits_left(&s->gb) < 0) {
1949  av_log(s, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb));
1950  return AVERROR_INVALIDDATA;
1951  }
1952  *buf += (get_bits_count(&s->gb) - 1) / 8;
1953  ff_dlog(s, "Slice start:%d %d end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
1954  return 0;
1955 }
1956 
1957 static int slice_decode_thread(AVCodecContext *c, void *arg)
1958 {
1959  MpegEncContext *s = *(void **) arg;
1960  const uint8_t *buf = s->gb.buffer;
1961  int mb_y = s->start_mb_y;
1962  const int field_pic = s->picture_structure != PICT_FRAME;
1963 
1964  s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
1965 
1966  for (;;) {
1967  uint32_t start_code;
1968  int ret;
1969 
1970  ret = mpeg_decode_slice(s, mb_y, &buf, s->gb.buffer_end - buf);
1971  emms_c();
1972  ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
1973  ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
1974  s->start_mb_y, s->end_mb_y, s->er.error_count);
1975  if (ret < 0) {
1976  if (c->err_recognition & AV_EF_EXPLODE)
1977  return ret;
1978  if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
1979  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1980  s->mb_x, s->mb_y,
1981  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
1982  } else {
1983  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1984  s->mb_x - 1, s->mb_y,
1985  ER_AC_END | ER_DC_END | ER_MV_END);
1986  }
1987 
1988  if (s->mb_y == s->end_mb_y)
1989  return 0;
1990 
1991  start_code = -1;
1992  buf = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code);
1993  if (start_code < SLICE_MIN_START_CODE || start_code > SLICE_MAX_START_CODE)
1994  return AVERROR_INVALIDDATA;
1995  mb_y = start_code - SLICE_MIN_START_CODE;
1996  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1997  mb_y += (*buf&0xE0)<<2;
1998  mb_y <<= field_pic;
1999  if (s->picture_structure == PICT_BOTTOM_FIELD)
2000  mb_y++;
2001  if (mb_y >= s->end_mb_y)
2002  return AVERROR_INVALIDDATA;
2003  }
2004 }
2005 
2006 /**
2007  * Handle slice ends.
2008  * @return 1 if it seems to be the last slice
2009  */
2010 static int slice_end(AVCodecContext *avctx, AVFrame *pict)
2011 {
2012  Mpeg1Context *s1 = avctx->priv_data;
2013  MpegEncContext *s = &s1->mpeg_enc_ctx;
2014 
2015  if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr)
2016  return 0;
2017 
2018  if (s->avctx->hwaccel) {
2019  int ret = s->avctx->hwaccel->end_frame(s->avctx);
2020  if (ret < 0) {
2021  av_log(avctx, AV_LOG_ERROR,
2022  "hardware accelerator failed to decode picture\n");
2023  return ret;
2024  }
2025  }
2026 
2027  /* end of slice reached */
2028  if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field && !s1->first_slice) {
2029  /* end of image */
2030 
2031  ff_er_frame_end(&s->er);
2032 
2033  ff_mpv_frame_end(s);
2034 
2035  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
2036  int ret = av_frame_ref(pict, s->current_picture_ptr->f);
2037  if (ret < 0)
2038  return ret;
2039  ff_print_debug_info(s, s->current_picture_ptr, pict);
2040  ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
2041  } else {
2042  /* latency of 1 frame for I- and P-frames */
2043  if (s->last_picture_ptr) {
2044  int ret = av_frame_ref(pict, s->last_picture_ptr->f);
2045  if (ret < 0)
2046  return ret;
2047  ff_print_debug_info(s, s->last_picture_ptr, pict);
2048  ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
2049  }
2050  }
2051 
2052  return 1;
2053  } else {
2054  return 0;
2055  }
2056 }
2057 
2058 static int mpeg1_decode_sequence(AVCodecContext *avctx,
2059  const uint8_t *buf, int buf_size)
2060 {
2061  Mpeg1Context *s1 = avctx->priv_data;
2062  MpegEncContext *s = &s1->mpeg_enc_ctx;
2063  int width, height;
2064  int i, v, j;
2065 
2066  init_get_bits(&s->gb, buf, buf_size * 8);
2067 
2068  width = get_bits(&s->gb, 12);
2069  height = get_bits(&s->gb, 12);
2070  if (width == 0 || height == 0) {
2071  av_log(avctx, AV_LOG_WARNING,
2072  "Invalid horizontal or vertical size value.\n");
2073  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
2074  return AVERROR_INVALIDDATA;
2075  }
2076  s1->aspect_ratio_info = get_bits(&s->gb, 4);
2077  if (s1->aspect_ratio_info == 0) {
2078  av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
2079  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
2080  return AVERROR_INVALIDDATA;
2081  }
2082  s1->frame_rate_index = get_bits(&s->gb, 4);
2083  if (s1->frame_rate_index == 0 || s1->frame_rate_index > 13) {
2084  av_log(avctx, AV_LOG_WARNING,
2085  "frame_rate_index %d is invalid\n", s1->frame_rate_index);
2086  s1->frame_rate_index = 1;
2087  }
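      /* MPEG-1 sequence header units, as applied just below: bit_rate is coded
       * in multiples of 400 bit/s and vbv_buffer_size in multiples of 16 kbit,
       * hence the *400 and *1024*16 scaling. */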
2088  s->bit_rate = get_bits(&s->gb, 18) * 400LL;
2089  if (check_marker(s->avctx, &s->gb, "in sequence header") == 0) {
2090  return AVERROR_INVALIDDATA;
2091  }
2092 
2093  s1->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;
2094  skip_bits(&s->gb, 1);
2095 
2096  /* get matrix */
2097  if (get_bits1(&s->gb)) {
2098  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
2099  } else {
2100  for (i = 0; i < 64; i++) {
2101  j = s->idsp.idct_permutation[i];
2102  v = ff_mpeg1_default_intra_matrix[i];
2103  s->intra_matrix[j] = v;
2104  s->chroma_intra_matrix[j] = v;
2105  }
2106  }
2107  if (get_bits1(&s->gb)) {
2108  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
2109  } else {
2110  for (i = 0; i < 64; i++) {
2111  int j = s->idsp.idct_permutation[i];
2112  v = ff_mpeg1_default_non_intra_matrix[i];
2113  s->inter_matrix[j] = v;
2114  s->chroma_inter_matrix[j] = v;
2115  }
2116  }
2117 
2118  if (show_bits(&s->gb, 23) != 0) {
2119  av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
2120  return AVERROR_INVALIDDATA;
2121  }
2122 
2123  s->width = width;
2124  s->height = height;
2125 
2126  /* We set MPEG-2 parameters so that it emulates MPEG-1. */
2127  s->progressive_sequence = 1;
2128  s->progressive_frame = 1;
2129  s->picture_structure = PICT_FRAME;
2130  s->first_field = 0;
2131  s->frame_pred_frame_dct = 1;
2132  s->chroma_format = 1;
2133  s->codec_id =
2134  s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
2135  s->out_format = FMT_MPEG1;
2136  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
2137  s->low_delay = 1;
2138 
2139  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2140  av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n",
2141  s1->rc_buffer_size, s->bit_rate, s1->aspect_ratio_info);
2142 
2143  return 0;
2144 }
2145 
2146 static int vcr2_init_sequence(AVCodecContext *avctx)
2147 {
2148  Mpeg1Context *s1 = avctx->priv_data;
2149  MpegEncContext *s = &s1->mpeg_enc_ctx;
2150  int i, v, ret;
2151 
2152  /* start new MPEG-1 context decoding */
2153  s->out_format = FMT_MPEG1;
2154  if (s1->mpeg_enc_ctx_allocated) {
2155  ff_mpv_common_end(s);
2156  s1->mpeg_enc_ctx_allocated = 0;
2157  }
2158  s->width = avctx->coded_width;
2159  s->height = avctx->coded_height;
2160  avctx->has_b_frames = 0; // true?
2161  s->low_delay = 1;
2162 
2163  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
2164 
2165  ff_mpv_idct_init(s);
2166  if ((ret = ff_mpv_common_init(s)) < 0)
2167  return ret;
2168  s1->mpeg_enc_ctx_allocated = 1;
2169 
2170  for (i = 0; i < 64; i++) {
2171  int j = s->idsp.idct_permutation[i];
2172  v = ff_mpeg1_default_intra_matrix[i];
2173  s->intra_matrix[j] = v;
2174  s->chroma_intra_matrix[j] = v;
2175 
2176  v = ff_mpeg1_default_non_intra_matrix[i];
2177  s->inter_matrix[j] = v;
2178  s->chroma_inter_matrix[j] = v;
2179  }
2180 
2181  s->progressive_sequence = 1;
2182  s->progressive_frame = 1;
2183  s->picture_structure = PICT_FRAME;
2184  s->first_field = 0;
2185  s->frame_pred_frame_dct = 1;
2186  s->chroma_format = 1;
2187  if (s->codec_tag == AV_RL32("BW10")) {
2188  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
2189  } else {
2190  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
2191  }
2192  s1->save_width = s->width;
2193  s1->save_height = s->height;
2194  s1->save_progressive_seq = s->progressive_sequence;
2195  return 0;
2196 }
2197 
2198 static int mpeg_decode_a53_cc(AVCodecContext *avctx,
2199  const uint8_t *p, int buf_size)
2200 {
2201  Mpeg1Context *s1 = avctx->priv_data;
2202 
2203  if (buf_size >= 6 &&
2204  p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
2205  p[4] == 3 && (p[5] & 0x40)) {
2206  /* extract A53 Part 4 CC data */
2207  int cc_count = p[5] & 0x1f;
2208  if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
2209  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2210  const uint64_t new_size = (old_size + cc_count
2211  * UINT64_C(3));
2212  int ret;
2213 
2214  if (new_size > 3*A53_MAX_CC_COUNT)
2215  return AVERROR(EINVAL);
2216 
2217  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2218  if (ret >= 0)
2219  memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
2220 
2221  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2222  }
2223  return 1;
2224  } else if (buf_size >= 2 &&
2225  p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
2226  /* extract SCTE-20 CC data */
2227  GetBitContext gb;
2228  int cc_count = 0;
2229  int i, ret;
2230 
2231  init_get_bits8(&gb, p + 2, buf_size - 2);
2232  cc_count = get_bits(&gb, 5);
2233  if (cc_count > 0) {
2234  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2235  const uint64_t new_size = (old_size + cc_count
2236  * UINT64_C(3));
2237  if (new_size > 3*A53_MAX_CC_COUNT)
2238  return AVERROR(EINVAL);
2239 
2240  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2241  if (ret >= 0) {
2242  uint8_t field, cc1, cc2;
2243  uint8_t *cap = s1->a53_buf_ref->data;
2244 
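                    /* SCTE-20 carries the two CC bytes with reversed bit
                     * order, hence the ff_reverse[] lookup below; the 2-bit
                     * field code is folded into the single A53 field flag and
                     * flipped when the picture is bottom-field-first. */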
2245  memset(s1->a53_buf_ref->data + old_size, 0, cc_count * 3);
2246  for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
2247  skip_bits(&gb, 2); // priority
2248  field = get_bits(&gb, 2);
2249  skip_bits(&gb, 5); // line_offset
2250  cc1 = get_bits(&gb, 8);
2251  cc2 = get_bits(&gb, 8);
2252  skip_bits(&gb, 1); // marker
2253 
2254  if (!field) { // forbidden
2255  cap[0] = cap[1] = cap[2] = 0x00;
2256  } else {
2257  field = (field == 2 ? 1 : 0);
2258  if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
2259  cap[0] = 0x04 | field;
2260  cap[1] = ff_reverse[cc1];
2261  cap[2] = ff_reverse[cc2];
2262  }
2263  cap += 3;
2264  }
2265  }
2266  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2267  }
2268  return 1;
2269  } else if (buf_size >= 11 &&
2270  p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
2271  /* extract DVD CC data
2272  *
2273  * uint32_t user_data_start_code 0x000001B2 (big endian)
2274  * uint16_t user_identifier 0x4343 "CC"
2275  * uint8_t user_data_type_code 0x01
2276  * uint8_t caption_block_size 0xF8
2277  * uint8_t
2278  * bit 7 caption_odd_field_first 1=odd field (CC1/CC2) first 0=even field (CC3/CC4) first
2279  * bit 6 caption_filler 0
2280  * bit 5:1 caption_block_count number of caption blocks (pairs of caption words = frames). Most DVDs use 15 per start of GOP.
2281  * bit 0 caption_extra_field_added 1=one additional caption word
2282  *
2283  * struct caption_field_block {
2284  * uint8_t
2285  * bit 7:1 caption_filler 0x7F (all 1s)
2286  * bit 0 caption_field_odd 1=odd field (this is CC1/CC2) 0=even field (this is CC3/CC4)
2287  * uint8_t caption_first_byte
2288  * uint8_t caption_second_byte
2289  * } caption_block[(caption_block_count * 2) + caption_extra_field_added];
2290  *
2291  * Some DVDs encode caption data for both fields with caption_field_odd=1. The only way to decode the fields
2292  * correctly is to start on the field indicated by caption_odd_field_first and count between odd/even fields.
2293  * Don't assume that the first caption word is the odd field. There do exist MPEG files in the wild that start
2294  * on the even field. There also exist DVDs in the wild that encode an odd field count and the
2295  * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */
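 /* A minimal, purely illustrative unpacking of the header byte p[4] described
  * above, kept inert like the debug block in mpeg_decode_user_data(): */
 #if 0
 {
     const int odd_field_first =  p[4] >> 7;         /* bit 7                        */
     const int block_count     = (p[4] >> 1) & 0x1f; /* bits 5:1 (often wrong, see below) */
     const int extra_field     =  p[4] & 1;          /* bit 0                        */
     /* e.g. p[4] == 0x9E -> odd field first, 15 caption blocks, no extra field */
     av_log(avctx, AV_LOG_DEBUG, "DVD CC hdr: odd_first=%d blocks=%d extra=%d\n",
            odd_field_first, block_count, extra_field);
 }
 #endif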
2296  int cc_count = 0;
2297  int i, ret;
2298  // There is a caption count field in the data, but it is often
2299  // incorrect. So count the number of captions present.
2300  for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
2301  cc_count++;
2302  // Transform the DVD format into A53 Part 4 format
2303  if (cc_count > 0) {
2304  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2305  const uint64_t new_size = (old_size + cc_count
2306  * UINT64_C(6));
2307  if (new_size > 3*A53_MAX_CC_COUNT)
2308  return AVERROR(EINVAL);
2309 
2310  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2311  if (ret >= 0) {
2312  uint8_t field1 = !!(p[4] & 0x80);
2313  uint8_t *cap = s1->a53_buf_ref->data;
2314  p += 5;
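                    /* A53 Part 4 cc_data() triplet headers: 0xfc = marker bits
                     * | cc_valid | cc_type 0 (NTSC field 1), 0xfd = the same
                     * with cc_type 1 (NTSC field 2). */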
2315  for (i = 0; i < cc_count; i++) {
2316  cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
2317  cap[1] = p[1];
2318  cap[2] = p[2];
2319  cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
2320  cap[4] = p[4];
2321  cap[5] = p[5];
2322  cap += 6;
2323  p += 6;
2324  }
2325  }
2326  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2327  }
2328  return 1;
2329  }
2330  return 0;
2331 }
2332 
2333 static void mpeg_decode_user_data(AVCodecContext *avctx,
2334  const uint8_t *p, int buf_size)
2335 {
2336  Mpeg1Context *s = avctx->priv_data;
2337  const uint8_t *buf_end = p + buf_size;
2338  Mpeg1Context *s1 = avctx->priv_data;
2339 
2340 #if 0
2341  int i;
2342  for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){
2343  av_log(avctx, AV_LOG_ERROR, "%c", p[i]);
2344  }
2345  av_log(avctx, AV_LOG_ERROR, "\n");
2346 #endif
2347 
2348  if (buf_size > 29){
2349  int i;
2350  for(i=0; i<20; i++)
2351  if (!memcmp(p+i, "\0TMPGEXS\0", 9)){
2352  s->tmpgexs= 1;
2353  }
2354  }
2355  /* we parse the DTG active format information */
2356  if (buf_end - p >= 5 &&
2357  p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
2358  int flags = p[4];
2359  p += 5;
2360  if (flags & 0x80) {
2361  /* skip event id */
2362  p += 2;
2363  }
2364  if (flags & 0x40) {
2365  if (buf_end - p < 1)
2366  return;
2367  s1->has_afd = 1;
2368  s1->afd = p[0] & 0x0f;
2369  }
2370  } else if (buf_end - p >= 6 &&
2371  p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
2372  p[4] == 0x03) { // S3D_video_format_length
2373  // the 0x7F mask ignores the reserved_bit value
2374  const uint8_t S3D_video_format_type = p[5] & 0x7F;
2375 
2376  if (S3D_video_format_type == 0x03 ||
2377  S3D_video_format_type == 0x04 ||
2378  S3D_video_format_type == 0x08 ||
2379  S3D_video_format_type == 0x23) {
2380 
2381  s1->has_stereo3d = 1;
2382 
2383  switch (S3D_video_format_type) {
2384  case 0x03:
2385  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE;
2386  break;
2387  case 0x04:
2388  s1->stereo3d.type = AV_STEREO3D_TOPBOTTOM;
2389  break;
2390  case 0x08:
2391  s1->stereo3d.type = AV_STEREO3D_2D;
2392  break;
2393  case 0x23:
2394  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2395  break;
2396  }
2397  }
2398  } else if (mpeg_decode_a53_cc(avctx, p, buf_size)) {
2399  return;
2400  }
2401 }
2402 
2403 static void mpeg_decode_gop(AVCodecContext *avctx,
2404  const uint8_t *buf, int buf_size)
2405 {
2406  Mpeg1Context *s1 = avctx->priv_data;
2407  MpegEncContext *s = &s1->mpeg_enc_ctx;
2408  int broken_link;
2409  int64_t tc;
2410 
2411  init_get_bits(&s->gb, buf, buf_size * 8);
2412 
2413  tc = s1->timecode_frame_start = get_bits(&s->gb, 25);
2414 
2415  s1->closed_gop = get_bits1(&s->gb);
2416  /* broken_link indicates that, after editing, the
2417  * reference frames of the first B-frames following the GOP I-frame
2418  * are missing (open GOP) */
2419  broken_link = get_bits1(&s->gb);
2420 
2421  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
2422  char tcbuf[AV_TIMECODE_STR_SIZE];
2423  av_timecode_make_mpeg_tc_string(tcbuf, tc);
2424  av_log(s->avctx, AV_LOG_DEBUG,
2425  "GOP (%s) closed_gop=%d broken_link=%d\n",
2426  tcbuf, s1->closed_gop, broken_link);
2427  }
2428 }
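 
 /* The 25-bit time_code parsed in mpeg_decode_gop() packs, from MSB to LSB:
  * drop_frame_flag (1), hours (5), minutes (6), a marker bit (1), seconds (6)
  * and pictures (6). A minimal sketch of unpacking it by hand, mirroring what
  * av_timecode_make_mpeg_tc_string() prints when FF_DEBUG_PICT_INFO is set
  * (illustrative helper only): */
 #if 0
 static void gop_timecode_fields(uint32_t tc25bit)
 {
     const int drop    =  tc25bit >> 24;         /* drop_frame_flag    */
     const int hours   = (tc25bit >> 19) & 0x1f; /* time_code_hours    */
     const int minutes = (tc25bit >> 13) & 0x3f; /* time_code_minutes  */
     /* bit 12 is the marker_bit and carries no information            */
     const int seconds = (tc25bit >>  6) & 0x3f; /* time_code_seconds  */
     const int frames  =  tc25bit        & 0x3f; /* time_code_pictures */
     av_log(NULL, AV_LOG_DEBUG, "GOP timecode %02d:%02d:%02d%c%02d\n",
            hours, minutes, seconds, drop ? ';' : ':', frames);
 }
 #endif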
2429 
2430 static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
2431  int *got_output, const uint8_t *buf, int buf_size)
2432 {
2433  Mpeg1Context *s = avctx->priv_data;
2434  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2435  const uint8_t *buf_ptr = buf;
2436  const uint8_t *buf_end = buf + buf_size;
2437  int ret, input_size;
2438  int last_code = 0, skip_frame = 0;
2439  int picture_start_code_seen = 0;
2440 
2441  for (;;) {
2442  /* find next start code */
2443  uint32_t start_code = -1;
2444  buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code);
2445  if (start_code > 0x1ff) {
2446  if (!skip_frame) {
2447  if (HAVE_THREADS &&
2448  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2449  !avctx->hwaccel) {
2450  int i;
2451  av_assert0(avctx->thread_count > 1);
2452 
2453  avctx->execute(avctx, slice_decode_thread,
2454  &s2->thread_context[0], NULL,
2455  s->slice_count, sizeof(void *));
2456  for (i = 0; i < s->slice_count; i++)
2457  s2->er.error_count += s2->thread_context[i]->er.error_count;
2458  }
2459 
2460  ret = slice_end(avctx, picture);
2461  if (ret < 0)
2462  return ret;
2463  else if (ret) {
2464  // FIXME: merge with the stuff in mpeg_decode_slice
2465  if (s2->last_picture_ptr || s2->low_delay || s2->pict_type == AV_PICTURE_TYPE_B)
2466  *got_output = 1;
2467  }
2468  }
2469  s2->pict_type = 0;
2470 
2471  if (avctx->err_recognition & AV_EF_EXPLODE && s2->er.error_count)
2472  return AVERROR_INVALIDDATA;
2473 
2474 #if FF_API_FLAG_TRUNCATED
2475  return FFMAX(0, buf_ptr - buf - s2->parse_context.last_index);
2476 #else
2477  return FFMAX(0, buf_ptr - buf);
2478 #endif
2479  }
2480 
2481  input_size = buf_end - buf_ptr;
2482 
2483  if (avctx->debug & FF_DEBUG_STARTCODE)
2484  av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %"PTRDIFF_SPECIFIER" left %d\n",
2485  start_code, buf_ptr - buf, input_size);
2486 
2487  /* prepare data for next start code */
2488  switch (start_code) {
2489  case SEQ_START_CODE:
2490  if (last_code == 0) {
2491  mpeg1_decode_sequence(avctx, buf_ptr, input_size);
2492  if (buf != avctx->extradata)
2493  s->sync = 1;
2494  } else {
2495  av_log(avctx, AV_LOG_ERROR,
2496  "ignoring SEQ_START_CODE after %X\n", last_code);
2497  if (avctx->err_recognition & AV_EF_EXPLODE)
2498  return AVERROR_INVALIDDATA;
2499  }
2500  break;
2501 
2502  case PICTURE_START_CODE:
2503  if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
2504  /* If it's a frame picture, there can't be more than one picture header.
2505  Yet, it does happen and we need to handle it. */
2506  av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n");
2507  break;
2508  }
2509  picture_start_code_seen = 1;
2510 
2511  if (s2->width <= 0 || s2->height <= 0) {
2512  av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n",
2513  s2->width, s2->height);
2514  return AVERROR_INVALIDDATA;
2515  }
2516 
2517  if (s->tmpgexs){
2518  s2->intra_dc_precision= 3;
2519  s2->intra_matrix[0]= 1;
2520  }
2521  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
2522  !avctx->hwaccel && s->slice_count) {
2523  int i;
2524 
2525  avctx->execute(avctx, slice_decode_thread,
2526  s2->thread_context, NULL,
2527  s->slice_count, sizeof(void *));
2528  for (i = 0; i < s->slice_count; i++)
2529  s2->er.error_count += s2->thread_context[i]->er.error_count;
2530  s->slice_count = 0;
2531  }
2532  if (last_code == 0 || last_code == SLICE_MIN_START_CODE) {
2533  ret = mpeg_decode_postinit(avctx);
2534  if (ret < 0) {
2535  av_log(avctx, AV_LOG_ERROR,
2536  "mpeg_decode_postinit() failure\n");
2537  return ret;
2538  }
2539 
2540  /* We have a complete image: we try to decompress it. */
2541  if (mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0)
2542  s2->pict_type = 0;
2543  s->first_slice = 1;
2544  last_code = PICTURE_START_CODE;
2545  } else {
2546  av_log(avctx, AV_LOG_ERROR,
2547  "ignoring pic after %X\n", last_code);
2548  if (avctx->err_recognition & AV_EF_EXPLODE)
2549  return AVERROR_INVALIDDATA;
2550  }
2551  break;
2552  case EXT_START_CODE:
2553  init_get_bits(&s2->gb, buf_ptr, input_size * 8);
2554 
2555  switch (get_bits(&s2->gb, 4)) {
2556  case 0x1:
2557  if (last_code == 0) {
2558  mpeg_decode_sequence_extension(s);
2559  } else {
2560  av_log(avctx, AV_LOG_ERROR,
2561  "ignoring seq ext after %X\n", last_code);
2562  if (avctx->err_recognition & AV_EF_EXPLODE)
2563  return AVERROR_INVALIDDATA;
2564  }
2565  break;
2566  case 0x2:
2567  mpeg_decode_sequence_display_extension(s);
2568  break;
2569  case 0x3:
2570  mpeg_decode_quant_matrix_extension(s2);
2571  break;
2572  case 0x7:
2573  mpeg_decode_picture_display_extension(s);
2574  break;
2575  case 0x8:
2576  if (last_code == PICTURE_START_CODE) {
2577  ret = mpeg_decode_picture_coding_extension(s);
2578  if (ret < 0)
2579  return ret;
2580  } else {
2581  av_log(avctx, AV_LOG_ERROR,
2582  "ignoring pic cod ext after %X\n", last_code);
2583  if (avctx->err_recognition & AV_EF_EXPLODE)
2584  return AVERROR_INVALIDDATA;
2585  }
2586  break;
2587  }
2588  break;
2589  case USER_START_CODE:
2590  mpeg_decode_user_data(avctx, buf_ptr, input_size);
2591  break;
2592  case GOP_START_CODE:
2593  if (last_code == 0) {
2594  s2->first_field = 0;
2595  mpeg_decode_gop(avctx, buf_ptr, input_size);
2596  s->sync = 1;
2597  } else {
2598  av_log(avctx, AV_LOG_ERROR,
2599  "ignoring GOP_START_CODE after %X\n", last_code);
2600  if (avctx->err_recognition & AV_EF_EXPLODE)
2601  return AVERROR_INVALIDDATA;
2602  }
2603  break;
2604  default:
2605  if (start_code >= SLICE_MIN_START_CODE &&
2606  start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) {
2607  if (s2->progressive_sequence && !s2->progressive_frame) {
2608  s2->progressive_frame = 1;
2609  av_log(s2->avctx, AV_LOG_ERROR,
2610  "interlaced frame in progressive sequence, ignoring\n");
2611  }
2612 
2613  if (s2->picture_structure == 0 ||
2614  (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) {
2615  av_log(s2->avctx, AV_LOG_ERROR,
2616  "picture_structure %d invalid, ignoring\n",
2617  s2->picture_structure);
2618  s2->picture_structure = PICT_FRAME;
2619  }
2620 
2621  if (s2->progressive_sequence && !s2->frame_pred_frame_dct)
2622  av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
2623 
2624  if (s2->picture_structure == PICT_FRAME) {
2625  s2->first_field = 0;
2626  s2->v_edge_pos = 16 * s2->mb_height;
2627  } else {
2628  s2->first_field ^= 1;
2629  s2->v_edge_pos = 8 * s2->mb_height;
2630  memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
2631  }
2632  }
2633  if (start_code >= SLICE_MIN_START_CODE &&
2634  start_code <= SLICE_MAX_START_CODE && last_code != 0) {
2635  const int field_pic = s2->picture_structure != PICT_FRAME;
2636  int mb_y = start_code - SLICE_MIN_START_CODE;
2637  last_code = SLICE_MIN_START_CODE;
2638  if (s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16)
2639  mb_y += (*buf_ptr&0xE0)<<2;
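                /* For MPEG-2 pictures taller than 2800 lines the slice start
                 * code alone cannot address every macroblock row, so the top
                 * three row bits come from slice_vertical_position_extension
                 * in the first byte of the slice payload. */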
2640 
2641  mb_y <<= field_pic;
2642  if (s2->picture_structure == PICT_BOTTOM_FIELD)
2643  mb_y++;
2644 
2645  if (buf_end - buf_ptr < 2) {
2646  av_log(s2->avctx, AV_LOG_ERROR, "slice too small\n");
2647  return AVERROR_INVALIDDATA;
2648  }
2649 
2650  if (mb_y >= s2->mb_height) {
2651  av_log(s2->avctx, AV_LOG_ERROR,
2652  "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
2653  return AVERROR_INVALIDDATA;
2654  }
2655 
2656  if (!s2->last_picture_ptr) {
2657  /* Skip B-frames if we do not have reference frames and
2658  * GOP is not closed. */
2659  if (s2->pict_type == AV_PICTURE_TYPE_B) {
2660  if (!s->closed_gop) {
2661  skip_frame = 1;
2662  av_log(s2->avctx, AV_LOG_DEBUG,
2663  "Skipping B slice due to open GOP\n");
2664  break;
2665  }
2666  }
2667  }
2668  if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
2669  s->sync = 1;
2670  if (!s2->next_picture_ptr) {
2671  /* Skip P-frames if we do not have a reference frame or
2672  * we have an invalid header. */
2673  if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) {
2674  skip_frame = 1;
2675  av_log(s2->avctx, AV_LOG_DEBUG,
2676  "Skipping P slice due to !sync\n");
2677  break;
2678  }
2679  }
2680  if ((avctx->skip_frame >= AVDISCARD_NONREF &&
2681  s2->pict_type == AV_PICTURE_TYPE_B) ||
2682  (avctx->skip_frame >= AVDISCARD_NONKEY &&
2683  s2->pict_type != AV_PICTURE_TYPE_I) ||
2684  avctx->skip_frame >= AVDISCARD_ALL) {
2685  skip_frame = 1;
2686  break;
2687  }
2688 
2689  if (!s->mpeg_enc_ctx_allocated)
2690  break;
2691 
2692  if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
2693  if (mb_y < avctx->skip_top ||
2694  mb_y >= s2->mb_height - avctx->skip_bottom)
2695  break;
2696  }
2697 
2698  if (!s2->pict_type) {
2699  av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
2700  if (avctx->err_recognition & AV_EF_EXPLODE)
2701  return AVERROR_INVALIDDATA;
2702  break;
2703  }
2704 
2705  if (s->first_slice) {
2706  skip_frame = 0;
2707  s->first_slice = 0;
2708  if ((ret = mpeg_field_start(s2, buf, buf_size)) < 0)
2709  return ret;
2710  }
2711  if (!s2->current_picture_ptr) {
2712  av_log(avctx, AV_LOG_ERROR,
2713  "current_picture not initialized\n");
2714  return AVERROR_INVALIDDATA;
2715  }
2716 
2717  if (HAVE_THREADS &&
2718  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2719  !avctx->hwaccel) {
2720  int threshold = (s2->mb_height * s->slice_count +
2721  s2->slice_context_count / 2) /
2722  s2->slice_context_count;
2723  av_assert0(avctx->thread_count > 1);
2724  if (threshold <= mb_y) {
2725  MpegEncContext *thread_context = s2->thread_context[s->slice_count];
2726 
2727  thread_context->start_mb_y = mb_y;
2728  thread_context->end_mb_y = s2->mb_height;
2729  if (s->slice_count) {
2730  s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
2731  ret = ff_update_duplicate_context(thread_context, s2);
2732  if (ret < 0)
2733  return ret;
2734  }
2735  init_get_bits(&thread_context->gb, buf_ptr, input_size * 8);
2736  s->slice_count++;
2737  }
2738  buf_ptr += 2; // FIXME add minimum number of bytes per slice
2739  } else {
2740  ret = mpeg_decode_slice(s2, mb_y, &buf_ptr, input_size);
2741  emms_c();
2742 
2743  if (ret < 0) {
2744  if (avctx->err_recognition & AV_EF_EXPLODE)
2745  return ret;
2746  if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
2747  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2748  s2->resync_mb_y, s2->mb_x, s2->mb_y,
2749  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
2750  } else {
2751  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2752  s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
2753  ER_AC_END | ER_DC_END | ER_MV_END);
2754  }
2755  }
2756  }
2757  break;
2758  }
2759  }
2760 }
2761 
2762 static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
2763  int *got_output, AVPacket *avpkt)
2764 {
2765  const uint8_t *buf = avpkt->data;
2766  int ret;
2767  int buf_size = avpkt->size;
2768  Mpeg1Context *s = avctx->priv_data;
2769  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2770 
2771  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
2772  /* special case for last picture */
2773  if (s2->low_delay == 0 && s2->next_picture_ptr) {
2774  int ret = av_frame_ref(picture, s2->next_picture_ptr->f);
2775  if (ret < 0)
2776  return ret;
2777 
2778  s2->next_picture_ptr = NULL;
2779 
2780  *got_output = 1;
2781  }
2782  return buf_size;
2783  }
2784 
2785 #if FF_API_FLAG_TRUNCATED
2786  if (s2->avctx->flags & AV_CODEC_FLAG_TRUNCATED) {
2787  int next = ff_mpeg1_find_frame_end(&s2->parse_context, buf,
2788  buf_size, NULL);
2789 
2790  if (ff_combine_frame(&s2->parse_context, next,
2791  (const uint8_t **) &buf, &buf_size) < 0)
2792  return buf_size;
2793  }
2794 #endif
2795 
2796  if (s->mpeg_enc_ctx_allocated == 0 && ( s2->codec_tag == AV_RL32("VCR2")
2797  || s2->codec_tag == AV_RL32("BW10")
2798  ))
2799  vcr2_init_sequence(avctx);
2800 
2801  s->slice_count = 0;
2802 
2803  if (avctx->extradata && !s->extradata_decoded) {
2804  ret = decode_chunks(avctx, picture, got_output,
2805  avctx->extradata, avctx->extradata_size);
2806  if (*got_output) {
2807  av_log(avctx, AV_LOG_ERROR, "picture in extradata\n");
2808  av_frame_unref(picture);
2809  *got_output = 0;
2810  }
2811  s->extradata_decoded = 1;
2812  if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
2813  s2->current_picture_ptr = NULL;
2814  return ret;
2815  }
2816  }
2817 
2818  ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
2819  if (ret<0 || *got_output) {
2820  s2->current_picture_ptr = NULL;
2821 
2822  if (s->timecode_frame_start != -1 && *got_output) {
2823  char tcbuf[AV_TIMECODE_STR_SIZE];
2824  AVFrameSideData *tcside = av_frame_new_side_data(picture,
2825  AV_FRAME_DATA_GOP_TIMECODE,
2826  sizeof(int64_t));
2827  if (!tcside)
2828  return AVERROR(ENOMEM);
2829  memcpy(tcside->data, &s->timecode_frame_start, sizeof(int64_t));
2830 
2831  av_timecode_make_mpeg_tc_string(tcbuf, s->timecode_frame_start);
2832  av_dict_set(&picture->metadata, "timecode", tcbuf, 0);
2833 
2834  s->timecode_frame_start = -1;
2835  }
2836  }
2837 
2838  return ret;
2839 }
2840 
2841 static void flush(AVCodecContext *avctx)
2842 {
2843  Mpeg1Context *s = avctx->priv_data;
2844 
2845  s->sync = 0;
2846  s->closed_gop = 0;
2847 
2848  ff_mpeg_flush(avctx);
2849 }
2850 
2851 static av_cold int mpeg_decode_end(AVCodecContext *avctx)
2852 {
2853  Mpeg1Context *s = avctx->priv_data;
2854 
2855  if (s->mpeg_enc_ctx_allocated)
2856  ff_mpv_common_end(&s->mpeg_enc_ctx);
2857  av_buffer_unref(&s->a53_buf_ref);
2858  return 0;
2859 }
2860 
2861 const FFCodec ff_mpeg1video_decoder = {
2862  .p.name = "mpeg1video",
2863  CODEC_LONG_NAME("MPEG-1 video"),
2864  .p.type = AVMEDIA_TYPE_VIDEO,
2865  .p.id = AV_CODEC_ID_MPEG1VIDEO,
2866  .priv_data_size = sizeof(Mpeg1Context),
2867  .init = mpeg_decode_init,
2868  .close = mpeg_decode_end,
2869  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2870  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2871 #if FF_API_FLAG_TRUNCATED
2872  AV_CODEC_CAP_TRUNCATED |
2873 #endif
2874  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2875  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2876  .flush = flush,
2877  .p.max_lowres = 3,
2878  UPDATE_THREAD_CONTEXT(mpeg_decode_update_thread_context),
2879  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2880 #if CONFIG_MPEG1_NVDEC_HWACCEL
2881  HWACCEL_NVDEC(mpeg1),
2882 #endif
2883 #if CONFIG_MPEG1_VDPAU_HWACCEL
2884  HWACCEL_VDPAU(mpeg1),
2885 #endif
2886 #if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL
2887  HWACCEL_VIDEOTOOLBOX(mpeg1),
2888 #endif
2889  NULL
2890  },
2891 };
2892 
2893 const FFCodec ff_mpeg2video_decoder = {
2894  .p.name = "mpeg2video",
2895  CODEC_LONG_NAME("MPEG-2 video"),
2896  .p.type = AVMEDIA_TYPE_VIDEO,
2897  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2898  .priv_data_size = sizeof(Mpeg1Context),
2899  .init = mpeg_decode_init,
2900  .close = mpeg_decode_end,
2901  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2902  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2903 #if FF_API_FLAG_TRUNCATED
2904  AV_CODEC_CAP_TRUNCATED |
2905 #endif
2906  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2907  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2908  .flush = flush,
2909  .p.max_lowres = 3,
2910  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mpeg2_video_profiles),
2911  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2912 #if CONFIG_MPEG2_DXVA2_HWACCEL
2913  HWACCEL_DXVA2(mpeg2),
2914 #endif
2915 #if CONFIG_MPEG2_D3D11VA_HWACCEL
2916  HWACCEL_D3D11VA(mpeg2),
2917 #endif
2918 #if CONFIG_MPEG2_D3D11VA2_HWACCEL
2919  HWACCEL_D3D11VA2(mpeg2),
2920 #endif
2921 #if CONFIG_MPEG2_NVDEC_HWACCEL
2922  HWACCEL_NVDEC(mpeg2),
2923 #endif
2924 #if CONFIG_MPEG2_VAAPI_HWACCEL
2925  HWACCEL_VAAPI(mpeg2),
2926 #endif
2927 #if CONFIG_MPEG2_VDPAU_HWACCEL
2928  HWACCEL_VDPAU(mpeg2),
2929 #endif
2930 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
2931  HWACCEL_VIDEOTOOLBOX(mpeg2),
2932 #endif
2933  NULL
2934  },
2935 };
2936 
2937 //legacy decoder
2938 const FFCodec ff_mpegvideo_decoder = {
2939  .p.name = "mpegvideo",
2940  CODEC_LONG_NAME("MPEG-1 video"),
2941  .p.type = AVMEDIA_TYPE_VIDEO,
2942  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2943  .priv_data_size = sizeof(Mpeg1Context),
2944  .init = mpeg_decode_init,
2945  .close = mpeg_decode_end,
2946  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2947  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2948 #if FF_API_FLAG_TRUNCATED
2949  AV_CODEC_CAP_TRUNCATED |
2950 #endif
2951  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2952  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2953  .flush = flush,
2954  .p.max_lowres = 3,
2955 };
2956 
2957 typedef struct IPUContext {
2958  MpegEncContext m;
2959 
2960  int flags;
2961  DECLARE_ALIGNED(32, int16_t, block)[6][64];
2962 } IPUContext;
2963 
2964 static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2965  int *got_frame, AVPacket *avpkt)
2966 {
2967  IPUContext *s = avctx->priv_data;
2968  MpegEncContext *m = &s->m;
2969  GetBitContext *gb = &m->gb;
2970  int ret;
2971 
2972  ret = ff_get_buffer(avctx, frame, 0);
2973  if (ret < 0)
2974  return ret;
2975 
2976  ret = init_get_bits8(gb, avpkt->data, avpkt->size);
2977  if (ret < 0)
2978  return ret;
2979 
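     /* IPU 8-bit picture header, as used below: bits 1:0 intra_dc_precision,
      * bit 2 an extra flag bit skipped per macroblock, bit 4 alternate_scan,
      * bit 5 intra_vlc_format, bit 6 q_scale_type, and bit 7 selects MPEG-1
      * style intra block decoding. */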
2980  s->flags = get_bits(gb, 8);
2981  m->intra_dc_precision = s->flags & 3;
2982  m->q_scale_type = !!(s->flags & 0x40);
2983  m->intra_vlc_format = !!(s->flags & 0x20);
2984  m->alternate_scan = !!(s->flags & 0x10);
2985 
2986  if (s->flags & 0x10) {
2987  ff_init_scantable(m->idsp.idct_permutation,
2988  &m->intra_scantable, ff_alternate_vertical_scan);
2989  } else {
2990  ff_init_scantable(m->idsp.idct_permutation,
2991  &m->intra_scantable, ff_zigzag_direct);
2992  }
2993 
2994  m->last_dc[0] = m->last_dc[1] = m->last_dc[2] = 1 << (7 + (s->flags & 3));
2995  m->qscale = 1;
2996 
2997  for (int y = 0; y < avctx->height; y += 16) {
2998  int intraquant;
2999 
3000  for (int x = 0; x < avctx->width; x += 16) {
3001  if (x || y) {
3002  if (!get_bits1(gb))
3003  return AVERROR_INVALIDDATA;
3004  }
3005  if (get_bits1(gb)) {
3006  intraquant = 0;
3007  } else {
3008  if (!get_bits1(gb))
3009  return AVERROR_INVALIDDATA;
3010  intraquant = 1;
3011  }
3012 
3013  if (s->flags & 4)
3014  skip_bits1(gb);
3015 
3016  if (intraquant)
3017  m->qscale = mpeg_get_qscale(m);
3018 
3019  memset(s->block, 0, sizeof(s->block));
3020 
3021  for (int n = 0; n < 6; n++) {
3022  if (s->flags & 0x80) {
3023  ret = ff_mpeg1_decode_block_intra(&m->gb,
3024  m->intra_matrix,
3025  m->intra_scantable.permutated,
3026  m->last_dc, s->block[n],
3027  n, m->qscale);
3028  if (ret >= 0)
3029  m->block_last_index[n] = ret;
3030  } else {
3031  ret = mpeg2_decode_block_intra(m, s->block[n], n);
3032  }
3033 
3034  if (ret < 0)
3035  return ret;
3036  }
3037 
3038  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
3039  frame->linesize[0], s->block[0]);
3040  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
3041  frame->linesize[0], s->block[1]);
3042  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
3043  frame->linesize[0], s->block[2]);
3044  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
3045  frame->linesize[0], s->block[3]);
3046  m->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
3047  frame->linesize[1], s->block[4]);
3048  m->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
3049  frame->linesize[2], s->block[5]);
3050  }
3051  }
3052 
3053  align_get_bits(gb);
3054  if (get_bits_left(gb) != 32)
3055  return AVERROR_INVALIDDATA;
3056 
3057  frame->pict_type = AV_PICTURE_TYPE_I;
3058  frame->key_frame = 1;
3059  *got_frame = 1;
3060 
3061  return avpkt->size;
3062 }
3063 
3064 static av_cold int ipu_decode_init(AVCodecContext *avctx)
3065 {
3066  IPUContext *s = avctx->priv_data;
3067  MpegEncContext *m = &s->m;
3068 
3069  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
3070 
3071  ff_mpv_decode_init(m, avctx);
3072  ff_mpv_idct_init(m);
3073  ff_mpeg12_init_vlcs();
3074 
3075  for (int i = 0; i < 64; i++) {
3076  int j = m->idsp.idct_permutation[i];
3077  int v = ff_mpeg1_default_intra_matrix[i];
3078  m->intra_matrix[j] = v;
3079  m->chroma_intra_matrix[j] = v;
3080  }
3081 
3082  for (int i = 0; i < 64; i++) {
3083  int j = m->idsp.idct_permutation[i];
3084  int v = ff_mpeg1_default_non_intra_matrix[i];
3085  m->inter_matrix[j] = v;
3086  m->chroma_inter_matrix[j] = v;
3087  }
3088 
3089  return 0;
3090 }
3091 
3092 static av_cold int ipu_decode_end(AVCodecContext *avctx)
3093 {
3094  IPUContext *s = avctx->priv_data;
3095 
3096  ff_mpv_common_end(&s->m);
3097 
3098  return 0;
3099 }
3100 
3101 const FFCodec ff_ipu_decoder = {
3102  .p.name = "ipu",
3103  CODEC_LONG_NAME("IPU Video"),
3104  .p.type = AVMEDIA_TYPE_VIDEO,
3105  .p.id = AV_CODEC_ID_IPU,
3106  .priv_data_size = sizeof(IPUContext),
3107  .init = ipu_decode_init,
3108  FF_CODEC_DECODE_CB(ipu_decode_frame),
3109  .close = ipu_decode_end,
3110  .p.capabilities = AV_CODEC_CAP_DR1,
3111  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
3112 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
vcr2_init_sequence
static int vcr2_init_sequence(AVCodecContext *avctx)
Definition: mpeg12dec.c:2146
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:682
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1369
MB_TYPE_L0
#define MB_TYPE_L0
Definition: mpegutils.h:60
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:263
Mpeg1Context::has_afd
int has_afd
Definition: mpeg12dec.c:72
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
level
uint8_t level
Definition: svq3.c:204
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
Mpeg1Context::a53_buf_ref
AVBufferRef * a53_buf_ref
Definition: mpeg12dec.c:70
ff_mpeg2_aspect
const AVRational ff_mpeg2_aspect[16]
Definition: mpeg12data.c:380
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:602
mpeg_decode_a53_cc
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2198
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:839
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:502
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:114
FF_MPV_QSCALE_TYPE_MPEG2
#define FF_MPV_QSCALE_TYPE_MPEG2
Definition: mpegvideodec.h:41
mem_internal.h
mpeg_decode_frame
static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_output, AVPacket *avpkt)
Definition: mpeg12dec.c:2762
MpegEncContext::gb
GetBitContext gb
Definition: mpegvideo.h:431
AV_EF_COMPLIANT
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
Definition: defs.h:55
SEQ_END_CODE
#define SEQ_END_CODE
Definition: mpeg12.h:28
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:679
check_scantable_index
#define check_scantable_index(ctx, x)
Definition: mpeg12dec.c:142
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
MT_FIELD
#define MT_FIELD
Definition: mpeg12dec.c:654
EXT_START_CODE
#define EXT_START_CODE
Definition: cavs.h:39
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:265
ff_mbincr_vlc
VLC ff_mbincr_vlc
Definition: mpeg12.c:123
av_div_q
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
matrix
Definition: vc1dsp.c:42
AVPanScan
Pan Scan area.
Definition: defs.h:97
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1351
SLICE_MAX_START_CODE
#define SLICE_MAX_START_CODE
Definition: cavs.h:38
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:48
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
ipu_decode_init
static av_cold int ipu_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:3064
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:490
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:198
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:569
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
ff_mpegvideo_decoder
const FFCodec ff_mpegvideo_decoder
Definition: mpeg12dec.c:2938
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:374
ipu_decode_end
static av_cold int ipu_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:3092
mpeg_decode_mb
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpeg12dec.c:659
Mpeg1Context::closed_gop
int closed_gop
Definition: mpeg12dec.c:81
mpeg2_decode_block_intra
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:483
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:177
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:47
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:247
FFCodec
Definition: codec_internal.h:119
mpeg2_fast_decode_block_intra
static int mpeg2_fast_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
Definition: mpeg12dec.c:569
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:822
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:857
reverse.h
mpegvideo.h
MB_TYPE_L1
#define MB_TYPE_L1
Definition: mpegutils.h:61
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Mpeg1Context::first_slice
int first_slice
Definition: mpeg12dec.c:83
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:35
mpeg_decode_postinit
static int mpeg_decode_postinit(AVCodecContext *avctx)
Definition: mpeg12dec.c:1175
ff_add_cpb_side_data
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:1022
mpegutils.h
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
ER_MV_ERROR
#define ER_MV_ERROR
Definition: error_resilience.h:33
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:649
thread.h
ff_mb_pat_vlc
VLC ff_mb_pat_vlc
Definition: mpeg12.c:126
SEQ_START_CODE
#define SEQ_START_CODE
Definition: mpeg12.h:29
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1328
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:267
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
ff_mpeg2_rl_vlc
RL_VLC_ELEM ff_mpeg2_rl_vlc[674]
Definition: mpeg12.c:129
Mpeg1Context::save_aspect
AVRational save_aspect
Definition: mpeg12dec.c:75
MpegEncContext::intra_scantable
ScanTable intra_scantable
Definition: mpegvideo.h:84
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1735
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
MB_TYPE_ZERO_MV
#define MB_TYPE_ZERO_MV
Definition: mpeg12dec.c:88
MT_DMV
#define MT_DMV
Definition: mpeg12dec.c:657
ParseContext
Definition: parser.h:28
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:1010
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:123
decode_chunks
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2430
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1698
mpeg_decode_quant_matrix_extension
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
Definition: mpeg12dec.c:1495
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1466
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
wrap
#define wrap(func)
Definition: neontest.h:65
timecode.h
GetBitContext
Definition: get_bits.h:61
AV_EF_BITSTREAM
#define AV_EF_BITSTREAM
detect bitstream specification deviations
Definition: defs.h:49
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:92
slice_decode_thread
static int slice_decode_thread(AVCodecContext *c, void *arg)
Definition: mpeg12dec.c:1957
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:478
IDCTDSPContext::idct_put
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:63
MB_TYPE_CBP
#define MB_TYPE_CBP
Definition: mpegutils.h:64
val
static double val(void *priv, double ch)
Definition: aeval.c:77
Mpeg1Context::tmpgexs
int tmpgexs
Definition: mpeg12dec.c:82
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:280
mpeg12_pixfmt_list_444
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
Definition: mpeg12dec.c:1147
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:586
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:495
mpeg1_decode_sequence
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2058
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
HAS_CBP
#define HAS_CBP(a)
Definition: mpegutils.h:94
AVRational::num
int num
Numerator.
Definition: rational.h:59
GOP_START_CODE
#define GOP_START_CODE
Definition: mpeg12.h:30
IPUContext
Definition: mpeg12dec.c:2957
mpeg1_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:1107
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:788
mpeg12.h
mpegvideodec.h
ff_mpeg2video_decoder
const FFCodec ff_mpeg2video_decoder
Definition: mpeg12dec.c:2893
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
Mpeg1Context::frame_rate_index
unsigned frame_rate_index
Definition: mpeg12dec.c:79
ipu_decode_frame
static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mpeg12dec.c:2964
ER_DC_ERROR
#define ER_DC_ERROR
Definition: error_resilience.h:32
av_cold
#define av_cold
Definition: attributes.h:90
mpeg2_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:1118
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
mpeg1_decode_picture
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1315
flush
static void flush(AVCodecContext *avctx)
Definition: mpeg12dec.c:2841
Mpeg1Context::save_progressive_seq
int save_progressive_seq
Definition: mpeg12dec.c:76
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:500
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:694
A53_MAX_CC_COUNT
#define A53_MAX_CC_COUNT
Definition: mpeg12dec.c:61
Mpeg1Context::repeat_field
int repeat_field
Definition: mpeg12dec.c:66
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:298
stereo3d.h
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:256
ff_mpeg1_aspect
const float ff_mpeg1_aspect[16]
Definition: mpeg12data.c:359
AVCodecContext::ticks_per_frame
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:530
s1
#define s1
Definition: regdef.h:38
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2010
Mpeg1Context::mpeg_enc_ctx_allocated
int mpeg_enc_ctx_allocated
Definition: mpeg12dec.c:65
SHOW_SBITS
#define SHOW_SBITS(name, gb, num)
Definition: get_bits.h:212
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:363
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:47
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
Mpeg1Context::aspect_ratio_info
unsigned aspect_ratio_info
Definition: mpeg12dec.c:74
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
mpeg_decode_sequence_display_extension
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1414
Mpeg1Context::pan_scan
AVPanScan pan_scan
Definition: mpeg12dec.c:67
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:359
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
decode.h
mpeg12_pixfmt_list_422
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
Definition: mpeg12dec.c:1142
SKIP_BITS
#define SKIP_BITS(name, gb, num)
Definition: get_bits.h:193
IS_INTRA
#define IS_INTRA(x, y)
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1223
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:126
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:264
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:408
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
Mpeg1Context::rc_buffer_size
int rc_buffer_size
Definition: mpeg12dec.c:77
MB_PTYPE_VLC_BITS
#define MB_PTYPE_VLC_BITS
Definition: mpeg12vlc.h:39
Mpeg1Context::save_width
int save_width
Definition: mpeg12dec.c:76
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:149
NULL
#define NULL
Definition: coverity.c:32
run
uint8_t run
Definition: svq3.c:203
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:982
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
ER_AC_ERROR
#define ER_AC_ERROR
Definition: error_resilience.h:31
SLICE_MIN_START_CODE
#define SLICE_MIN_START_CODE
Definition: mpeg12.h:32
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:342
Mpeg1Context::sync
int sync
Definition: mpeg12dec.c:80
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:682
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCHROMA_LOC_TOPLEFT
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
Definition: pixfmt.h:684
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:448
mpeg_decode_picture_display_extension
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1438
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:299
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:303
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
profiles.h
AV_CODEC_FLAG_TRUNCATED
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
Definition: avcodec.h:271
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
MB_TYPE_QUANT
#define MB_TYPE_QUANT
Definition: mpegutils.h:63
avpriv_find_start_code
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
lowres
static int lowres
Definition: ffplay.c:335
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:647
ff_mpeg1_rl_vlc
RL_VLC_ELEM ff_mpeg1_rl_vlc[680]
Definition: mpeg12.c:128
MB_BTYPE_VLC_BITS
#define MB_BTYPE_VLC_BITS
Definition: mpeg12vlc.h:40
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:273
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
mpeg12codecs.h
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:787
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
AVCodecContext::level
int level
level
Definition: avcodec.h:1676
Mpeg1Context::save_height
int save_height
Definition: mpeg12dec.c:76
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::idsp
IDCTDSPContext idsp
Definition: mpegvideo.h:219
ff_mb_ptype_vlc
VLC ff_mb_ptype_vlc
Definition: mpeg12.c:124
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
quant_matrix_rebuild
static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm, const uint8_t *new_perm)
Definition: mpeg12dec.c:1095
ff_mpeg1_find_frame_end
int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size, AVCodecParserContext *s)
Find the end of the current frame in the bitstream.
Definition: mpeg12.c:175
startcode.h
s2
#define s2
Definition: regdef.h:39
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:75
check_marker
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
Definition: mpegvideodec.h:73
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:485
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1450
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:259
AVPacket::size
int size
Definition: packet.h:375
dc
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:115
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:196
AV_CODEC_ID_IPU
@ AV_CODEC_ID_IPU
Definition: codec_id.h:309
AV_FRAME_DATA_PANSCAN
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:53
RL_VLC_ELEM
Definition: vlc.h:37
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:353
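A minimal usage sketch, assuming only the public libavutil frame API shown above; the helper name keep_frame is hypothetical:
#include <libavutil/frame.h>
/* Hypothetical helper: keep an extra reference to a decoded frame. */
static int keep_frame(AVFrame *dst, const AVFrame *src)
{
    av_frame_unref(dst);           /* drop whatever dst referenced before   */
    return av_frame_ref(dst, src); /* 0 on success, negative AVERROR on error */
}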
MT_FRAME
#define MT_FRAME
Definition: mpeg12dec.c:655
codec_internal.h
shift
static int shift(int a, int b)
Definition: bonk.c:253
IPUContext::flags
int flags
Definition: mpeg12dec.c:2960
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:297
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:106
ff_mpeg1video_decoder
const FFCodec ff_mpeg1video_decoder
Definition: mpeg12dec.c:2861
AV_RB32
#define AV_RB32(p)
Read a 32-bit big-endian value from p.
Definition: bytestream.h:96
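A small sketch of the kind of check such a macro enables, assuming AV_RB32 from libavutil/intreadwrite.h and the MPEG sequence-header start code 0x000001B3; the helper name is hypothetical:
#include <stdint.h>
#include <libavutil/intreadwrite.h>
/* Illustrative only: does p point at an MPEG-1/2 sequence header start code? */
static int is_sequence_header(const uint8_t *p)
{
    return AV_RB32(p) == 0x000001B3; /* 4 bytes read big-endian */
}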
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: codec_internal.h:54
AVFrameSideData::data
uint8_t * data
Definition: frame.h:233
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:55
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1478
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:532
PICTURE_START_CODE
#define PICTURE_START_CODE
Definition: mpeg12.h:31
USER_START_CODE
#define USER_START_CODE
Definition: cavs.h:40
AVCodecContext::skip_bottom
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
Definition: avcodec.h:913
AVCodecHWConfigInternal
Definition: hwconfig.h:29
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:271
MB_TYPE_INTERLACED
#define MB_TYPE_INTERLACED
Definition: mpegutils.h:51
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:540
height
#define height
Mpeg1Context::has_stereo3d
int has_stereo3d
Definition: mpeg12dec.c:69
mpeg_decode_init
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:1049
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:117
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
mpegvideodata.h
attributes.h
ff_mpeg1_decode_block_intra
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, const uint8_t *scantable, int last_dc[3], int16_t *block, int index, int qscale)
Definition: mpeg12.c:238
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:266
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:538
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:333
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
ff_combine_frame
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
Combine the (truncated) bitstream to a complete frame.
Definition: parser.c:199
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1477
ff_mpeg2_video_profiles
const AVProfile ff_mpeg2_video_profiles[]
Definition: profiles.c:100
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:116
MB_TYPE_L0L1
#define MB_TYPE_L0L1
Definition: mpegutils.h:62
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:302
ff_init_scantable
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: mpegvideo.c:321
MpegEncContext::block_last_index
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:75
mpeg_decode_gop
static void mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2403
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
MpegEncContext::chroma_inter_matrix
uint16_t chroma_inter_matrix[64]
Definition: mpegvideo.h:300
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
Definition: filter_design.txt:178
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:331
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1847
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:63
btype2mb_type
static const uint32_t btype2mb_type[11]
Definition: mpeg12dec.c:100
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:499
AVHWAccel::decode_slice
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2159
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
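A sketch of peeking versus consuming bits with the libavcodec-internal GetBitContext API (get_bits.h is not an installed header); the helper name is hypothetical:
#include "get_bits.h"   /* libavcodec-internal */
/* Peek at the next 8 bits of a buffer without consuming them, then skip them. */
static int peek_byte(const uint8_t *buf, int size)
{
    GetBitContext gb;
    int v;
    if (init_get_bits8(&gb, buf, size) < 0)
        return -1;
    v = show_bits(&gb, 8);  /* look ahead, read position unchanged */
    skip_bits(&gb, 8);      /* now actually consume the 8 bits     */
    return v;
}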
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:42
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:131
IS_QUANT
#define IS_QUANT(a)
Definition: mpegutils.h:88
ff_mpeg12_init_vlcs
av_cold void ff_mpeg12_init_vlcs(void)
Definition: mpeg12.c:164
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1335
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_d2q
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
Definition: rational.c:106
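A minimal sketch; the bound 100000 is an arbitrary choice for this example:
#include <stdio.h>
#include <libavutil/rational.h>
int main(void)
{
    /* 100000 bounds the magnitude of num/den. */
    AVRational r = av_d2q(23.976, 100000);
    printf("%d/%d\n", r.num, r.den);  /* a close rational approximation of 23.976 */
    return 0;
}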
MB_PAT_VLC_BITS
#define MB_PAT_VLC_BITS
Definition: mpeg12vlc.h:38
mpeg1_decode_block_inter
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:151
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:487
ptype2mb_type
static const uint32_t ptype2mb_type[7]
Definition: mpeg12dec.c:90
IPUContext::m
MpegEncContext m
Definition: mpeg12dec.c:2958
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:211
MpegEncContext::intra_vlc_format
int intra_vlc_format
Definition: mpegvideo.h:450
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:989
MAX_INDEX
#define MAX_INDEX
Definition: mpeg12dec.c:141
AVCodecContext::height
int height
Definition: avcodec.h:571
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:608
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:644
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:77
Mpeg1Context::stereo3d
AVStereo3D stereo3d
Definition: mpeg12dec.c:68
idctdsp.h
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:728
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ff_mpeg12_frame_rate_tab
const AVRational ff_mpeg12_frame_rate_tab[]
Definition: mpeg12framerate.c:24
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
pred
static const float pred[4]
Definition: siprdata.h:259
AV_FRAME_DATA_GOP_TIMECODE
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
Definition: frame.h:125
frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:147
mpeg1_fast_decode_block_inter
static int mpeg1_fast_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
Definition: mpeg12dec.c:239
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:683
TEX_VLC_BITS
#define TEX_VLC_BITS
Definition: dvdec.c:146
ff_thread_finish_setup
void ff_thread_finish_setup(AVCodecContext *avctx)
Called by frame-threaded decoders once all per-frame header and context setup is complete, so the next decoding thread may start on the following frame.
left
Definition: snow.txt:386
mpeg_get_pixelformat
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
Definition: mpeg12dec.c:1152
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:322
AV_RL32
#define AV_RL32(p)
Read a 32-bit little-endian value from p.
Definition: bytestream.h:92
mpeg12data.h
mpeg2_fast_decode_block_non_intra
static int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
Definition: mpeg12dec.c:412
mpeg_field_start
static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1572
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:58
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:844
AVCodecContext
main external API structure.
Definition: avcodec.h:398
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1485
av_timecode_make_mpeg_tc_string
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
Definition: timecode.c:167
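A hedged sketch of formatting a 25-bit GOP timecode (as carried in AV_FRAME_DATA_GOP_TIMECODE side data), assuming AV_TIMECODE_STR_SIZE from libavutil/timecode.h; the helper name is hypothetical:
#include <stdint.h>
#include <stdio.h>
#include <libavutil/timecode.h>
/* Format a 25-bit MPEG GOP timecode into "HH:MM:SS[:;]FF". */
static void print_gop_timecode(uint32_t tc25bit)
{
    char buf[AV_TIMECODE_STR_SIZE];
    printf("%s\n", av_timecode_make_mpeg_tc_string(buf, tc25bit));
}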
MpegEncContext::intra_dc_precision
int intra_dc_precision
Definition: mpegvideo.h:444
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1517
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
mpeg12dec.h
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:683
AVRational::den
int den
Denominator.
Definition: rational.h:60
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1550
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:625
ff_mb_btype_vlc
VLC ff_mb_btype_vlc
Definition: mpeg12.c:125
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:133
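A portable sketch of what a helper of this shape does (mathops.h is internal and uses a shift trick instead); this variant is only for illustration:
#include <stdint.h>
/* Interpret the low 'bits' bits of val as a signed two's-complement number. */
static int sign_extend_sketch(int val, unsigned bits)
{
    const unsigned m = 1u << (bits - 1);              /* sign bit of the field */
    return (int)((unsigned)val & (m - 1)) - (int)((unsigned)val & m);
}
/* e.g. sign_extend_sketch(31, 5) == -1, sign_extend_sketch(5, 5) == 5 */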
ff_thread_get_format
FF_DISABLE_DEPRECATION_WARNINGS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:1042
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:487
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
Mpeg1Context::slice_count
int slice_count
Definition: mpeg12dec.c:73
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:82
VLC::table
VLCElem * table
Definition: vlc.h:33
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1849
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
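A small sketch of exact rational arithmetic, of the kind used to scale a table frame rate by an extension factor; the values are illustrative:
#include <stdio.h>
#include <libavutil/rational.h>
int main(void)
{
    AVRational base = { 24000, 1001 };
    AVRational ext  = { 2, 1 };
    AVRational r    = av_mul_q(base, ext);  /* reduced result: 48000/1001 */
    printf("%d/%d\n", r.num, r.den);
    return 0;
}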
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
av_buffer_realloc
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
Definition: buffer.c:183
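A hedged sketch of a grow-and-append pattern on an AVBufferRef, similar in spirit to accumulating side data; the helper name is hypothetical and assumes the size_t AVBufferRef::size of current libavutil:
#include <stdint.h>
#include <string.h>
#include <libavutil/buffer.h>
/* Append 'size' bytes to the buffer referenced by *pbuf, growing it.
 * Returns 0 on success, negative AVERROR on error. */
static int append_bytes(AVBufferRef **pbuf, const uint8_t *data, size_t size)
{
    size_t old = *pbuf ? (*pbuf)->size : 0;
    int ret = av_buffer_realloc(pbuf, old + size);
    if (ret < 0)
        return ret;
    memcpy((*pbuf)->data + old, data, size);
    return 0;
}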
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1327
AVHWAccel::start_frame
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2132
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:586
get_dmv
static int get_dmv(MpegEncContext *s)
Definition: mpeg12dec.c:645
tc
#define tc
Definition: regdef.h:69
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mpeg_decode_end
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2851
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
MpegEncContext::inter_scantable
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
Definition: mpegvideo.h:79
IDCTDSPContext::idct_permutation
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:87
ff_ipu_decoder
const FFCodec ff_ipu_decoder
Definition: mpeg12dec.c:3101
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:34
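A hedged sketch of attaching stereo 3D side data to an output frame, e.g. to flag top-bottom packing; the helper name is hypothetical:
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/stereo3d.h>
/* Attach top-bottom stereo packing information to a frame. */
static int tag_top_bottom(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type = AV_STEREO3D_TOPBOTTOM;
    return 0;
}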
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:231
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:36
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:139
ff_mv_vlc
VLC ff_mv_vlc
Definition: mpeg12.c:118
MpegEncContext::q_scale_type
int q_scale_type
Definition: mpegvideo.h:448
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:423
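A brief sketch of the LSB-first packing using the MKTAG macro from libavutil; the tag 'dvsd' is only an illustrative value and the helper name is hypothetical:
#include <libavutil/common.h>
/* MKTAG('A','B','C','D') builds the same 32-bit value described above,
 * so a tag comparison can be written as: */
static int is_dvsd(unsigned int codec_tag)
{
    return codec_tag == MKTAG('d', 'v', 's', 'd');
}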
Mpeg1Context::mpeg_enc_ctx
MpegEncContext mpeg_enc_ctx
Definition: mpeg12dec.c:64
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:162
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:259
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:425
ScanTable::permutated
uint8_t permutated[64]
Definition: mpegvideo.h:63
ff_er_frame_end
void ff_er_frame_end(ERContext *s)
Definition: error_resilience.c:892
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
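A minimal sketch of setting a dictionary entry, e.g. a "timecode" key of the kind decoders attach to frame metadata; the helper name is hypothetical:
#include <libavutil/dict.h>
/* Store a timecode string under the "timecode" key.
 * With flags = 0 both key and value are copied, so tc may be a stack buffer. */
static int set_timecode(AVDictionary **metadata, const char *tc)
{
    return av_dict_set(metadata, "timecode", tc, 0);
}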
mpeg_get_qscale
static int mpeg_get_qscale(MpegEncContext *s)
Definition: mpegvideodec.h:64
mpeg_decode_sequence_extension
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1364
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
mpeg_er.h
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:571
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
Mpeg1Context::frame_rate_ext
AVRational frame_rate_ext
Definition: mpeg12dec.c:78
mpeg_decode_motion
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
Definition: mpeg12dec.c:115
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
IPUContext::block
int16_t block[6][64]
Definition: mpeg12dec.c:2961
mpeg_decode_user_data
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2333
h
h
Definition: vp9dsp_template.c:2038
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:146
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:34
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:173
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
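A hedged sketch of validating a sample aspect ratio parsed from a bitstream and falling back to 0/1 ("unknown") when it is not sane; the helper name is hypothetical:
#include <libavutil/imgutils.h>
#include <libavutil/rational.h>
/* Reset a bogus SAR instead of propagating it. */
static void sanitize_sar(unsigned w, unsigned h, AVRational *sar)
{
    if (av_image_check_sar(w, h, *sar) < 0)
        *sar = (AVRational){ 0, 1 };
}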
MV_VLC_BITS
#define MV_VLC_BITS
Definition: mpeg12vlc.h:34
Mpeg1Context::timecode_frame_start
int64_t timecode_frame_start
Definition: mpeg12dec.c:85
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:145
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:72
MpegEncContext::alternate_scan
int alternate_scan
Definition: mpegvideo.h:451
DECODE_SLICE_OK
#define DECODE_SLICE_OK
Definition: mpeg12dec.c:1676
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
DECODE_SLICE_ERROR
#define DECODE_SLICE_ERROR
Definition: mpeg12dec.c:1675
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:70
load_matrix
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
Definition: mpeg12dec.c:1472
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:768
decode_dc
static int decode_dc(GetBitContext *gb, int component)
Definition: mpeg12dec.h:28
Mpeg1Context::afd
uint8_t afd
Definition: mpeg12dec.c:71
Mpeg1Context
Definition: mpeg12dec.c:63
MpegEncContext::chroma_intra_matrix
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:298
mpeg_decode_picture_coding_extension
static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1509
Mpeg1Context::extradata_decoded
int extradata_decoded
Definition: mpeg12dec.c:84
mpeg2_decode_block_non_intra
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:323
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:66
MBINCR_VLC_BITS
#define MBINCR_VLC_BITS
Definition: mpeg12vlc.h:37
mpeg_decode_slice
static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.
Definition: mpeg12dec.c:1684
re
float re
Definition: fft.c:79
rl_vlc
static VLC rl_vlc[2]
Definition: mobiclip.c:277