FFmpeg
mpegvideo.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 
35 #include "avcodec.h"
36 #include "blockdsp.h"
37 #include "idctdsp.h"
38 #include "mathops.h"
39 #include "mpeg_er.h"
40 #include "mpegutils.h"
41 #include "mpegvideo.h"
42 #include "mpegvideodata.h"
43 
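/* MPEG-1 intra dequantization: the DC coefficient is scaled by the luma/chroma
 * DC scale factor; every other coefficient is multiplied by qscale and the
 * intra quantization matrix (>> 3) and then rounded down to an odd value via
 * (level - 1) | 1, the classic MPEG-1 mismatch control. */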
44 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
45  int16_t *block, int n, int qscale)
46 {
47  int i, level, nCoeffs;
48  const uint16_t *quant_matrix;
49 
50  nCoeffs= s->block_last_index[n];
51 
52  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
53  /* XXX: only MPEG-1 */
54  quant_matrix = s->intra_matrix;
55  for(i=1;i<=nCoeffs;i++) {
56  int j= s->intra_scantable.permutated[i];
57  level = block[j];
58  if (level) {
59  if (level < 0) {
60  level = -level;
61  level = (int)(level * qscale * quant_matrix[j]) >> 3;
62  level = (level - 1) | 1;
63  level = -level;
64  } else {
65  level = (int)(level * qscale * quant_matrix[j]) >> 3;
66  level = (level - 1) | 1;
67  }
68  block[j] = level;
69  }
70  }
71 }
72 
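/* MPEG-1 inter dequantization: ((2 * level + 1) * qscale * matrix) >> 4, with
 * the same rounding down to an odd value; the DC coefficient gets no special
 * treatment here. */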
73 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
74  int16_t *block, int n, int qscale)
75 {
76  int i, level, nCoeffs;
77  const uint16_t *quant_matrix;
78 
79  nCoeffs= s->block_last_index[n];
80 
81  quant_matrix = s->inter_matrix;
82  for(i=0; i<=nCoeffs; i++) {
83  int j= s->intra_scantable.permutated[i];
84  level = block[j];
85  if (level) {
86  if (level < 0) {
87  level = -level;
88  level = (((level << 1) + 1) * qscale *
89  ((int) (quant_matrix[j]))) >> 4;
90  level = (level - 1) | 1;
91  level = -level;
92  } else {
93  level = (((level << 1) + 1) * qscale *
94  ((int) (quant_matrix[j]))) >> 4;
95  level = (level - 1) | 1;
96  }
97  block[j] = level;
98  }
99  }
100 }
101 
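/* MPEG-2 intra dequantization: qscale is first mapped through the non-linear
 * quantizer scale table (or simply doubled), then each AC coefficient is
 * multiplied by qscale * matrix and shifted right by 4. No odd-rounding is
 * done; the bitexact variant below adds the MPEG-2 mismatch control. */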
102 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
103  int16_t *block, int n, int qscale)
104 {
105  int i, level, nCoeffs;
106  const uint16_t *quant_matrix;
107 
108  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
109  else qscale <<= 1;
110 
111  if(s->alternate_scan) nCoeffs= 63;
112  else nCoeffs= s->block_last_index[n];
113 
114  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
115  quant_matrix = s->intra_matrix;
116  for(i=1;i<=nCoeffs;i++) {
117  int j= s->intra_scantable.permutated[i];
118  level = block[j];
119  if (level) {
120  if (level < 0) {
121  level = -level;
122  level = (int)(level * qscale * quant_matrix[j]) >> 4;
123  level = -level;
124  } else {
125  level = (int)(level * qscale * quant_matrix[j]) >> 4;
126  }
127  block[j] = level;
128  }
129  }
130 }
131 
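/* Bitexact variant of the MPEG-2 intra dequantizer: identical arithmetic, but
 * it also accumulates the coefficient sum and flips the LSB of block[63]
 * whenever that sum is even (MPEG-2 mismatch control). */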
132 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
133  int16_t *block, int n, int qscale)
134 {
135  int i, level, nCoeffs;
136  const uint16_t *quant_matrix;
137  int sum=-1;
138 
139  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
140  else qscale <<= 1;
141 
142  if(s->alternate_scan) nCoeffs= 63;
143  else nCoeffs= s->block_last_index[n];
144 
145  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
146  sum += block[0];
147  quant_matrix = s->intra_matrix;
148  for(i=1;i<=nCoeffs;i++) {
149  int j= s->intra_scantable.permutated[i];
150  level = block[j];
151  if (level) {
152  if (level < 0) {
153  level = -level;
154  level = (int)(level * qscale * quant_matrix[j]) >> 4;
155  level = -level;
156  } else {
157  level = (int)(level * qscale * quant_matrix[j]) >> 4;
158  }
159  block[j] = level;
160  sum+=level;
161  }
162  }
163  block[63]^=sum&1;
164 }
165 
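/* MPEG-2 inter dequantization: ((2 * level + 1) * qscale * matrix) >> 5, with
 * the same block[63] parity fixup as the bitexact intra version. */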
166 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
167  int16_t *block, int n, int qscale)
168 {
169  int i, level, nCoeffs;
170  const uint16_t *quant_matrix;
171  int sum=-1;
172 
173  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
174  else qscale <<= 1;
175 
176  if(s->alternate_scan) nCoeffs= 63;
177  else nCoeffs= s->block_last_index[n];
178 
179  quant_matrix = s->inter_matrix;
180  for(i=0; i<=nCoeffs; i++) {
181  int j= s->intra_scantable.permutated[i];
182  level = block[j];
183  if (level) {
184  if (level < 0) {
185  level = -level;
186  level = (((level << 1) + 1) * qscale *
187  ((int) (quant_matrix[j]))) >> 5;
188  level = -level;
189  } else {
190  level = (((level << 1) + 1) * qscale *
191  ((int) (quant_matrix[j]))) >> 5;
192  }
193  block[j] = level;
194  sum+=level;
195  }
196  }
197  block[63]^=sum&1;
198 }
199 
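/* H.263/MPEG-4 style intra dequantization: level * (2 * qscale) +/- qadd with
 * qadd = (qscale - 1) | 1. In AIC mode the DC coefficient is left untouched
 * and qadd is 0; with AC prediction all 63 AC coefficients are processed
 * instead of stopping at block_last_index. */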
200 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
201  int16_t *block, int n, int qscale)
202 {
203  int i, level, qmul, qadd;
204  int nCoeffs;
205 
206  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
207 
208  qmul = qscale << 1;
209 
210  if (!s->h263_aic) {
211  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
212  qadd = (qscale - 1) | 1;
213  }else{
214  qadd = 0;
215  }
216  if(s->ac_pred)
217  nCoeffs=63;
218  else
219  nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
220 
221  for(i=1; i<=nCoeffs; i++) {
222  level = block[i];
223  if (level) {
224  if (level < 0) {
225  level = level * qmul - qadd;
226  } else {
227  level = level * qmul + qadd;
228  }
229  block[i] = level;
230  }
231  }
232 }
233 
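/* H.263 style inter dequantization: the same level * qmul +/- qadd formula,
 * applied to every coefficient from the DC up to block_last_index. */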
234 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
235  int16_t *block, int n, int qscale)
236 {
237  int i, level, qmul, qadd;
238  int nCoeffs;
239 
240  av_assert2(s->block_last_index[n]>=0);
241 
242  qadd = (qscale - 1) | 1;
243  qmul = qscale << 1;
244 
245  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
246 
247  for(i=0; i<=nCoeffs; i++) {
248  level = block[i];
249  if (level) {
250  if (level < 0) {
251  level = level * qmul - qadd;
252  } else {
253  level = level * qmul + qadd;
254  }
255  block[i] = level;
256  }
257  }
258 }
259 
260 
261 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
262 {
263  while(h--)
264  memset(dst + h*linesize, 128, 16);
265 }
266 
267 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
268 {
269  while(h--)
270  memset(dst + h*linesize, 128, 8);
271 }
272 
273 /* init common dct for both encoder and decoder */
274 static av_cold int dct_init(MpegEncContext *s)
275 {
276  ff_blockdsp_init(&s->bdsp);
277  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
278  ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
279 
280  if (s->avctx->debug & FF_DEBUG_NOMC) {
281  int i;
282  for (i=0; i<4; i++) {
283  s->hdsp.avg_pixels_tab[0][i] = gray16;
284  s->hdsp.put_pixels_tab[0][i] = gray16;
285  s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
286 
287  s->hdsp.avg_pixels_tab[1][i] = gray8;
288  s->hdsp.put_pixels_tab[1][i] = gray8;
289  s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
290  }
291  }
292 
293  s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
294  s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
295  s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
296  s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
297  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
298  if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
299  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
300  s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
301 
302 #if HAVE_INTRINSICS_NEON
303  ff_mpv_common_init_neon(s);
304 #endif
305 
306 #if ARCH_ALPHA
307  ff_mpv_common_init_axp(s);
308 #elif ARCH_ARM
309  ff_mpv_common_init_arm(s);
310 #elif ARCH_PPC
311  ff_mpv_common_init_ppc(s);
312 #elif ARCH_X86
313  ff_mpv_common_init_x86(s);
314 #elif ARCH_MIPS
315  ff_mpv_common_init_mips(s);
316 #endif
317 
318  return 0;
319 }
320 
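/* Build the scan tables: permutated[] is the scan order remapped through the
 * IDCT's coefficient permutation, and raster_end[i] records the highest
 * permuted position seen up to scan index i, which the H.263 unquantizers
 * above use to bound their loops. */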
321 av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st,
322  const uint8_t *src_scantable)
323 {
324  int end;
325 
326  st->scantable = src_scantable;
327 
328  for (int i = 0; i < 64; i++) {
329  int j = src_scantable[i];
330  st->permutated[i] = permutation[j];
331  }
332 
333  end = -1;
334  for (int i = 0; i < 64; i++) {
335  int j = st->permutated[i];
336  if (j > end)
337  end = j;
338  st->raster_end[i] = end;
339  }
340 }
341 
342 av_cold void ff_mpv_idct_init(MpegEncContext *s)
343 {
344  if (s->codec_id == AV_CODEC_ID_MPEG4)
345  s->idsp.mpeg4_studio_profile = s->studio_profile;
346  ff_idctdsp_init(&s->idsp, s->avctx);
347 
348  /* load & permutate scantables
349  * note: only wmv uses different ones
350  */
351  if (s->alternate_scan) {
352  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
353  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
354  } else {
355  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
356  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
357  }
358  ff_permute_scantable(s->permutated_intra_h_scantable, ff_alternate_horizontal_scan,
359  s->idsp.idct_permutation);
360  ff_permute_scantable(s->permutated_intra_v_scantable, ff_alternate_vertical_scan,
361  s->idsp.idct_permutation);
362 }
363 
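/* Per-slice-context allocations: motion-estimation map/score map and optional
 * DCT error sums for the encoder, the DCT block array, and the AC prediction
 * values used by H.263-style codecs. */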
364 static int init_duplicate_context(MpegEncContext *s)
365 {
366  int y_size = s->b8_stride * (2 * s->mb_height + 1);
367  int c_size = s->mb_stride * (s->mb_height + 1);
368  int yc_size = y_size + 2 * c_size;
369  int i;
370 
371  if (s->mb_height & 1)
372  yc_size += 2*s->b8_stride + 2*s->mb_stride;
373 
374  if (s->encoding) {
375  s->me.map = av_mallocz(2 * ME_MAP_SIZE * sizeof(*s->me.map));
376  if (!s->me.map)
377  return AVERROR(ENOMEM);
378  s->me.score_map = s->me.map + ME_MAP_SIZE;
379 
380  if (s->noise_reduction) {
381  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_error_sum, 2))
382  return AVERROR(ENOMEM);
383  }
384  }
385  if (!FF_ALLOCZ_TYPED_ARRAY(s->blocks, 1 + s->encoding))
386  return AVERROR(ENOMEM);
387  s->block = s->blocks[0];
388 
389  for (i = 0; i < 12; i++) {
390  s->pblocks[i] = &s->block[i];
391  }
392 
393  if (s->avctx->codec_tag == AV_RL32("VCR2")) {
394  // exchange uv
395  FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
396  }
397 
398  if (s->out_format == FMT_H263) {
399  /* ac values */
400  if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, yc_size))
401  return AVERROR(ENOMEM);
402  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
403  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
404  s->ac_val[2] = s->ac_val[1] + c_size;
405  }
406 
407  return 0;
408 }
409 
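/* Slice-threading setup: clone the main context for every additional slice
 * context, give each clone its own scratch buffers via
 * init_duplicate_context() and split the macroblock rows evenly across the
 * contexts through start_mb_y/end_mb_y. */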
410 int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
411 {
412  int nb_slices = s->slice_context_count, ret;
413 
414  /* We initialize the copies before the original so that
415  * fields allocated in init_duplicate_context are NULL after
416  * copying. This prevents double-frees upon allocation error. */
417  for (int i = 1; i < nb_slices; i++) {
418  s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
419  if (!s->thread_context[i])
420  return AVERROR(ENOMEM);
421  if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
422  return ret;
423  s->thread_context[i]->start_mb_y =
424  (s->mb_height * (i ) + nb_slices / 2) / nb_slices;
425  s->thread_context[i]->end_mb_y =
426  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
427  }
428  s->start_mb_y = 0;
429  s->end_mb_y = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
430  : s->mb_height;
431  return init_duplicate_context(s);
432 }
433 
434 static void free_duplicate_context(MpegEncContext *s)
435 {
436  if (!s)
437  return;
438 
439  av_freep(&s->sc.edge_emu_buffer);
440  av_freep(&s->me.scratchpad);
441  s->me.temp =
442  s->sc.rd_scratchpad =
443  s->sc.b_scratchpad =
444  s->sc.obmc_scratchpad = NULL;
445 
446  av_freep(&s->dct_error_sum);
447  av_freep(&s->me.map);
448  s->me.score_map = NULL;
449  av_freep(&s->blocks);
450  av_freep(&s->ac_val_base);
451  s->block = NULL;
452 }
453 
454 static void free_duplicate_contexts(MpegEncContext *s)
455 {
456  for (int i = 1; i < s->slice_context_count; i++) {
457  free_duplicate_context(s->thread_context[i]);
458  av_freep(&s->thread_context[i]);
459  }
460  free_duplicate_context(s);
461 }
462 
463 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
464 {
465 #define COPY(a) bak->a = src->a
466  COPY(sc.edge_emu_buffer);
467  COPY(me.scratchpad);
468  COPY(me.temp);
469  COPY(sc.rd_scratchpad);
470  COPY(sc.b_scratchpad);
471  COPY(sc.obmc_scratchpad);
472  COPY(me.map);
473  COPY(me.score_map);
474  COPY(blocks);
475  COPY(block);
476  COPY(start_mb_y);
477  COPY(end_mb_y);
478  COPY(me.map_generation);
479  COPY(pb);
480  COPY(dct_error_sum);
481  COPY(dct_count[0]);
482  COPY(dct_count[1]);
483  COPY(ac_val_base);
484  COPY(ac_val[0]);
485  COPY(ac_val[1]);
486  COPY(ac_val[2]);
487 #undef COPY
488 }
489 
490 int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
491 {
492  MpegEncContext bak;
493  int i, ret;
494  // FIXME copy only needed parts
495  backup_duplicate_context(&bak, dst);
496  memcpy(dst, src, sizeof(MpegEncContext));
497  backup_duplicate_context(dst, &bak);
498  for (i = 0; i < 12; i++) {
499  dst->pblocks[i] = &dst->block[i];
500  }
501  if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
502  // exchange uv
503  FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
504  }
505  if (!dst->sc.edge_emu_buffer &&
506  (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
507  &dst->sc, dst->linesize)) < 0) {
508  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
509  "scratch buffers.\n");
510  return ret;
511  }
512  return 0;
513 }
514 
515 /**
516  * Set the given MpegEncContext to common defaults
517  * (same for encoding and decoding).
518  * The changed fields will not depend upon the
519  * prior state of the MpegEncContext.
520  */
521 void ff_mpv_common_defaults(MpegEncContext *s)
522 {
523  s->y_dc_scale_table =
524  s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
525  s->chroma_qscale_table = ff_default_chroma_qscale_table;
526  s->progressive_frame = 1;
527  s->progressive_sequence = 1;
528  s->picture_structure = PICT_FRAME;
529 
530  s->coded_picture_number = 0;
531  s->picture_number = 0;
532 
533  s->f_code = 1;
534  s->b_code = 1;
535 
536  s->slice_context_count = 1;
537 }
538 
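/* Everything below depends on the coded frame size: macroblock counts and
 * strides, the mb_index2xy mapping, interlaced motion-vector tables and the
 * per-macroblock side tables (coded_block, cbp, dc_val, mbskip, mbintra). */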
539 int ff_mpv_init_context_frame(MpegEncContext *s)
540 {
541  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
542 
543  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
544  s->mb_height = (s->height + 31) / 32 * 2;
545  else
546  s->mb_height = (s->height + 15) / 16;
547 
548  s->mb_width = (s->width + 15) / 16;
549  s->mb_stride = s->mb_width + 1;
550  s->b8_stride = s->mb_width * 2 + 1;
551  mb_array_size = s->mb_height * s->mb_stride;
552  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
553 
554  /* set default edge pos, will be overridden
555  * in decode_header if needed */
556  s->h_edge_pos = s->mb_width * 16;
557  s->v_edge_pos = s->mb_height * 16;
558 
559  s->mb_num = s->mb_width * s->mb_height;
560 
561  s->block_wrap[0] =
562  s->block_wrap[1] =
563  s->block_wrap[2] =
564  s->block_wrap[3] = s->b8_stride;
565  s->block_wrap[4] =
566  s->block_wrap[5] = s->mb_stride;
567 
568  y_size = s->b8_stride * (2 * s->mb_height + 1);
569  c_size = s->mb_stride * (s->mb_height + 1);
570  yc_size = y_size + 2 * c_size;
571 
572  if (s->mb_height & 1)
573  yc_size += 2*s->b8_stride + 2*s->mb_stride;
574 
575  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1))
576  return AVERROR(ENOMEM);
577  for (y = 0; y < s->mb_height; y++)
578  for (x = 0; x < s->mb_width; x++)
579  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
580 
581  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
582 
583  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
584  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
585  /* interlaced direct mode decoding tables */
586  int16_t (*tmp)[2] = av_calloc(mv_table_size, 4 * sizeof(*tmp));
587  if (!tmp)
588  return AVERROR(ENOMEM);
589  s->p_field_mv_table_base = tmp;
590  tmp += s->mb_stride + 1;
591  for (int i = 0; i < 2; i++) {
592  for (int j = 0; j < 2; j++) {
593  s->p_field_mv_table[i][j] = tmp;
594  tmp += mv_table_size;
595  }
596  }
597  }
598 
599  if (s->out_format == FMT_H263) {
600  /* cbp values, cbp, ac_pred, pred_dir */
601  if (!(s->coded_block_base = av_mallocz(y_size + (s->mb_height&1)*2*s->b8_stride)) ||
602  !(s->cbp_table = av_mallocz(mb_array_size)) ||
603  !(s->pred_dir_table = av_mallocz(mb_array_size)))
604  return AVERROR(ENOMEM);
605  s->coded_block = s->coded_block_base + s->b8_stride + 1;
606  }
607 
608  if (s->h263_pred || s->h263_plus || !s->encoding) {
609  /* dc values */
610  // MN: we need these for error resilience of intra-frames
611  if (!FF_ALLOCZ_TYPED_ARRAY(s->dc_val_base, yc_size))
612  return AVERROR(ENOMEM);
613  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
614  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
615  s->dc_val[2] = s->dc_val[1] + c_size;
616  for (i = 0; i < yc_size; i++)
617  s->dc_val_base[i] = 1024;
618  }
619 
620  // Note the + 1 is for a quicker MPEG-4 slice_end detection
621  if (!(s->mbskip_table = av_mallocz(mb_array_size + 2)) ||
622  /* which mb is an intra block, init macroblock skip table */
623  !(s->mbintra_table = av_malloc(mb_array_size)))
624  return AVERROR(ENOMEM);
625  memset(s->mbintra_table, 1, mb_array_size);
626 
627  return !CONFIG_MPEGVIDEODEC || s->encoding ? 0 : ff_mpeg_er_init(s);
628 }
629 
630 static void clear_context(MpegEncContext *s)
631 {
632  memset(&s->next_picture, 0, sizeof(s->next_picture));
633  memset(&s->last_picture, 0, sizeof(s->last_picture));
634  memset(&s->current_picture, 0, sizeof(s->current_picture));
635 
636  memset(s->thread_context, 0, sizeof(s->thread_context));
637 
638  s->me.map = NULL;
639  s->me.score_map = NULL;
640  s->dct_error_sum = NULL;
641  s->block = NULL;
642  s->blocks = NULL;
643  memset(s->pblocks, 0, sizeof(s->pblocks));
644  s->ac_val_base = NULL;
645  s->ac_val[0] =
646  s->ac_val[1] =
647  s->ac_val[2] =NULL;
648  s->sc.edge_emu_buffer = NULL;
649  s->me.scratchpad = NULL;
650  s->me.temp =
651  s->sc.rd_scratchpad =
652  s->sc.b_scratchpad =
653  s->sc.obmc_scratchpad = NULL;
654 
655 
656  s->bitstream_buffer = NULL;
657  s->allocated_bitstream_buffer_size = 0;
658  s->picture = NULL;
659  s->p_field_mv_table_base = NULL;
660  for (int i = 0; i < 2; i++)
661  for (int j = 0; j < 2; j++)
662  s->p_field_mv_table[i][j] = NULL;
663 
664  s->dc_val_base = NULL;
665  s->coded_block_base = NULL;
666  s->mbintra_table = NULL;
667  s->cbp_table = NULL;
668  s->pred_dir_table = NULL;
669 
670  s->mbskip_table = NULL;
671 
672  s->er.error_status_table = NULL;
673  s->er.er_temp_buffer = NULL;
674  s->mb_index2xy = NULL;
675 }
676 
677 /**
678  * init common structure for both encoder and decoder.
679  * this assumes that some variables like width/height are already set
680  */
681 av_cold int ff_mpv_common_init(MpegEncContext *s)
682 {
683  int i, ret;
684  int nb_slices = (HAVE_THREADS &&
685  s->avctx->active_thread_type & FF_THREAD_SLICE) ?
686  s->avctx->thread_count : 1;
687 
688  clear_context(s);
689 
690  if (s->encoding && s->avctx->slices)
691  nb_slices = s->avctx->slices;
692 
693  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
694  av_log(s->avctx, AV_LOG_ERROR,
695  "decoding to AV_PIX_FMT_NONE is not supported.\n");
696  return AVERROR(EINVAL);
697  }
698 
699  if ((s->width || s->height) &&
700  av_image_check_size(s->width, s->height, 0, s->avctx))
701  return AVERROR(EINVAL);
702 
703  dct_init(s);
704 
705  /* set chroma shifts */
706  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
707  &s->chroma_x_shift,
708  &s->chroma_y_shift);
709  if (ret)
710  return ret;
711 
712  if (!FF_ALLOCZ_TYPED_ARRAY(s->picture, MAX_PICTURE_COUNT))
713  return AVERROR(ENOMEM);
714  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
715  s->picture[i].f = av_frame_alloc();
716  if (!s->picture[i].f)
717  goto fail_nomem;
718  }
719 
720  if (!(s->next_picture.f = av_frame_alloc()) ||
721  !(s->last_picture.f = av_frame_alloc()) ||
722  !(s->current_picture.f = av_frame_alloc()))
723  goto fail_nomem;
724 
724 
725  if ((ret = ff_mpv_init_context_frame(s)))
726  goto fail;
727 
728  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
729  int max_slices;
730  if (s->mb_height)
731  max_slices = FFMIN(MAX_THREADS, s->mb_height);
732  else
733  max_slices = MAX_THREADS;
734  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
735  " reducing to %d\n", nb_slices, max_slices);
736  nb_slices = max_slices;
737  }
738 
739  s->context_initialized = 1;
740  memset(s->thread_context, 0, sizeof(s->thread_context));
741  s->thread_context[0] = s;
742  s->slice_context_count = nb_slices;
743 
744 // if (s->width && s->height) {
745  ret = ff_mpv_init_duplicate_contexts(s);
746  if (ret < 0)
747  goto fail;
748 // }
749 
750  return 0;
751  fail_nomem:
752  ret = AVERROR(ENOMEM);
753  fail:
754  ff_mpv_common_end(s);
755  return ret;
756 }
757 
758 void ff_mpv_free_context_frame(MpegEncContext *s)
759 {
760  free_duplicate_contexts(s);
761 
762  av_freep(&s->p_field_mv_table_base);
763  for (int i = 0; i < 2; i++)
764  for (int j = 0; j < 2; j++)
765  s->p_field_mv_table[i][j] = NULL;
766 
767  av_freep(&s->dc_val_base);
768  av_freep(&s->coded_block_base);
769  av_freep(&s->mbintra_table);
770  av_freep(&s->cbp_table);
771  av_freep(&s->pred_dir_table);
772 
773  av_freep(&s->mbskip_table);
774 
775  av_freep(&s->er.error_status_table);
776  av_freep(&s->er.er_temp_buffer);
777  av_freep(&s->mb_index2xy);
778 
779  s->linesize = s->uvlinesize = 0;
780 }
781 
782 void ff_mpv_common_end(MpegEncContext *s)
783 {
784  ff_mpv_free_context_frame(s);
785  if (s->slice_context_count > 1)
786  s->slice_context_count = 1;
787 
788  av_freep(&s->bitstream_buffer);
789  s->allocated_bitstream_buffer_size = 0;
790 
791  if (!s->avctx)
792  return;
793 
794  if (s->picture) {
795  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
796  ff_mpv_picture_free(&s->picture[i]);
797  }
798  av_freep(&s->picture);
799  ff_mpv_picture_free(&s->last_picture);
800  ff_mpv_picture_free(&s->current_picture);
801  ff_mpv_picture_free(&s->next_picture);
802 
803  s->context_initialized = 0;
804  s->context_reinit = 0;
805  s->last_picture_ptr =
806  s->next_picture_ptr =
807  s->current_picture_ptr = NULL;
808  s->linesize = s->uvlinesize = 0;
809 }
810 
811 
812 /**
813  * Clean dc, ac, coded_block for the current non-intra MB.
814  */
815 void ff_clean_intra_table_entries(MpegEncContext *s)
816 {
817  int wrap = s->b8_stride;
818  int xy = s->block_index[0];
819 
820  s->dc_val[0][xy ] =
821  s->dc_val[0][xy + 1 ] =
822  s->dc_val[0][xy + wrap] =
823  s->dc_val[0][xy + 1 + wrap] = 1024;
824  /* ac pred */
825  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
826  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
827  if (s->msmpeg4_version>=3) {
828  s->coded_block[xy ] =
829  s->coded_block[xy + 1 ] =
830  s->coded_block[xy + wrap] =
831  s->coded_block[xy + 1 + wrap] = 0;
832  }
833  /* chroma */
834  wrap = s->mb_stride;
835  xy = s->mb_x + s->mb_y * wrap;
836  s->dc_val[1][xy] =
837  s->dc_val[2][xy] = 1024;
838  /* ac pred */
839  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
840  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
841 
842  s->mbintra_table[xy]= 0;
843 }
844 
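/* Compute block_index[] (the positions of the four luma and two chroma blocks
 * of the current macroblock) and the dest[] pointers into the current
 * picture, taking lowres and field/frame picture structure into account. */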
845 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
846  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
847  const int uvlinesize = s->current_picture.f->linesize[1];
848  const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
849  const int height_of_mb = 4 - s->avctx->lowres;
850 
851  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
852  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
853  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
854  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
855  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
856  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
857  //block_index is not used by mpeg2, so it is not affected by chroma_format
858 
859  s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << width_of_mb);
860  s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
861  s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
862 
863  if (s->picture_structure == PICT_FRAME) {
864  s->dest[0] += s->mb_y * linesize << height_of_mb;
865  s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
866  s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
867  } else {
868  s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb;
869  s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
870  s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
871  av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
872  }
873 }
874 
875 /**
876  * set qscale and update qscale dependent variables.
877  */
878 void ff_set_qscale(MpegEncContext * s, int qscale)
879 {
880  if (qscale < 1)
881  qscale = 1;
882  else if (qscale > 31)
883  qscale = 31;
884 
885  s->qscale = qscale;
886  s->chroma_qscale= s->chroma_qscale_table[qscale];
887 
888  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
889  s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
890 }