/*
 * FFmpeg — mpegvideo.c
 * (text below was extracted from generated documentation of this file)
 */
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "config_components.h"
31 
32 #include "libavutil/attributes.h"
33 #include "libavutil/avassert.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/internal.h"
36 
37 #include "avcodec.h"
38 #include "blockdsp.h"
39 #include "h264chroma.h"
40 #include "idctdsp.h"
41 #include "mathops.h"
42 #include "mpeg_er.h"
43 #include "mpegutils.h"
44 #include "mpegvideo.h"
45 #include "mpeg4videodec.h"
46 #include "mpegvideodata.h"
47 #include "qpeldsp.h"
48 #include "threadframe.h"
49 #include "wmv2dec.h"
50 #include <limits.h>
51 
53  int16_t *block, int n, int qscale)
54 {
55  int i, level, nCoeffs;
56  const uint16_t *quant_matrix;
57 
58  nCoeffs= s->block_last_index[n];
59 
60  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
61  /* XXX: only MPEG-1 */
62  quant_matrix = s->intra_matrix;
63  for(i=1;i<=nCoeffs;i++) {
64  int j= s->intra_scantable.permutated[i];
65  level = block[j];
66  if (level) {
67  if (level < 0) {
68  level = -level;
69  level = (int)(level * qscale * quant_matrix[j]) >> 3;
70  level = (level - 1) | 1;
71  level = -level;
72  } else {
73  level = (int)(level * qscale * quant_matrix[j]) >> 3;
74  level = (level - 1) | 1;
75  }
76  block[j] = level;
77  }
78  }
79 }
80 
82  int16_t *block, int n, int qscale)
83 {
84  int i, level, nCoeffs;
85  const uint16_t *quant_matrix;
86 
87  nCoeffs= s->block_last_index[n];
88 
89  quant_matrix = s->inter_matrix;
90  for(i=0; i<=nCoeffs; i++) {
91  int j= s->intra_scantable.permutated[i];
92  level = block[j];
93  if (level) {
94  if (level < 0) {
95  level = -level;
96  level = (((level << 1) + 1) * qscale *
97  ((int) (quant_matrix[j]))) >> 4;
98  level = (level - 1) | 1;
99  level = -level;
100  } else {
101  level = (((level << 1) + 1) * qscale *
102  ((int) (quant_matrix[j]))) >> 4;
103  level = (level - 1) | 1;
104  }
105  block[j] = level;
106  }
107  }
108 }
109 
111  int16_t *block, int n, int qscale)
112 {
113  int i, level, nCoeffs;
114  const uint16_t *quant_matrix;
115 
116  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
117  else qscale <<= 1;
118 
119  if(s->alternate_scan) nCoeffs= 63;
120  else nCoeffs= s->block_last_index[n];
121 
122  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
123  quant_matrix = s->intra_matrix;
124  for(i=1;i<=nCoeffs;i++) {
125  int j= s->intra_scantable.permutated[i];
126  level = block[j];
127  if (level) {
128  if (level < 0) {
129  level = -level;
130  level = (int)(level * qscale * quant_matrix[j]) >> 4;
131  level = -level;
132  } else {
133  level = (int)(level * qscale * quant_matrix[j]) >> 4;
134  }
135  block[j] = level;
136  }
137  }
138 }
139 
141  int16_t *block, int n, int qscale)
142 {
143  int i, level, nCoeffs;
144  const uint16_t *quant_matrix;
145  int sum=-1;
146 
147  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
148  else qscale <<= 1;
149 
150  if(s->alternate_scan) nCoeffs= 63;
151  else nCoeffs= s->block_last_index[n];
152 
153  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
154  sum += block[0];
155  quant_matrix = s->intra_matrix;
156  for(i=1;i<=nCoeffs;i++) {
157  int j= s->intra_scantable.permutated[i];
158  level = block[j];
159  if (level) {
160  if (level < 0) {
161  level = -level;
162  level = (int)(level * qscale * quant_matrix[j]) >> 4;
163  level = -level;
164  } else {
165  level = (int)(level * qscale * quant_matrix[j]) >> 4;
166  }
167  block[j] = level;
168  sum+=level;
169  }
170  }
171  block[63]^=sum&1;
172 }
173 
175  int16_t *block, int n, int qscale)
176 {
177  int i, level, nCoeffs;
178  const uint16_t *quant_matrix;
179  int sum=-1;
180 
181  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
182  else qscale <<= 1;
183 
184  if(s->alternate_scan) nCoeffs= 63;
185  else nCoeffs= s->block_last_index[n];
186 
187  quant_matrix = s->inter_matrix;
188  for(i=0; i<=nCoeffs; i++) {
189  int j= s->intra_scantable.permutated[i];
190  level = block[j];
191  if (level) {
192  if (level < 0) {
193  level = -level;
194  level = (((level << 1) + 1) * qscale *
195  ((int) (quant_matrix[j]))) >> 5;
196  level = -level;
197  } else {
198  level = (((level << 1) + 1) * qscale *
199  ((int) (quant_matrix[j]))) >> 5;
200  }
201  block[j] = level;
202  sum+=level;
203  }
204  }
205  block[63]^=sum&1;
206 }
207 
209  int16_t *block, int n, int qscale)
210 {
211  int i, level, qmul, qadd;
212  int nCoeffs;
213 
214  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
215 
216  qmul = qscale << 1;
217 
218  if (!s->h263_aic) {
219  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
220  qadd = (qscale - 1) | 1;
221  }else{
222  qadd = 0;
223  }
224  if(s->ac_pred)
225  nCoeffs=63;
226  else
227  nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
228 
229  for(i=1; i<=nCoeffs; i++) {
230  level = block[i];
231  if (level) {
232  if (level < 0) {
233  level = level * qmul - qadd;
234  } else {
235  level = level * qmul + qadd;
236  }
237  block[i] = level;
238  }
239  }
240 }
241 
243  int16_t *block, int n, int qscale)
244 {
245  int i, level, qmul, qadd;
246  int nCoeffs;
247 
248  av_assert2(s->block_last_index[n]>=0);
249 
250  qadd = (qscale - 1) | 1;
251  qmul = qscale << 1;
252 
253  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
254 
255  for(i=0; i<=nCoeffs; i++) {
256  level = block[i];
257  if (level) {
258  if (level < 0) {
259  level = level * qmul - qadd;
260  } else {
261  level = level * qmul + qadd;
262  }
263  block[i] = level;
264  }
265  }
266 }
267 
268 
/* Debug helper (FF_DEBUG_NOMC): paint a 16-pixel-wide block mid-grey,
 * ignoring the source pixels entirely. */
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = h - 1; row >= 0; row--)
        memset(dst + row * linesize, 128, 16);
}
274 
/* Debug helper (FF_DEBUG_NOMC): paint an 8-pixel-wide block mid-grey,
 * ignoring the source pixels entirely. */
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = h - 1; row >= 0; row--)
        memset(dst + row * linesize, 128, 8);
}
280 
281 /* init common dct for both encoder and decoder */
283 {
284  ff_blockdsp_init(&s->bdsp, s->avctx);
285  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
286  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
287  ff_mpegvideodsp_init(&s->mdsp);
288  ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
289 
290  if (s->avctx->debug & FF_DEBUG_NOMC) {
291  int i;
292  for (i=0; i<4; i++) {
293  s->hdsp.avg_pixels_tab[0][i] = gray16;
294  s->hdsp.put_pixels_tab[0][i] = gray16;
295  s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
296 
297  s->hdsp.avg_pixels_tab[1][i] = gray8;
298  s->hdsp.put_pixels_tab[1][i] = gray8;
299  s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
300  }
301  }
302 
303  s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
304  s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
305  s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
306  s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
307  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
308  if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
309  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
310  s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
311 
312  if (HAVE_INTRINSICS_NEON)
314 
315  if (ARCH_ALPHA)
317  if (ARCH_ARM)
319  if (ARCH_PPC)
321  if (ARCH_X86)
323  if (ARCH_MIPS)
325 
326  return 0;
327 }
328 
330 {
331  if (s->codec_id == AV_CODEC_ID_MPEG4)
332  s->idsp.mpeg4_studio_profile = s->studio_profile;
333  ff_idctdsp_init(&s->idsp, s->avctx);
334 
335  /* load & permutate scantables
336  * note: only wmv uses different ones
337  */
338  if (s->alternate_scan) {
339  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
340  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
341  } else {
342  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
343  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
344  }
345  ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
346  ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
347 }
348 
350 {
351  int y_size = s->b8_stride * (2 * s->mb_height + 1);
352  int c_size = s->mb_stride * (s->mb_height + 1);
353  int yc_size = y_size + 2 * c_size;
354  int i;
355 
356  if (s->mb_height & 1)
357  yc_size += 2*s->b8_stride + 2*s->mb_stride;
358 
359  if (s->encoding) {
360  if (!FF_ALLOCZ_TYPED_ARRAY(s->me.map, ME_MAP_SIZE) ||
361  !FF_ALLOCZ_TYPED_ARRAY(s->me.score_map, ME_MAP_SIZE))
362  return AVERROR(ENOMEM);
363 
364  if (s->noise_reduction) {
365  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_error_sum, 2))
366  return AVERROR(ENOMEM);
367  }
368  }
369  if (!FF_ALLOCZ_TYPED_ARRAY(s->blocks, 2))
370  return AVERROR(ENOMEM);
371  s->block = s->blocks[0];
372 
373  for (i = 0; i < 12; i++) {
374  s->pblocks[i] = &s->block[i];
375  }
376 
377  if (s->avctx->codec_tag == AV_RL32("VCR2")) {
378  // exchange uv
379  FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
380  }
381 
382  if (s->out_format == FMT_H263) {
383  /* ac values */
384  if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, yc_size))
385  return AVERROR(ENOMEM);
386  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
387  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
388  s->ac_val[2] = s->ac_val[1] + c_size;
389  }
390 
391  return 0;
392 }
393 
395 {
396  int nb_slices = s->slice_context_count, ret;
397 
398  /* We initialize the copies before the original so that
399  * fields allocated in init_duplicate_context are NULL after
400  * copying. This prevents double-frees upon allocation error. */
401  for (int i = 1; i < nb_slices; i++) {
402  s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
403  if (!s->thread_context[i])
404  return AVERROR(ENOMEM);
405  if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
406  return ret;
407  s->thread_context[i]->start_mb_y =
408  (s->mb_height * (i ) + nb_slices / 2) / nb_slices;
409  s->thread_context[i]->end_mb_y =
410  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
411  }
412  s->start_mb_y = 0;
413  s->end_mb_y = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
414  : s->mb_height;
415  return init_duplicate_context(s);
416 }
417 
419 {
420  if (!s)
421  return;
422 
423  av_freep(&s->sc.edge_emu_buffer);
424  av_freep(&s->me.scratchpad);
425  s->me.temp =
426  s->sc.rd_scratchpad =
427  s->sc.b_scratchpad =
428  s->sc.obmc_scratchpad = NULL;
429 
430  av_freep(&s->dct_error_sum);
431  av_freep(&s->me.map);
432  av_freep(&s->me.score_map);
433  av_freep(&s->blocks);
434  av_freep(&s->ac_val_base);
435  s->block = NULL;
436 }
437 
439 {
440  for (int i = 1; i < s->slice_context_count; i++) {
441  free_duplicate_context(s->thread_context[i]);
442  av_freep(&s->thread_context[i]);
443  }
445 }
446 
448 {
449 #define COPY(a) bak->a = src->a
450  COPY(sc.edge_emu_buffer);
451  COPY(me.scratchpad);
452  COPY(me.temp);
453  COPY(sc.rd_scratchpad);
454  COPY(sc.b_scratchpad);
455  COPY(sc.obmc_scratchpad);
456  COPY(me.map);
457  COPY(me.score_map);
458  COPY(blocks);
459  COPY(block);
460  COPY(start_mb_y);
461  COPY(end_mb_y);
462  COPY(me.map_generation);
463  COPY(pb);
464  COPY(dct_error_sum);
465  COPY(dct_count[0]);
466  COPY(dct_count[1]);
467  COPY(ac_val_base);
468  COPY(ac_val[0]);
469  COPY(ac_val[1]);
470  COPY(ac_val[2]);
471 #undef COPY
472 }
473 
475 {
476  MpegEncContext bak;
477  int i, ret;
478  // FIXME copy only needed parts
479  backup_duplicate_context(&bak, dst);
480  memcpy(dst, src, sizeof(MpegEncContext));
481  backup_duplicate_context(dst, &bak);
482  for (i = 0; i < 12; i++) {
483  dst->pblocks[i] = &dst->block[i];
484  }
485  if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
486  // exchange uv
487  FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
488  }
489  if (!dst->sc.edge_emu_buffer &&
490  (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
491  &dst->sc, dst->linesize)) < 0) {
492  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
493  "scratch buffers.\n");
494  return ret;
495  }
496  return 0;
497 }
498 
499 /**
500  * Set the given MpegEncContext to common defaults
501  * (same for encoding and decoding).
502  * The changed fields will not depend upon the
503  * prior state of the MpegEncContext.
504  */
506 {
507  s->y_dc_scale_table =
508  s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
509  s->chroma_qscale_table = ff_default_chroma_qscale_table;
510  s->progressive_frame = 1;
511  s->progressive_sequence = 1;
512  s->picture_structure = PICT_FRAME;
513 
514  s->coded_picture_number = 0;
515  s->picture_number = 0;
516 
517  s->f_code = 1;
518  s->b_code = 1;
519 
520  s->slice_context_count = 1;
521 }
522 
524 {
525  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
526 
527  s->mb_width = (s->width + 15) / 16;
528  s->mb_stride = s->mb_width + 1;
529  s->b8_stride = s->mb_width * 2 + 1;
530  mb_array_size = s->mb_height * s->mb_stride;
531  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
532 
533  /* set default edge pos, will be overridden
534  * in decode_header if needed */
535  s->h_edge_pos = s->mb_width * 16;
536  s->v_edge_pos = s->mb_height * 16;
537 
538  s->mb_num = s->mb_width * s->mb_height;
539 
540  s->block_wrap[0] =
541  s->block_wrap[1] =
542  s->block_wrap[2] =
543  s->block_wrap[3] = s->b8_stride;
544  s->block_wrap[4] =
545  s->block_wrap[5] = s->mb_stride;
546 
547  y_size = s->b8_stride * (2 * s->mb_height + 1);
548  c_size = s->mb_stride * (s->mb_height + 1);
549  yc_size = y_size + 2 * c_size;
550 
551  if (s->mb_height & 1)
552  yc_size += 2*s->b8_stride + 2*s->mb_stride;
553 
554  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1))
555  return AVERROR(ENOMEM);
556  for (y = 0; y < s->mb_height; y++)
557  for (x = 0; x < s->mb_width; x++)
558  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
559 
560  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
561 
562  if (s->encoding) {
563  /* Allocate MV tables */
564  if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base, mv_table_size) ||
565  !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base, mv_table_size) ||
566  !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base, mv_table_size) ||
567  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) ||
568  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) ||
569  !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base, mv_table_size))
570  return AVERROR(ENOMEM);
571  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
572  s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
573  s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
574  s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
575  s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
576  s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
577 
578  /* Allocate MB type table */
579  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type, mb_array_size) ||
580  !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
581  !FF_ALLOC_TYPED_ARRAY (s->cplx_tab, mb_array_size) ||
582  !FF_ALLOC_TYPED_ARRAY (s->bits_tab, mb_array_size))
583  return AVERROR(ENOMEM);
584 
585 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
586  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
587  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
588  int16_t (*tmp1)[2];
589  uint8_t *tmp2;
590  if (!(tmp1 = ALLOCZ_ARRAYS(s->b_field_mv_table_base, 8, mv_table_size)) ||
591  !(tmp2 = ALLOCZ_ARRAYS(s->b_field_select_table[0][0], 2 * 4, mv_table_size)) ||
592  !ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * 2, mv_table_size))
593  return AVERROR(ENOMEM);
594 
595  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
596  tmp1 += s->mb_stride + 1;
597 
598  for (int i = 0; i < 2; i++) {
599  for (int j = 0; j < 2; j++) {
600  for (int k = 0; k < 2; k++) {
601  s->b_field_mv_table[i][j][k] = tmp1;
602  tmp1 += mv_table_size;
603  }
604  s->b_field_select_table[i][j] = tmp2;
605  tmp2 += 2 * mv_table_size;
606  }
607  }
608  }
609  }
610 
611  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
612  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
613  int16_t (*tmp)[2];
614  /* interlaced direct mode decoding tables */
615  if (!(tmp = ALLOCZ_ARRAYS(s->p_field_mv_table_base, 4, mv_table_size)))
616  return AVERROR(ENOMEM);
617  tmp += s->mb_stride + 1;
618  for (int i = 0; i < 2; i++) {
619  for (int j = 0; j < 2; j++) {
620  s->p_field_mv_table[i][j] = tmp;
621  tmp += mv_table_size;
622  }
623  }
624  }
625 
626  if (s->out_format == FMT_H263) {
627  /* cbp values, cbp, ac_pred, pred_dir */
628  if (!(s->coded_block_base = av_mallocz(y_size + (s->mb_height&1)*2*s->b8_stride)) ||
629  !(s->cbp_table = av_mallocz(mb_array_size)) ||
630  !(s->pred_dir_table = av_mallocz(mb_array_size)))
631  return AVERROR(ENOMEM);
632  s->coded_block = s->coded_block_base + s->b8_stride + 1;
633  }
634 
635  if (s->h263_pred || s->h263_plus || !s->encoding) {
636  /* dc values */
637  // MN: we need these for error resilience of intra-frames
638  if (!FF_ALLOCZ_TYPED_ARRAY(s->dc_val_base, yc_size))
639  return AVERROR(ENOMEM);
640  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
641  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
642  s->dc_val[2] = s->dc_val[1] + c_size;
643  for (i = 0; i < yc_size; i++)
644  s->dc_val_base[i] = 1024;
645  }
646 
647  /* which mb is an intra block, init macroblock skip table */
648  if (!(s->mbintra_table = av_mallocz(mb_array_size)) ||
649  // Note the + 1 is for a quicker MPEG-4 slice_end detection
650  !(s->mbskip_table = av_mallocz(mb_array_size + 2)))
651  return AVERROR(ENOMEM);
652  memset(s->mbintra_table, 1, mb_array_size);
653 
654  return !CONFIG_MPEGVIDEODEC || s->encoding ? 0 : ff_mpeg_er_init(s);
655 }
656 
658 {
659  int i, j, k;
660 
661  memset(&s->next_picture, 0, sizeof(s->next_picture));
662  memset(&s->last_picture, 0, sizeof(s->last_picture));
663  memset(&s->current_picture, 0, sizeof(s->current_picture));
664  memset(&s->new_picture, 0, sizeof(s->new_picture));
665 
666  memset(s->thread_context, 0, sizeof(s->thread_context));
667 
668  s->me.map = NULL;
669  s->me.score_map = NULL;
670  s->dct_error_sum = NULL;
671  s->block = NULL;
672  s->blocks = NULL;
673  memset(s->pblocks, 0, sizeof(s->pblocks));
674  s->ac_val_base = NULL;
675  s->ac_val[0] =
676  s->ac_val[1] =
677  s->ac_val[2] =NULL;
678  s->sc.edge_emu_buffer = NULL;
679  s->me.scratchpad = NULL;
680  s->me.temp =
681  s->sc.rd_scratchpad =
682  s->sc.b_scratchpad =
683  s->sc.obmc_scratchpad = NULL;
684 
685 
686  s->bitstream_buffer = NULL;
687  s->allocated_bitstream_buffer_size = 0;
688  s->picture = NULL;
689  s->mb_type = NULL;
690  s->p_mv_table_base = NULL;
691  s->b_forw_mv_table_base = NULL;
692  s->b_back_mv_table_base = NULL;
693  s->b_bidir_forw_mv_table_base = NULL;
694  s->b_bidir_back_mv_table_base = NULL;
695  s->b_direct_mv_table_base = NULL;
696  s->p_mv_table = NULL;
697  s->b_forw_mv_table = NULL;
698  s->b_back_mv_table = NULL;
699  s->b_bidir_forw_mv_table = NULL;
700  s->b_bidir_back_mv_table = NULL;
701  s->b_direct_mv_table = NULL;
702  s->b_field_mv_table_base = NULL;
703  s->p_field_mv_table_base = NULL;
704  for (i = 0; i < 2; i++) {
705  for (j = 0; j < 2; j++) {
706  for (k = 0; k < 2; k++) {
707  s->b_field_mv_table[i][j][k] = NULL;
708  }
709  s->b_field_select_table[i][j] = NULL;
710  s->p_field_mv_table[i][j] = NULL;
711  }
712  s->p_field_select_table[i] = NULL;
713  }
714 
715  s->dc_val_base = NULL;
716  s->coded_block_base = NULL;
717  s->mbintra_table = NULL;
718  s->cbp_table = NULL;
719  s->pred_dir_table = NULL;
720 
721  s->mbskip_table = NULL;
722 
723  s->er.error_status_table = NULL;
724  s->er.er_temp_buffer = NULL;
725  s->mb_index2xy = NULL;
726  s->lambda_table = NULL;
727 
728  s->cplx_tab = NULL;
729  s->bits_tab = NULL;
730 }
731 
732 /**
733  * init common structure for both encoder and decoder.
734  * this assumes that some variables like width/height are already set
735  */
737 {
738  int i, ret;
739  int nb_slices = (HAVE_THREADS &&
740  s->avctx->active_thread_type & FF_THREAD_SLICE) ?
741  s->avctx->thread_count : 1;
742 
743  clear_context(s);
744 
745  if (s->encoding && s->avctx->slices)
746  nb_slices = s->avctx->slices;
747 
748  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
749  s->mb_height = (s->height + 31) / 32 * 2;
750  else
751  s->mb_height = (s->height + 15) / 16;
752 
753  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
754  av_log(s->avctx, AV_LOG_ERROR,
755  "decoding to AV_PIX_FMT_NONE is not supported.\n");
756  return AVERROR(EINVAL);
757  }
758 
759  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
760  int max_slices;
761  if (s->mb_height)
762  max_slices = FFMIN(MAX_THREADS, s->mb_height);
763  else
764  max_slices = MAX_THREADS;
765  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
766  " reducing to %d\n", nb_slices, max_slices);
767  nb_slices = max_slices;
768  }
769 
770  if ((s->width || s->height) &&
771  av_image_check_size(s->width, s->height, 0, s->avctx))
772  return AVERROR(EINVAL);
773 
774  dct_init(s);
775 
776  /* set chroma shifts */
777  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
778  &s->chroma_x_shift,
779  &s->chroma_y_shift);
780  if (ret)
781  return ret;
782 
783  if (!FF_ALLOCZ_TYPED_ARRAY(s->picture, MAX_PICTURE_COUNT))
784  return AVERROR(ENOMEM);
785  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
786  s->picture[i].f = av_frame_alloc();
787  if (!s->picture[i].f)
788  goto fail_nomem;
789  }
790 
791  if (!(s->next_picture.f = av_frame_alloc()) ||
792  !(s->last_picture.f = av_frame_alloc()) ||
793  !(s->current_picture.f = av_frame_alloc()) ||
794  !(s->new_picture = av_frame_alloc()))
795  goto fail_nomem;
796 
798  goto fail;
799 
800 #if FF_API_FLAG_TRUNCATED
801  s->parse_context.state = -1;
802 #endif
803 
804  s->context_initialized = 1;
805  memset(s->thread_context, 0, sizeof(s->thread_context));
806  s->thread_context[0] = s;
807  s->slice_context_count = nb_slices;
808 
809 // if (s->width && s->height) {
811  if (ret < 0)
812  goto fail;
813 // }
814 
815  return 0;
816  fail_nomem:
817  ret = AVERROR(ENOMEM);
818  fail:
820  return ret;
821 }
822 
824 {
825  int i, j, k;
826 
828 
829  av_freep(&s->mb_type);
830  av_freep(&s->p_mv_table_base);
831  av_freep(&s->b_forw_mv_table_base);
832  av_freep(&s->b_back_mv_table_base);
833  av_freep(&s->b_bidir_forw_mv_table_base);
834  av_freep(&s->b_bidir_back_mv_table_base);
835  av_freep(&s->b_direct_mv_table_base);
836  s->p_mv_table = NULL;
837  s->b_forw_mv_table = NULL;
838  s->b_back_mv_table = NULL;
839  s->b_bidir_forw_mv_table = NULL;
840  s->b_bidir_back_mv_table = NULL;
841  s->b_direct_mv_table = NULL;
842  av_freep(&s->b_field_mv_table_base);
843  av_freep(&s->b_field_select_table[0][0]);
844  av_freep(&s->p_field_mv_table_base);
845  av_freep(&s->p_field_select_table[0]);
846  for (i = 0; i < 2; i++) {
847  for (j = 0; j < 2; j++) {
848  for (k = 0; k < 2; k++) {
849  s->b_field_mv_table[i][j][k] = NULL;
850  }
851  s->b_field_select_table[i][j] = NULL;
852  s->p_field_mv_table[i][j] = NULL;
853  }
854  s->p_field_select_table[i] = NULL;
855  }
856 
857  av_freep(&s->dc_val_base);
858  av_freep(&s->coded_block_base);
859  av_freep(&s->mbintra_table);
860  av_freep(&s->cbp_table);
861  av_freep(&s->pred_dir_table);
862 
863  av_freep(&s->mbskip_table);
864 
865  av_freep(&s->er.error_status_table);
866  av_freep(&s->er.er_temp_buffer);
867  av_freep(&s->mb_index2xy);
868  av_freep(&s->lambda_table);
869 
870  av_freep(&s->cplx_tab);
871  av_freep(&s->bits_tab);
872 
873  s->linesize = s->uvlinesize = 0;
874 }
875 
876 /* init common structure for both encoder and decoder */
878 {
879  if (!s)
880  return;
881 
883  if (s->slice_context_count > 1)
884  s->slice_context_count = 1;
885 
886 #if FF_API_FLAG_TRUNCATED
887  av_freep(&s->parse_context.buffer);
888  s->parse_context.buffer_size = 0;
889 #endif
890 
891  av_freep(&s->bitstream_buffer);
892  s->allocated_bitstream_buffer_size = 0;
893 
894  if (!s->avctx)
895  return;
896 
897  if (s->picture) {
898  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
899  ff_mpv_picture_free(s->avctx, &s->picture[i]);
900  }
901  av_freep(&s->picture);
902  ff_mpv_picture_free(s->avctx, &s->last_picture);
903  ff_mpv_picture_free(s->avctx, &s->current_picture);
904  ff_mpv_picture_free(s->avctx, &s->next_picture);
905  av_frame_free(&s->new_picture);
906 
907  s->context_initialized = 0;
908  s->context_reinit = 0;
909  s->last_picture_ptr =
910  s->next_picture_ptr =
911  s->current_picture_ptr = NULL;
912  s->linesize = s->uvlinesize = 0;
913 }
914 
915 
917  uint8_t *dest, uint8_t *src,
918  int field_based, int field_select,
919  int src_x, int src_y,
920  int width, int height, ptrdiff_t stride,
921  int h_edge_pos, int v_edge_pos,
922  int w, int h, h264_chroma_mc_func *pix_op,
923  int motion_x, int motion_y)
924 {
925  const int lowres = s->avctx->lowres;
926  const int op_index = FFMIN(lowres, 3);
927  const int s_mask = (2 << lowres) - 1;
928  int emu = 0;
929  int sx, sy;
930 
931  if (s->quarter_sample) {
932  motion_x /= 2;
933  motion_y /= 2;
934  }
935 
936  sx = motion_x & s_mask;
937  sy = motion_y & s_mask;
938  src_x += motion_x >> lowres + 1;
939  src_y += motion_y >> lowres + 1;
940 
941  src += src_y * stride + src_x;
942 
943  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
944  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
945  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
946  s->linesize, s->linesize,
947  w + 1, (h + 1) << field_based,
948  src_x, src_y << field_based,
949  h_edge_pos, v_edge_pos);
950  src = s->sc.edge_emu_buffer;
951  emu = 1;
952  }
953 
954  sx = (sx << 2) >> lowres;
955  sy = (sy << 2) >> lowres;
956  if (field_select)
957  src += s->linesize;
958  pix_op[op_index](dest, src, stride, h, sx, sy);
959  return emu;
960 }
961 
962 /* apply one mpeg motion vector to the three components */
964  uint8_t *dest_y,
965  uint8_t *dest_cb,
966  uint8_t *dest_cr,
967  int field_based,
968  int bottom_field,
969  int field_select,
970  uint8_t **ref_picture,
971  h264_chroma_mc_func *pix_op,
972  int motion_x, int motion_y,
973  int h, int mb_y)
974 {
975  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
976  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
977  ptrdiff_t uvlinesize, linesize;
978  const int lowres = s->avctx->lowres;
979  const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
980  const int block_s = 8>>lowres;
981  const int s_mask = (2 << lowres) - 1;
982  const int h_edge_pos = s->h_edge_pos >> lowres;
983  const int v_edge_pos = s->v_edge_pos >> lowres;
984  linesize = s->current_picture.f->linesize[0] << field_based;
985  uvlinesize = s->current_picture.f->linesize[1] << field_based;
986 
987  // FIXME obviously not perfect but qpel will not work in lowres anyway
988  if (s->quarter_sample) {
989  motion_x /= 2;
990  motion_y /= 2;
991  }
992 
993  if(field_based){
994  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
995  }
996 
997  sx = motion_x & s_mask;
998  sy = motion_y & s_mask;
999  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
1000  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
1001 
1002  if (s->out_format == FMT_H263) {
1003  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
1004  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
1005  uvsrc_x = src_x >> 1;
1006  uvsrc_y = src_y >> 1;
1007  } else if (s->out_format == FMT_H261) {
1008  // even chroma mv's are full pel in H261
1009  mx = motion_x / 4;
1010  my = motion_y / 4;
1011  uvsx = (2 * mx) & s_mask;
1012  uvsy = (2 * my) & s_mask;
1013  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
1014  uvsrc_y = mb_y * block_s + (my >> lowres);
1015  } else {
1016  if(s->chroma_y_shift){
1017  mx = motion_x / 2;
1018  my = motion_y / 2;
1019  uvsx = mx & s_mask;
1020  uvsy = my & s_mask;
1021  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
1022  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
1023  } else {
1024  if(s->chroma_x_shift){
1025  //Chroma422
1026  mx = motion_x / 2;
1027  uvsx = mx & s_mask;
1028  uvsy = motion_y & s_mask;
1029  uvsrc_y = src_y;
1030  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1031  } else {
1032  //Chroma444
1033  uvsx = motion_x & s_mask;
1034  uvsy = motion_y & s_mask;
1035  uvsrc_x = src_x;
1036  uvsrc_y = src_y;
1037  }
1038  }
1039  }
1040 
1041  ptr_y = ref_picture[0] + src_y * linesize + src_x;
1042  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1043  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
1044 
1045  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
1046  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1047  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
1048  linesize >> field_based, linesize >> field_based,
1049  17, 17 + field_based,
1050  src_x, src_y << field_based, h_edge_pos,
1051  v_edge_pos);
1052  ptr_y = s->sc.edge_emu_buffer;
1053  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1054  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
1055  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
1056  if (s->workaround_bugs & FF_BUG_IEDGE)
1057  vbuf -= s->uvlinesize;
1058  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
1059  uvlinesize >> field_based, uvlinesize >> field_based,
1060  9, 9 + field_based,
1061  uvsrc_x, uvsrc_y << field_based,
1062  h_edge_pos >> 1, v_edge_pos >> 1);
1063  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
1064  uvlinesize >> field_based,uvlinesize >> field_based,
1065  9, 9 + field_based,
1066  uvsrc_x, uvsrc_y << field_based,
1067  h_edge_pos >> 1, v_edge_pos >> 1);
1068  ptr_cb = ubuf;
1069  ptr_cr = vbuf;
1070  }
1071  }
1072 
1073  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
1074  if (bottom_field) {
1075  dest_y += s->linesize;
1076  dest_cb += s->uvlinesize;
1077  dest_cr += s->uvlinesize;
1078  }
1079 
1080  if (field_select) {
1081  ptr_y += s->linesize;
1082  ptr_cb += s->uvlinesize;
1083  ptr_cr += s->uvlinesize;
1084  }
1085 
1086  sx = (sx << 2) >> lowres;
1087  sy = (sy << 2) >> lowres;
1088  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
1089 
1090  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1091  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
1092  uvsx = (uvsx << 2) >> lowres;
1093  uvsy = (uvsy << 2) >> lowres;
1094  if (hc) {
1095  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
1096  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
1097  }
1098  }
1099  // FIXME h261 lowres loop filter
1100 }
1101 
1103  uint8_t *dest_cb, uint8_t *dest_cr,
1104  uint8_t **ref_picture,
1105  h264_chroma_mc_func * pix_op,
1106  int mx, int my)
1107 {
1108  const int lowres = s->avctx->lowres;
1109  const int op_index = FFMIN(lowres, 3);
1110  const int block_s = 8 >> lowres;
1111  const int s_mask = (2 << lowres) - 1;
1112  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
1113  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
1114  int emu = 0, src_x, src_y, sx, sy;
1115  ptrdiff_t offset;
1116  uint8_t *ptr;
1117 
1118  if (s->quarter_sample) {
1119  mx /= 2;
1120  my /= 2;
1121  }
1122 
1123  /* In case of 8X8, we construct a single chroma motion vector
1124  with a special rounding */
1125  mx = ff_h263_round_chroma(mx);
1126  my = ff_h263_round_chroma(my);
1127 
1128  sx = mx & s_mask;
1129  sy = my & s_mask;
1130  src_x = s->mb_x * block_s + (mx >> lowres + 1);
1131  src_y = s->mb_y * block_s + (my >> lowres + 1);
1132 
1133  offset = src_y * s->uvlinesize + src_x;
1134  ptr = ref_picture[1] + offset;
1135  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
1136  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
1137  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
1138  s->uvlinesize, s->uvlinesize,
1139  9, 9,
1140  src_x, src_y, h_edge_pos, v_edge_pos);
1141  ptr = s->sc.edge_emu_buffer;
1142  emu = 1;
1143  }
1144  sx = (sx << 2) >> lowres;
1145  sy = (sy << 2) >> lowres;
1146  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
1147 
1148  ptr = ref_picture[2] + offset;
1149  if (emu) {
1150  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
1151  s->uvlinesize, s->uvlinesize,
1152  9, 9,
1153  src_x, src_y, h_edge_pos, v_edge_pos);
1154  ptr = s->sc.edge_emu_buffer;
1155  }
1156  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1157 }
1158 
1159 /**
1160  * motion compensation of a single macroblock
1161  * @param s context
1162  * @param dest_y luma destination pointer
1163  * @param dest_cb chroma cb/u destination pointer
1164  * @param dest_cr chroma cr/v destination pointer
1165  * @param dir direction (0->forward, 1->backward)
1166  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1167  * @param pix_op halfpel motion compensation function (average or put normally)
1168  * the motion vectors are taken from s->mv and the MV type from s->mv_type
1169  */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres = s->avctx->lowres;
    const int block_s = 8 >>lowres;   /* 8x8 block side length at this lowres level */

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors (H.263 / MPEG-4 4MV); chroma gets one vector
         * derived from their sum below */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            /* (i & 1, i >> 1) is the 8x8 sub-block position inside the MB */
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                                            s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* two field vectors inside a frame picture */
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture: the opposite-parity field of a non-B picture
             * after the first field lives in the current picture */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f->data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, one per 16x8 half of the macroblock (field pictures) */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                /* same-parity reference of a second field: use current picture */
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* MPEG-2 dual prime: predict from both field parities and average */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                /* after put we make avg of the same block */
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
1300 
1301 /**
1302  * find the lowest MB row referenced in the MVs
1303  */
1305 {
1306  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1307  int my, off, i, mvs;
1308 
1309  if (s->picture_structure != PICT_FRAME || s->mcsel)
1310  goto unhandled;
1311 
1312  switch (s->mv_type) {
1313  case MV_TYPE_16X16:
1314  mvs = 1;
1315  break;
1316  case MV_TYPE_16X8:
1317  mvs = 2;
1318  break;
1319  case MV_TYPE_8X8:
1320  mvs = 4;
1321  break;
1322  default:
1323  goto unhandled;
1324  }
1325 
1326  for (i = 0; i < mvs; i++) {
1327  my = s->mv[dir][i][1];
1328  my_max = FFMAX(my_max, my);
1329  my_min = FFMIN(my_min, my);
1330  }
1331 
1332  off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
1333 
1334  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
1335 unhandled:
1336  return s->mb_height-1;
1337 }
1338 
/* Dequantize an intra block[] and write (not add) its IDCT result to dest[].
 * i is the block index inside the MB; per the unquantizer, blocks 0..3 are
 * luma, 4+ chroma (selects the DC scale used). */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->idsp.idct_put(dest, line_size, block);
}
1346 
1347 /* add block[] to dest[] */
1348 static inline void add_dct(MpegEncContext *s,
1349  int16_t *block, int i, uint8_t *dest, int line_size)
1350 {
1351  if (s->block_last_index[i] >= 0) {
1352  s->idsp.idct_add(dest, line_size, block);
1353  }
1354 }
1355 
1356 static inline void add_dequant_dct(MpegEncContext *s,
1357  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1358 {
1359  if (s->block_last_index[i] >= 0) {
1360  s->dct_unquantize_inter(s, block, i, qscale);
1361 
1362  s->idsp.idct_add(dest, line_size, block);
1363  }
1364 }
1365 
1366 /**
1367  * Clean dc, ac, coded_block for the current non-intra MB.
1368  */
1370 {
1371  int wrap = s->b8_stride;
1372  int xy = s->block_index[0];
1373 
1374  s->dc_val[0][xy ] =
1375  s->dc_val[0][xy + 1 ] =
1376  s->dc_val[0][xy + wrap] =
1377  s->dc_val[0][xy + 1 + wrap] = 1024;
1378  /* ac pred */
1379  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1380  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1381  if (s->msmpeg4_version>=3) {
1382  s->coded_block[xy ] =
1383  s->coded_block[xy + 1 ] =
1384  s->coded_block[xy + wrap] =
1385  s->coded_block[xy + 1 + wrap] = 0;
1386  }
1387  /* chroma */
1388  wrap = s->mb_stride;
1389  xy = s->mb_x + s->mb_y * wrap;
1390  s->dc_val[1][xy] =
1391  s->dc_val[2][xy] = 1024;
1392  /* ac pred */
1393  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1394  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1395 
1396  s->mbintra_table[xy]= 0;
1397 }
1398 
1399 /* generic function called after a macroblock has been parsed by the
1400  decoder or after it has been encoded by the encoder.
1401 
1402  Important variables used:
1403  s->mb_intra : true if intra macroblock
1404  s->mv_dir : motion vector direction
1405  s->mv_type : motion vector type
1406  s->mv : motion vector
1407  s->interlaced_dct : true if interlaced dct used (mpeg2)
1408  */
1409 static av_always_inline
1411  int lowres_flag, int is_mpeg12)
1412 {
1413 #define IS_ENCODER(s) (CONFIG_MPEGVIDEOENC && !lowres_flag && (s)->encoding)
1414 #define IS_MPEG12(s) (CONFIG_SMALL ? ((s)->out_format == FMT_MPEG1) : is_mpeg12)
1415  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1416 
1417  s->current_picture.qscale_table[mb_xy] = s->qscale;
1418 
1419  /* update DC predictors for P macroblocks */
1420  if (!s->mb_intra) {
1421  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1422  if(s->mbintra_table[mb_xy])
1424  } else {
1425  s->last_dc[0] =
1426  s->last_dc[1] =
1427  s->last_dc[2] = 128 << s->intra_dc_precision;
1428  }
1429  }
1430  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1431  s->mbintra_table[mb_xy]=1;
1432 
1433  if (!IS_ENCODER(s) || (s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
1434  !((s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
1435  s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
1436  uint8_t *dest_y, *dest_cb, *dest_cr;
1437  int dct_linesize, dct_offset;
1438  op_pixels_func (*op_pix)[4];
1439  qpel_mc_func (*op_qpix)[16];
1440  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1441  const int uvlinesize = s->current_picture.f->linesize[1];
1442  const int readable = s->pict_type != AV_PICTURE_TYPE_B || IS_ENCODER(s) || s->avctx->draw_horiz_band || lowres_flag;
1443  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
1444 
1445  /* avoid copy if macroblock skipped in last frame too */
1446  /* skip only during decoding as we might trash the buffers during encoding a bit */
1447  if (!IS_ENCODER(s)) {
1448  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1449 
1450  if (s->mb_skipped) {
1451  s->mb_skipped= 0;
1452  av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
1453  *mbskip_ptr = 1;
1454  } else if(!s->current_picture.reference) {
1455  *mbskip_ptr = 1;
1456  } else{
1457  *mbskip_ptr = 0; /* not skipped */
1458  }
1459  }
1460 
1461  dct_linesize = linesize << s->interlaced_dct;
1462  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
1463 
1464  if(readable){
1465  dest_y= s->dest[0];
1466  dest_cb= s->dest[1];
1467  dest_cr= s->dest[2];
1468  }else{
1469  dest_y = s->sc.b_scratchpad;
1470  dest_cb= s->sc.b_scratchpad+16*linesize;
1471  dest_cr= s->sc.b_scratchpad+32*linesize;
1472  }
1473 
1474  if (!s->mb_intra) {
1475  /* motion handling */
1476  /* decoding or more than one mb_type (MC was already done otherwise) */
1477  if (!IS_ENCODER(s)) {
1478 
1479  if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
1480  if (s->mv_dir & MV_DIR_FORWARD) {
1481  ff_thread_await_progress(&s->last_picture_ptr->tf,
1483  0);
1484  }
1485  if (s->mv_dir & MV_DIR_BACKWARD) {
1486  ff_thread_await_progress(&s->next_picture_ptr->tf,
1488  0);
1489  }
1490  }
1491 
1492  if(lowres_flag){
1493  h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
1494 
1495  if (s->mv_dir & MV_DIR_FORWARD) {
1496  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
1497  op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
1498  }
1499  if (s->mv_dir & MV_DIR_BACKWARD) {
1500  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
1501  }
1502  }else{
1503  op_qpix = s->me.qpel_put;
1504  if ((is_mpeg12 || !s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1505  op_pix = s->hdsp.put_pixels_tab;
1506  }else{
1507  op_pix = s->hdsp.put_no_rnd_pixels_tab;
1508  }
1509  if (s->mv_dir & MV_DIR_FORWARD) {
1510  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
1511  op_pix = s->hdsp.avg_pixels_tab;
1512  op_qpix= s->me.qpel_avg;
1513  }
1514  if (s->mv_dir & MV_DIR_BACKWARD) {
1515  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
1516  }
1517  }
1518  }
1519 
1520  /* skip dequant / idct if we are really late ;) */
1521  if(s->avctx->skip_idct){
1522  if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
1523  ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
1524  || s->avctx->skip_idct >= AVDISCARD_ALL)
1525  goto skip_idct;
1526  }
1527 
1528  /* add dct residue */
1529  if (IS_ENCODER(s) || !(IS_MPEG12(s) || s->msmpeg4_version
1530  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
1531  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1532  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1533  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1534  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1535 
1536  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1537  if (s->chroma_y_shift){
1538  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1539  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1540  }else{
1541  dct_linesize >>= 1;
1542  dct_offset >>=1;
1543  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1544  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1545  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1546  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1547  }
1548  }
1549  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
1550  add_dct(s, block[0], 0, dest_y , dct_linesize);
1551  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
1552  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
1553  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
1554 
1555  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1556  if(s->chroma_y_shift){//Chroma420
1557  add_dct(s, block[4], 4, dest_cb, uvlinesize);
1558  add_dct(s, block[5], 5, dest_cr, uvlinesize);
1559  }else{
1560  //chroma422
1561  dct_linesize = uvlinesize << s->interlaced_dct;
1562  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
1563 
1564  add_dct(s, block[4], 4, dest_cb, dct_linesize);
1565  add_dct(s, block[5], 5, dest_cr, dct_linesize);
1566  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
1567  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
1568  if(!s->chroma_x_shift){//Chroma444
1569  add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
1570  add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
1571  add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
1572  add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
1573  }
1574  }
1575  }//fi gray
1576  } else if (CONFIG_WMV2_DECODER) {
1577  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
1578  }
1579  } else {
1580  /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
1581  TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
1582  if (!is_mpeg12 && CONFIG_MPEG4_DECODER && /* s->codec_id == AV_CODEC_ID_MPEG4 && */
1583  s->avctx->bits_per_raw_sample > 8) {
1584  ff_mpeg4_decode_studio(s, dest_y, dest_cb, dest_cr, block_size,
1585  uvlinesize, dct_linesize, dct_offset);
1586  }
1587  /* dct only in intra block */
1588  else if (IS_ENCODER(s) || !IS_MPEG12(s)) {
1589  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1590  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1591  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1592  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1593 
1594  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1595  if(s->chroma_y_shift){
1596  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1597  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1598  }else{
1599  dct_offset >>=1;
1600  dct_linesize >>=1;
1601  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1602  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1603  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1604  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1605  }
1606  }
1607  }else{
1608  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
1609  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
1610  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
1611  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
1612 
1613  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1614  if(s->chroma_y_shift){
1615  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
1616  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
1617  }else{
1618 
1619  dct_linesize = uvlinesize << s->interlaced_dct;
1620  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
1621 
1622  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
1623  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
1624  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
1625  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
1626  if(!s->chroma_x_shift){//Chroma444
1627  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
1628  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
1629  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
1630  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
1631  }
1632  }
1633  }//gray
1634  }
1635  }
1636 skip_idct:
1637  if(!readable){
1638  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
1639  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1640  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
1641  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
1642  }
1643  }
1644  }
1645 }
1646 
1648 {
1649  if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
1650  /* print DCT coefficients */
1651  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1652  for (int i = 0; i < 6; i++) {
1653  for (int j = 0; j < 64; j++) {
1654  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1655  block[i][s->idsp.idct_permutation[j]]);
1656  }
1657  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1658  }
1659  }
1660 
1661 #if !CONFIG_SMALL
1662  if(s->out_format == FMT_MPEG1) {
1663  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 1);
1664  else mpv_reconstruct_mb_internal(s, block, 0, 1);
1665  } else
1666 #endif
1667  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 0);
1668  else mpv_reconstruct_mb_internal(s, block, 0, 0);
1669 }
1670 
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    /* Set up s->block_index[] and the s->dest[] plane pointers for the
     * current (mb_x, mb_y); dest points one MB *left* of the current MB
     * (note the "mb_x - 1" below) — presumably advanced per-MB by the
     * caller, TODO confirm against callers. */
    const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f->linesize[1];
    /* log2 of MB width/height in samples; >8-bit samples take 2 bytes each */
    const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
    const int height_of_mb = 4 - s->avctx->lowres;

    /* indices of the four 8x8 luma blocks and the two chroma blocks */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) <<  width_of_mb);
    s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y *   linesize <<  height_of_mb;
            s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
        }else{
            /* field picture: mb_y counts rows of the interleaved frame,
             * hence the >> 1 to get the row inside the current field */
            s->dest[0] += (s->mb_y>>1) *   linesize <<  height_of_mb;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
            av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
1703 
1704 /**
1705  * set qscale and update qscale dependent variables.
1706  */
1707 void ff_set_qscale(MpegEncContext * s, int qscale)
1708 {
1709  if (qscale < 1)
1710  qscale = 1;
1711  else if (qscale > 31)
1712  qscale = 31;
1713 
1714  s->qscale = qscale;
1715  s->chroma_qscale= s->chroma_qscale_table[qscale];
1716 
1717  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
1718  s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
1719 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:97
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:101
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:736
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:248
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
free_duplicate_contexts
static void free_duplicate_contexts(MpegEncContext *s)
Definition: mpegvideo.c:438
level
uint8_t level
Definition: svq3.c:206
av_clip
#define av_clip
Definition: common.h:95
ff_mpeg_framesize_alloc
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
Definition: mpegpicture.c:78
blockdsp.h
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo.c:963
ff_mpv_init_context_frame
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:523
backup_duplicate_context
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
Definition: mpegvideo.c:447
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:505
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo.c:916
ff_mpeg1_dc_scale_table
const uint8_t ff_mpeg1_dc_scale_table[128]
Definition: mpegvideodata.c:33
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:250
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo.c:1304
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:62
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:111
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:474
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
w
uint8_t w
Definition: llviddspenc.c:38
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1369
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
init_duplicate_context
static int init_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:349
ff_mpv_common_init_arm
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
Definition: mpegvideo_arm.c:44
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:1671
mpegvideo.h
ff_wmv2_add_mb
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2dec.c:85
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:81
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:253
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:287
mpegutils.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:239
free_duplicate_context
static void free_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:418
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:245
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:252
IS_MPEG12
#define IS_MPEG12(s)
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpegvideo.c:1410
av_memdup
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
Definition: mem.c:312
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo.c:1102
dct_unquantize_mpeg1_inter_c
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:81
U
#define U(x)
Definition: vp56_arith.h:37
fail
#define fail()
Definition: checkasm.h:130
wrap
#define wrap(func)
Definition: neontest.h:65
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:119
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:36
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2690
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
FF_ALLOC_TYPED_ARRAY
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:96
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
dct_unquantize_mpeg1_intra_c
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:52
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:877
MpegEncContext::pblocks
int16_t(*[12] pblocks)[64]
Definition: mpegvideo.h:466
avassert.h
gray16
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:269
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
width
#define width
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo.c:1348
s
#define s(width, name)
Definition: cbs_vp9.c:256
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:68
dct_unquantize_mpeg2_intra_bitexact
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:140
FMT_H261
@ FMT_H261
Definition: mpegutils.h:118
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
limits.h
ff_mpegvideodsp_init
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
Definition: mpegvideodsp.c:110
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
threadframe.h
NULL
#define NULL
Definition: coverity.c:32
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:329
me
#define me
Definition: vf_colormatrix.c:104
put_dct
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1340
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:1707
mathops.h
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1286
lowres
static int lowres
Definition: ffplay.c:335
qpeldsp.h
ff_alternate_horizontal_scan
const uint8_t ff_alternate_horizontal_scan[64]
Definition: mpegvideodata.c:84
MpegEncContext::me
MotionEstContext me
Definition: mpegvideo.h:264
ME_MAP_SIZE
#define ME_MAP_SIZE
Definition: motion_est.h:38
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
dct_unquantize_mpeg2_intra_c
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:110
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:249
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1328
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:53
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:249
ff_mpeg_er_init
int ff_mpeg_er_init(MpegEncContext *s)
Definition: mpeg_er.c:100
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:974
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1475
height
#define height
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:251
IS_ENCODER
#define IS_ENCODER(s)
dct_unquantize_mpeg2_inter_c
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:174
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1474
clear_context
static void clear_context(MpegEncContext *s)
Definition: mpegvideo.c:657
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
dct_unquantize_h263_inter_c
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:242
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:95
ff_init_scantable
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:30
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
internal.h
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
ff_mpv_common_init_ppc
void ff_mpv_common_init_ppc(MpegEncContext *s)
Definition: mpegvideo_altivec.c:119
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:187
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
ff_mpv_picture_free
void av_cold ff_mpv_picture_free(AVCodecContext *avctx, Picture *pic)
Definition: mpegpicture.c:474
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1337
idctdsp.h
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
dct_init
static av_cold int dct_init(MpegEncContext *s)
Definition: mpegvideo.c:282
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
ff_mpv_free_context_frame
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:823
MpegEncContext::block
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:468
ff_mpeg4_decode_studio
void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int block_size, int uvlinesize, int dct_linesize, int dct_offset)
Definition: mpeg4videodec.c:74
ScratchpadContext::edge_emu_buffer
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:37
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
dct_unquantize_h263_intra_c
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:208
ff_mpv_common_init_x86
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
Definition: mpegvideo.c:454
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo.c:1170
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:1647
ff_mpv_common_init_axp
av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
Definition: mpegvideo_alpha.c:106
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:865
ff_mpv_common_init_mips
av_cold void ff_mpv_common_init_mips(MpegEncContext *s)
Definition: mpegvideo_init_mips.c:26
ff_default_chroma_qscale_table
const uint8_t ff_default_chroma_qscale_table[32]
Definition: mpegvideodata.c:21
ff_mpv_init_duplicate_contexts
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:394
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:278
COPY
#define COPY(a)
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:414
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:244
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
mpeg_er.h
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:370
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
add_dequant_dct
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1356
h
h
Definition: vp9dsp_template.c:2038
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:59
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:50
int
int
Definition: ffmpeg_filter.c:153
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:52
ff_mpv_common_init_neon
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
Definition: mpegvideo.c:127
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:64
gray8
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:275