FFmpeg
mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
35 #include "avcodec.h"
36 #include "blockdsp.h"
37 #include "h264chroma.h"
38 #include "idctdsp.h"
39 #include "internal.h"
40 #include "mathops.h"
41 #include "mpeg_er.h"
42 #include "mpegutils.h"
43 #include "mpegvideo.h"
44 #include "mpegvideodata.h"
45 #include "mjpegenc.h"
46 #include "msmpeg4.h"
47 #include "qpeldsp.h"
48 #include "thread.h"
49 #include "wmv2.h"
50 #include <limits.h>
51 
/* Dequantize an 8x8 MPEG-1 *intra* block in place.
 * NOTE(review): the function-name line was lost by this scrape; in upstream
 * FFmpeg this body belongs to dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
 * ...) — confirm against the original file. */
53  int16_t *block, int n, int qscale)
54 {
55  int i, level, nCoeffs;
56  const uint16_t *quant_matrix;
57 
58  nCoeffs= s->block_last_index[n];
59 
 /* DC coefficient is scaled separately: blocks 0-3 are luma (y_dc_scale),
  * the rest chroma (c_dc_scale); the quant matrix is not applied to it. */
60  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
61  /* XXX: only MPEG-1 */
62  quant_matrix = s->intra_matrix;
63  for(i=1;i<=nCoeffs;i++) {
64  int j= s->intra_scantable.permutated[i];
65  level = block[j];
66  if (level) {
67  if (level < 0) {
 /* Work on the magnitude so the >> behaves identically for both signs. */
68  level = -level;
69  level = (int)(level * qscale * quant_matrix[j]) >> 3;
 /* (level - 1) | 1 forces the result odd — the "oddification" / mismatch
  * control required by the MPEG-1 inverse-quantization rules. */
70  level = (level - 1) | 1;
71  level = -level;
72  } else {
73  level = (int)(level * qscale * quant_matrix[j]) >> 3;
74  level = (level - 1) | 1;
75  }
76  block[j] = level;
77  }
78  }
79 }
80 
/* Dequantize an 8x8 MPEG-1 *inter* block in place.
 * NOTE(review): name line lost in scrape; presumably
 * dct_unquantize_mpeg1_inter_c — confirm upstream. */
82  int16_t *block, int n, int qscale)
83 {
84  int i, level, nCoeffs;
85  const uint16_t *quant_matrix;
86 
87  nCoeffs= s->block_last_index[n];
88 
89  quant_matrix = s->inter_matrix;
 /* Inter blocks have no special DC handling, so the loop starts at 0.
  * NOTE(review): intra_scantable is referenced on the inter path — in FFmpeg
  * both paths share the same permutated scantable, so this is intentional. */
90  for(i=0; i<=nCoeffs; i++) {
91  int j= s->intra_scantable.permutated[i];
92  level = block[j];
93  if (level) {
94  if (level < 0) {
95  level = -level;
 /* Inter reconstruction uses (2*|level| + 1) * qscale * matrix / 16. */
96  level = (((level << 1) + 1) * qscale *
97  ((int) (quant_matrix[j]))) >> 4;
 /* Oddification (mismatch control), as in the intra path. */
98  level = (level - 1) | 1;
99  level = -level;
100  } else {
101  level = (((level << 1) + 1) * qscale *
102  ((int) (quant_matrix[j]))) >> 4;
103  level = (level - 1) | 1;
104  }
105  block[j] = level;
106  }
107  }
108 }
109 
/* Dequantize an 8x8 MPEG-2 *intra* block in place (non-bitexact variant:
 * no final mismatch-control parity fixup — see the variant below that
 * tracks `sum`). NOTE(review): name line lost in scrape; presumably
 * dct_unquantize_mpeg2_intra_c — confirm upstream. */
111  int16_t *block, int n, int qscale)
112 {
113  int i, level, nCoeffs;
114  const uint16_t *quant_matrix;
115 
 /* MPEG-2 qscale is either mapped through the non-linear table or doubled. */
116  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
117  else qscale <<= 1;
118 
 /* With alternate scan the last-index bookkeeping is unreliable, so all
  * 63 AC coefficients are processed. */
119  if(s->alternate_scan) nCoeffs= 63;
120  else nCoeffs= s->block_last_index[n];
121 
122  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
123  quant_matrix = s->intra_matrix;
124  for(i=1;i<=nCoeffs;i++) {
125  int j= s->intra_scantable.permutated[i];
126  level = block[j];
127  if (level) {
128  if (level < 0) {
129  level = -level;
130  level = (int)(level * qscale * quant_matrix[j]) >> 4;
131  level = -level;
132  } else {
133  level = (int)(level * qscale * quant_matrix[j]) >> 4;
134  }
135  block[j] = level;
136  }
137  }
138 }
139 
/* Bitexact MPEG-2 *intra* dequantizer: identical to the variant above but
 * additionally accumulates the coefficient sum and applies the spec's
 * mismatch control at the end. NOTE(review): name line lost in scrape;
 * presumably dct_unquantize_mpeg2_intra_bitexact — confirm upstream. */
141  int16_t *block, int n, int qscale)
142 {
143  int i, level, nCoeffs;
144  const uint16_t *quant_matrix;
 /* sum starts at -1 so the final parity test implements "sum must be odd". */
145  int sum=-1;
146 
147  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
148  else qscale <<= 1;
149 
150  if(s->alternate_scan) nCoeffs= 63;
151  else nCoeffs= s->block_last_index[n];
152 
153  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
154  sum += block[0];
155  quant_matrix = s->intra_matrix;
156  for(i=1;i<=nCoeffs;i++) {
157  int j= s->intra_scantable.permutated[i];
158  level = block[j];
159  if (level) {
160  if (level < 0) {
161  level = -level;
162  level = (int)(level * qscale * quant_matrix[j]) >> 4;
163  level = -level;
164  } else {
165  level = (int)(level * qscale * quant_matrix[j]) >> 4;
166  }
167  block[j] = level;
168  sum+=level;
169  }
170  }
 /* MPEG-2 mismatch control: toggle the LSB of the last coefficient so the
  * sum of all reconstructed coefficients is odd. */
171  block[63]^=sum&1;
172 }
173 
/* Dequantize an 8x8 MPEG-2 *inter* block in place, with mismatch control.
 * NOTE(review): name line lost in scrape; presumably
 * dct_unquantize_mpeg2_inter_c — confirm upstream. */
175  int16_t *block, int n, int qscale)
176 {
177  int i, level, nCoeffs;
178  const uint16_t *quant_matrix;
 /* -1 seed + final parity xor implements "coefficient sum must be odd". */
179  int sum=-1;
180 
181  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
182  else qscale <<= 1;
183 
184  if(s->alternate_scan) nCoeffs= 63;
185  else nCoeffs= s->block_last_index[n];
186 
187  quant_matrix = s->inter_matrix;
188  for(i=0; i<=nCoeffs; i++) {
189  int j= s->intra_scantable.permutated[i];
190  level = block[j];
191  if (level) {
192  if (level < 0) {
193  level = -level;
 /* Inter reconstruction: (2*|level| + 1) * qscale * matrix / 32. */
194  level = (((level << 1) + 1) * qscale *
195  ((int) (quant_matrix[j]))) >> 5;
196  level = -level;
197  } else {
198  level = (((level << 1) + 1) * qscale *
199  ((int) (quant_matrix[j]))) >> 5;
200  }
201  block[j] = level;
202  sum+=level;
203  }
204  }
205  block[63]^=sum&1;
206 }
207 
/* Dequantize an H.263 *intra* block in place:
 * reconstructed = sign(level) * (2*qscale*|level| + qadd).
 * NOTE(review): name line lost in scrape; presumably
 * dct_unquantize_h263_intra_c — confirm upstream. */
209  int16_t *block, int n, int qscale)
210 {
211  int i, level, qmul, qadd;
212  int nCoeffs;
213 
214  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
215 
216  qmul = qscale << 1;
217 
 /* With advanced intra coding (AIC) the DC is predicted elsewhere, so no
  * DC scaling and no qadd offset are applied here. */
218  if (!s->h263_aic) {
219  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
220  qadd = (qscale - 1) | 1;
221  }else{
222  qadd = 0;
223  }
224  if(s->ac_pred)
225  nCoeffs=63;
226  else
227  nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
228 
 /* H.263 blocks are stored in raster order here, so no scantable
  * permutation is needed inside the loop. */
229  for(i=1; i<=nCoeffs; i++) {
230  level = block[i];
231  if (level) {
232  if (level < 0) {
233  level = level * qmul - qadd;
234  } else {
235  level = level * qmul + qadd;
236  }
237  block[i] = level;
238  }
239  }
240 }
241 
/* Dequantize an H.263 *inter* block in place; same qmul/qadd formula as the
 * intra path but starting at coefficient 0 and never DC-scaled.
 * NOTE(review): name line lost in scrape; presumably
 * dct_unquantize_h263_inter_c — confirm upstream. */
243  int16_t *block, int n, int qscale)
244 {
245  int i, level, qmul, qadd;
246  int nCoeffs;
247 
248  av_assert2(s->block_last_index[n]>=0);
249 
250  qadd = (qscale - 1) | 1;
251  qmul = qscale << 1;
252 
253  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
254 
255  for(i=0; i<=nCoeffs; i++) {
256  level = block[i];
257  if (level) {
258  if (level < 0) {
259  level = level * qmul - qadd;
260  } else {
261  level = level * qmul + qadd;
262  }
263  block[i] = level;
264  }
265  }
266 }
267 
268 
/* Fill a 16-pixel-wide band of h rows at dst with mid-gray (128).
 * The src parameter is unused; it exists only so this function matches the
 * hpeldsp put/avg_pixels function-pointer signature it is installed into
 * (debug "no motion compensation" mode). */
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 16);
}
274 
/* Fill an 8-pixel-wide band of h rows at dst with mid-gray (128).
 * src is unused; the signature matches the hpeldsp halfpel function-pointer
 * type so this can replace chroma put/avg routines in FF_DEBUG_NOMC mode. */
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 8);
}
280 
281 /* init common dct for both encoder and decoder */
/* Initialize the DSP function tables shared by encoder and decoder
 * (blockdsp, h264chroma for lowres, hpeldsp) and install per-architecture
 * overrides. NOTE(review): the signature line and the lines behind several
 * of the conditionals (dct_unquantize assignments, the arch-specific
 * *_init calls after each if) were dropped by this scrape — the bare
 * `if (ARCH_*)` lines below are missing their bodies; consult upstream. */
283 {
284  ff_blockdsp_init(&s->bdsp, s->avctx);
285  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
286  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
289 
 /* FF_DEBUG_NOMC: replace all put/avg motion-compensation routines with
  * flat-gray fills so motion vectors can be inspected visually. */
290  if (s->avctx->debug & FF_DEBUG_NOMC) {
291  int i;
292  for (i=0; i<4; i++) {
293  s->hdsp.avg_pixels_tab[0][i] = gray16;
294  s->hdsp.put_pixels_tab[0][i] = gray16;
296 
297  s->hdsp.avg_pixels_tab[1][i] = gray8;
298  s->hdsp.put_pixels_tab[1][i] = gray8;
300  }
301  }
302 
311 
312  if (HAVE_INTRINSICS_NEON)
314 
315  if (ARCH_ALPHA)
317  if (ARCH_ARM)
319  if (ARCH_PPC)
321  if (ARCH_X86)
323  if (ARCH_MIPS)
325 
326  return 0;
327 }
328 
/* Initialize the IDCT and the permutated scantables for the context.
 * NOTE(review): the signature line and the scantable-init calls inside the
 * if/else (ff_init_scantable for alternate vs. zigzag scan) were dropped
 * by this scrape — consult upstream ff_mpv_idct_init. */
330 {
331  if (s->codec_id == AV_CODEC_ID_MPEG4)
333  ff_idctdsp_init(&s->idsp, s->avctx);
334 
335  /* load & permutate scantables
336  * note: only wmv uses different ones
337  */
338  if (s->alternate_scan) {
341  } else {
344  }
347 }
348 
/* Thin wrapper that allocates a Picture's frame buffers via ff_alloc_picture
 * using this context's geometry; also fills in s->linesize/uvlinesize.
 * NOTE(review): signature line lost in scrape; presumably
 * static int alloc_picture(MpegEncContext *s, Picture *pic). */
350 {
351  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, 0,
353  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
354  &s->linesize, &s->uvlinesize);
355 }
356 
/* Allocate the per-slice-context buffers (DCT blocks, scratchpads, AC
 * prediction values). Returns 0 or AVERROR(ENOMEM).
 * NOTE(review): the signature line and the encoder-only allocation
 * conditions (me.map/score_map and dct_error_sum allocations before the
 * two bare `return AVERROR(ENOMEM);` lines) were dropped by the scrape. */
358 {
359  int y_size = s->b8_stride * (2 * s->mb_height + 1);
360  int c_size = s->mb_stride * (s->mb_height + 1);
361  int yc_size = y_size + 2 * c_size;
362  int i;
363 
364  if (s->mb_height & 1)
365  yc_size += 2*s->b8_stride + 2*s->mb_stride;
366 
 /* Scratch buffers are allocated lazily (ff_mpeg_framesize_alloc); start
  * them all as NULL. */
367  s->sc.edge_emu_buffer =
368  s->me.scratchpad =
369  s->me.temp =
370  s->sc.rd_scratchpad =
371  s->sc.b_scratchpad =
372  s->sc.obmc_scratchpad = NULL;
373 
374  if (s->encoding) {
377  return AVERROR(ENOMEM);
378 
379  if (s->noise_reduction) {
381  return AVERROR(ENOMEM);
382  }
383  }
384  if (!FF_ALLOCZ_TYPED_ARRAY(s->blocks, 2))
385  return AVERROR(ENOMEM);
386  s->block = s->blocks[0];
387 
388  for (i = 0; i < 12; i++) {
389  s->pblocks[i] = &s->block[i];
390  }
391 
392  if (!(s->block32 = av_mallocz(sizeof(*s->block32))) ||
393  !(s->dpcm_macroblock = av_mallocz(sizeof(*s->dpcm_macroblock))))
394  return AVERROR(ENOMEM);
395  s->dpcm_direction = 0;
396 
 /* VCR2 stores U and V swapped relative to the normal block order. */
397  if (s->avctx->codec_tag == AV_RL32("VCR2")) {
398  // exchange uv
399  FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
400  }
401 
402  if (s->out_format == FMT_H263) {
403  /* ac values */
404  if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, yc_size))
405  return AVERROR(ENOMEM);
406  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
407  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
408  s->ac_val[2] = s->ac_val[1] + c_size;
409  }
410 
411  return 0;
412 }
413 
/* Free the per-slice-context buffers allocated by init_duplicate_context
 * and ff_mpeg_framesize_alloc; NULL-safe. NOTE(review): signature line and
 * the av_freep(&s->sc.edge_emu_buffer) line (original line 419) were
 * dropped by the scrape. */
415 {
416  if (!s)
417  return;
418 
420  av_freep(&s->me.scratchpad);
 /* temp/rd/b/obmc scratchpads all alias regions of me.scratchpad, so only
  * the pointers are cleared here, not freed separately. */
421  s->me.temp =
422  s->sc.rd_scratchpad =
423  s->sc.b_scratchpad =
424  s->sc.obmc_scratchpad = NULL;
425 
426  av_freep(&s->dct_error_sum);
427  av_freep(&s->me.map);
428  av_freep(&s->me.score_map);
429  av_freep(&s->blocks);
430  av_freep(&s->block32);
432  av_freep(&s->ac_val_base);
433  s->block = NULL;
434 }
435 
/* Copy only the per-duplicate (per-slice-thread) fields from src to bak.
 * Used around the whole-struct memcpy in the context-update path so the
 * destination keeps its own buffers. NOTE(review): signature line lost in
 * scrape; presumably static void backup_duplicate_context(MpegEncContext
 * *bak, MpegEncContext *src). */
437 {
438 #define COPY(a) bak->a = src->a
439  COPY(sc.edge_emu_buffer);
440  COPY(me.scratchpad);
441  COPY(me.temp);
442  COPY(sc.rd_scratchpad);
443  COPY(sc.b_scratchpad);
444  COPY(sc.obmc_scratchpad);
445  COPY(me.map);
446  COPY(me.score_map);
447  COPY(blocks);
448  COPY(block);
449  COPY(block32);
450  COPY(dpcm_macroblock);
451  COPY(dpcm_direction);
452  COPY(start_mb_y);
453  COPY(end_mb_y);
454  COPY(me.map_generation);
455  COPY(pb);
456  COPY(dct_error_sum);
457  COPY(dct_count[0]);
458  COPY(dct_count[1]);
459  COPY(ac_val_base);
460  COPY(ac_val[0]);
461  COPY(ac_val[1]);
462  COPY(ac_val[2]);
463 #undef COPY
464 }
465 
/* Overwrite dst with src's state while preserving dst's own per-duplicate
 * buffers (saved/restored via backup_duplicate_context), then re-derive the
 * pointer fields that must reference dst's own storage. Returns 0 or a
 * negative AVERROR. NOTE(review): signature line lost in scrape; presumably
 * int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src). */
467 {
468  MpegEncContext bak;
469  int i, ret;
470  // FIXME copy only needed parts
471  backup_duplicate_context(&bak, dst);
472  memcpy(dst, src, sizeof(MpegEncContext));
473  backup_duplicate_context(dst, &bak);
 /* pblocks must point into dst's own block array, not src's. */
474  for (i = 0; i < 12; i++) {
475  dst->pblocks[i] = &dst->block[i];
476  }
477  if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
478  // exchange uv
479  FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
480  }
481  if (!dst->sc.edge_emu_buffer &&
482  (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
483  &dst->sc, dst->linesize)) < 0) {
484  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
485  "scratch buffers.\n");
486  return ret;
487  }
488  return 0;
489 }
490 
/* Frame-threading context update: copy decoding state from the previous
 * thread's context (src/s1) into this one (dst/s), lazily initializing or
 * resizing dst as needed. NOTE(review): the first signature line and a few
 * interior lines (e.g. the allocated_bitstream_buffer_size comparison and
 * the av_fast_malloc call around original lines 595-597, and one
 * UPDATE_PICTURE invocation near line 563) were dropped by the scrape. */
492  const AVCodecContext *src)
493 {
494  int i, ret;
495  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
496 
497  if (dst == src)
498  return 0;
499 
500  av_assert0(s != s1);
501 
502  // FIXME can parameters change on I-frames?
503  // in that case dst may need a reinit
 /* First call for this thread: bootstrap dst from src's whole struct, then
  * redo the per-context allocations that must not be shared. */
504  if (!s->context_initialized) {
505  int err;
506  memcpy(s, s1, sizeof(MpegEncContext));
507 
508  s->avctx = dst;
509  s->bitstream_buffer = NULL;
511 
512  if (s1->context_initialized){
513 // s->picture_range_start += MAX_PICTURE_COUNT;
514 // s->picture_range_end += MAX_PICTURE_COUNT;
515  ff_mpv_idct_init(s);
516  if((err = ff_mpv_common_init(s)) < 0){
 /* On failure wipe the struct so a later retry starts clean. */
517  memset(s, 0, sizeof(MpegEncContext));
518  s->avctx = dst;
519  return err;
520  }
521  }
522  }
523 
524  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
525  s->context_reinit = 0;
526  s->height = s1->height;
527  s->width = s1->width;
528  if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
529  return ret;
530  }
531 
532  s->avctx->coded_height = s1->avctx->coded_height;
533  s->avctx->coded_width = s1->avctx->coded_width;
534  s->avctx->width = s1->avctx->width;
535  s->avctx->height = s1->avctx->height;
536 
537  s->quarter_sample = s1->quarter_sample;
538 
539  s->coded_picture_number = s1->coded_picture_number;
540  s->picture_number = s1->picture_number;
541 
542  av_assert0(!s->picture || s->picture != s1->picture);
543  if(s->picture)
544  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
545  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
546  if (s1->picture && s1->picture[i].f->buf[0] &&
547  (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
548  return ret;
549  }
550 
551 #define UPDATE_PICTURE(pic)\
552 do {\
553  ff_mpeg_unref_picture(s->avctx, &s->pic);\
554  if (s1->pic.f && s1->pic.f->buf[0])\
555  ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
556  else\
557  ret = ff_update_picture_tables(&s->pic, &s1->pic);\
558  if (ret < 0)\
559  return ret;\
560 } while (0)
561 
562  UPDATE_PICTURE(current_picture);
564  UPDATE_PICTURE(next_picture);
565 
 /* Translate a Picture* that points into s1's picture array into the
  * corresponding slot of s's array (or NULL if it is not in the array). */
566 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
567  ((pic && pic >= old_ctx->picture && \
568  pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
569  &new_ctx->picture[pic - old_ctx->picture] : NULL)
570 
571  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
572  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
573  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
574 
575  // Error/bug resilience
576  s->next_p_frame_damaged = s1->next_p_frame_damaged;
577  s->workaround_bugs = s1->workaround_bugs;
578  s->padding_bug_score = s1->padding_bug_score;
579 
580  // MPEG-4 timing info
 /* Bulk-copy the contiguous struct range [last_time_base, pb_field_time];
  * relies on these members being laid out adjacently in MpegEncContext. */
581  memcpy(&s->last_time_base, &s1->last_time_base,
582  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
583  (char *) &s1->last_time_base);
584 
585  // B-frame info
586  s->max_b_frames = s1->max_b_frames;
587  s->low_delay = s1->low_delay;
588  s->droppable = s1->droppable;
589 
590  // DivX handling (doesn't work)
591  s->divx_packed = s1->divx_packed;
592 
593  if (s1->bitstream_buffer) {
594  if (s1->bitstream_buffer_size +
598  s1->allocated_bitstream_buffer_size);
599  if (!s->bitstream_buffer) {
600  s->bitstream_buffer_size = 0;
601  return AVERROR(ENOMEM);
602  }
603  }
604  s->bitstream_buffer_size = s1->bitstream_buffer_size;
605  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
606  s1->bitstream_buffer_size);
607  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
609  }
610 
611  // linesize-dependent scratch buffer allocation
612  if (!s->sc.edge_emu_buffer)
613  if (s1->linesize) {
614  if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
615  &s->sc, s1->linesize) < 0) {
616  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
617  "scratch buffers.\n");
618  return AVERROR(ENOMEM);
619  }
620  } else {
621  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
622  "be allocated due to unknown size.\n");
623  }
624 
625  // MPEG-2/interlacing info
626  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
627  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
628 
629  if (!s1->first_field) {
630  s->last_pict_type = s1->pict_type;
631  if (s1->current_picture_ptr)
632  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
633  }
634 
635  return 0;
636 }
637 
638 /**
639  * Set the given MpegEncContext to common defaults
640  * (same for encoding and decoding).
641  * The changed fields will not depend upon the
642  * prior state of the MpegEncContext.
643  */
/* Set codec-independent defaults shared by encoder and decoder.
 * NOTE(review): the signature line and the right-hand sides of the
 * dc_scale_table chained assignment (original lines 647-648, presumably
 * ff_mpeg1_dc_scale_table) were dropped by the scrape. */
645 {
646  s->y_dc_scale_table =
649  s->progressive_frame = 1;
650  s->progressive_sequence = 1;
652 
653  s->coded_picture_number = 0;
654  s->picture_number = 0;
655 
656  s->f_code = 1;
657  s->b_code = 1;
658 
659  s->slice_context_count = 1;
660 }
661 
662 /**
663  * Set the given MpegEncContext to defaults for decoding.
664  * the changed fields will not depend upon
665  * the prior state of the MpegEncContext.
666  */
/* Decoder defaults — NOTE(review): the signature line and the single body
 * line (presumably a call to ff_mpv_common_defaults(s)) were dropped by
 * this scrape; only the braces remain. Consult upstream. */
668 {
670 }
671 
/* Bind the context to its AVCodecContext and seed geometry/codec fields
 * from it. NOTE(review): signature line lost in scrape; presumably
 * void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx). */
673 {
674  s->avctx = avctx;
675  s->width = avctx->coded_width;
676  s->height = avctx->coded_height;
677  s->codec_id = avctx->codec->id;
678  s->workaround_bugs = avctx->workaround_bugs;
679 
680  /* convert fourcc to upper case */
681  s->codec_tag = avpriv_toupper4(avctx->codec_tag);
682 }
683 
684 /**
685  * Initialize and allocates MpegEncContext fields dependent on the resolution.
686  */
688 {
689  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
690 
691  s->mb_width = (s->width + 15) / 16;
692  s->mb_stride = s->mb_width + 1;
693  s->b8_stride = s->mb_width * 2 + 1;
694  mb_array_size = s->mb_height * s->mb_stride;
695  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
696 
697  /* set default edge pos, will be overridden
698  * in decode_header if needed */
699  s->h_edge_pos = s->mb_width * 16;
700  s->v_edge_pos = s->mb_height * 16;
701 
702  s->mb_num = s->mb_width * s->mb_height;
703 
704  s->block_wrap[0] =
705  s->block_wrap[1] =
706  s->block_wrap[2] =
707  s->block_wrap[3] = s->b8_stride;
708  s->block_wrap[4] =
709  s->block_wrap[5] = s->mb_stride;
710 
711  y_size = s->b8_stride * (2 * s->mb_height + 1);
712  c_size = s->mb_stride * (s->mb_height + 1);
713  yc_size = y_size + 2 * c_size;
714 
715  if (s->mb_height & 1)
716  yc_size += 2*s->b8_stride + 2*s->mb_stride;
717 
718  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1))
719  return AVERROR(ENOMEM);
720  for (y = 0; y < s->mb_height; y++)
721  for (x = 0; x < s->mb_width; x++)
722  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
723 
724  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
725 
726  if (s->encoding) {
727  /* Allocate MV tables */
728  if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base, mv_table_size) ||
729  !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base, mv_table_size) ||
730  !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base, mv_table_size) ||
733  !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base, mv_table_size))
734  return AVERROR(ENOMEM);
735  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
741 
742  /* Allocate MB type table */
743  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type, mb_array_size) ||
744  !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
745  !FF_ALLOC_TYPED_ARRAY (s->cplx_tab, mb_array_size) ||
746  !FF_ALLOC_TYPED_ARRAY (s->bits_tab, mb_array_size))
747  return AVERROR(ENOMEM);
748  }
749 
750  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
752  /* interlaced direct mode decoding tables */
753  for (i = 0; i < 2; i++) {
754  int j, k;
755  for (j = 0; j < 2; j++) {
756  for (k = 0; k < 2; k++) {
757  if (!FF_ALLOCZ_TYPED_ARRAY(s->b_field_mv_table_base[i][j][k], mv_table_size))
758  return AVERROR(ENOMEM);
759  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
760  s->mb_stride + 1;
761  }
762  if (!FF_ALLOCZ_TYPED_ARRAY(s->b_field_select_table [i][j], mv_table_size * 2) ||
763  !FF_ALLOCZ_TYPED_ARRAY(s->p_field_mv_table_base[i][j], mv_table_size))
764  return AVERROR(ENOMEM);
765  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
766  }
767  if (!FF_ALLOCZ_TYPED_ARRAY(s->p_field_select_table[i], mv_table_size * 2))
768  return AVERROR(ENOMEM);
769  }
770  }
771  if (s->out_format == FMT_H263) {
772  /* cbp values, cbp, ac_pred, pred_dir */
773  if (!FF_ALLOCZ_TYPED_ARRAY(s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride) ||
774  !FF_ALLOCZ_TYPED_ARRAY(s->cbp_table, mb_array_size) ||
775  !FF_ALLOCZ_TYPED_ARRAY(s->pred_dir_table, mb_array_size))
776  return AVERROR(ENOMEM);
777  s->coded_block = s->coded_block_base + s->b8_stride + 1;
778  }
779 
780  if (s->h263_pred || s->h263_plus || !s->encoding) {
781  /* dc values */
782  // MN: we need these for error resilience of intra-frames
783  if (!FF_ALLOCZ_TYPED_ARRAY(s->dc_val_base, yc_size))
784  return AVERROR(ENOMEM);
785  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
786  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
787  s->dc_val[2] = s->dc_val[1] + c_size;
788  for (i = 0; i < yc_size; i++)
789  s->dc_val_base[i] = 1024;
790  }
791 
792  /* which mb is an intra block, init macroblock skip table */
793  if (!FF_ALLOCZ_TYPED_ARRAY(s->mbintra_table, mb_array_size) ||
794  // Note the + 1 is for a quicker MPEG-4 slice_end detection
795  !FF_ALLOCZ_TYPED_ARRAY(s->mbskip_table, mb_array_size + 2))
796  return AVERROR(ENOMEM);
797  memset(s->mbintra_table, 1, mb_array_size);
798 
799  return ff_mpeg_er_init(s);
800 }
801 
/* Reset every pointer/size field of the context to a known-empty state so
 * the error paths of ff_mpv_common_init can free safely. NOTE(review): the
 * signature line and a handful of NULL-assignments (original lines 835,
 * 839-843, 847-848, 860, 871) were dropped by the scrape. */
803 {
804  int i, j, k;
805 
806  memset(&s->next_picture, 0, sizeof(s->next_picture));
807  memset(&s->last_picture, 0, sizeof(s->last_picture));
808  memset(&s->current_picture, 0, sizeof(s->current_picture));
809  memset(&s->new_picture, 0, sizeof(s->new_picture));
810 
811  memset(s->thread_context, 0, sizeof(s->thread_context));
812 
813  s->me.map = NULL;
814  s->me.score_map = NULL;
815  s->dct_error_sum = NULL;
816  s->block = NULL;
817  s->blocks = NULL;
818  s->block32 = NULL;
819  memset(s->pblocks, 0, sizeof(s->pblocks));
820  s->dpcm_direction = 0;
821  s->dpcm_macroblock = NULL;
822  s->ac_val_base = NULL;
823  s->ac_val[0] =
824  s->ac_val[1] =
825  s->ac_val[2] =NULL;
826  s->sc.edge_emu_buffer = NULL;
827  s->me.scratchpad = NULL;
828  s->me.temp =
829  s->sc.rd_scratchpad =
830  s->sc.b_scratchpad =
831  s->sc.obmc_scratchpad = NULL;
832 
833 
834  s->bitstream_buffer = NULL;
836  s->picture = NULL;
837  s->mb_type = NULL;
838  s->p_mv_table_base = NULL;
844  s->p_mv_table = NULL;
845  s->b_forw_mv_table = NULL;
846  s->b_back_mv_table = NULL;
849  s->b_direct_mv_table = NULL;
850  for (i = 0; i < 2; i++) {
851  for (j = 0; j < 2; j++) {
852  for (k = 0; k < 2; k++) {
853  s->b_field_mv_table_base[i][j][k] = NULL;
854  s->b_field_mv_table[i][j][k] = NULL;
855  }
856  s->b_field_select_table[i][j] = NULL;
857  s->p_field_mv_table_base[i][j] = NULL;
858  s->p_field_mv_table[i][j] = NULL;
859  }
861  }
862 
863  s->dc_val_base = NULL;
864  s->coded_block_base = NULL;
865  s->mbintra_table = NULL;
866  s->cbp_table = NULL;
867  s->pred_dir_table = NULL;
868 
869  s->mbskip_table = NULL;
870 
872  s->er.er_temp_buffer = NULL;
873  s->mb_index2xy = NULL;
874  s->lambda_table = NULL;
875 
876  s->cplx_tab = NULL;
877  s->bits_tab = NULL;
878 }
879 
880 /**
881  * init common structure for both encoder and decoder.
882  * this assumes that some variables like width/height are already set
883  */
/* Common encoder/decoder context initialization: geometry checks, DSP init,
 * picture-array and frame-size-dependent allocations, and slice-thread
 * context setup. Assumes width/height etc. are already set. NOTE(review):
 * the signature line and some interior lines (the thread-type condition at
 * original line 888, the progressive/field condition at 896, the av_log at
 * 902, chroma-shift call at 925, the picture-array allocation at 931, and
 * the start_mb_y assignment at 964) were dropped by the scrape. */
885 {
886  int i, ret;
887  int nb_slices = (HAVE_THREADS &&
889  s->avctx->thread_count : 1;
890 
891  clear_context(s);
892 
893  if (s->encoding && s->avctx->slices)
894  nb_slices = s->avctx->slices;
895 
 /* Interlaced MPEG-2 needs mb_height rounded to a multiple of two MB rows. */
897  s->mb_height = (s->height + 31) / 32 * 2;
898  else
899  s->mb_height = (s->height + 15) / 16;
900 
901  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
903  "decoding to AV_PIX_FMT_NONE is not supported.\n");
904  return AVERROR(EINVAL);
905  }
906 
 /* Cap slice contexts: never more than MAX_THREADS nor than one per MB row. */
907  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
908  int max_slices;
909  if (s->mb_height)
910  max_slices = FFMIN(MAX_THREADS, s->mb_height);
911  else
912  max_slices = MAX_THREADS;
913  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
914  " reducing to %d\n", nb_slices, max_slices);
915  nb_slices = max_slices;
916  }
917 
918  if ((s->width || s->height) &&
919  av_image_check_size(s->width, s->height, 0, s->avctx))
920  return AVERROR(EINVAL);
921 
922  dct_init(s);
923 
924  /* set chroma shifts */
926  &s->chroma_x_shift,
927  &s->chroma_y_shift);
928  if (ret)
929  return ret;
930 
932  return AVERROR(ENOMEM);
933  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
934  s->picture[i].f = av_frame_alloc();
935  if (!s->picture[i].f)
936  return AVERROR(ENOMEM);
937  }
938 
939  if (!(s->next_picture.f = av_frame_alloc()) ||
940  !(s->last_picture.f = av_frame_alloc()) ||
941  !(s->current_picture.f = av_frame_alloc()) ||
942  !(s->new_picture.f = av_frame_alloc()))
943  return AVERROR(ENOMEM);
944 
 /* NOTE(review): the real error code from init_context_frame is discarded
  * and reported as ENOMEM — presumably acceptable since its only failure
  * mode is allocation; verify against upstream before changing. */
945  if ((ret = init_context_frame(s)))
946  return AVERROR(ENOMEM);
947 
948  s->parse_context.state = -1;
949 
950  s->context_initialized = 1;
951  memset(s->thread_context, 0, sizeof(s->thread_context));
952  s->thread_context[0] = s;
953 
954 // if (s->width && s->height) {
955  if (nb_slices > 1) {
956  for (i = 0; i < nb_slices; i++) {
957  if (i) {
958  s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
959  if (!s->thread_context[i])
960  return AVERROR(ENOMEM);
961  }
962  if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
963  return ret;
 /* Rows are split evenly across slice contexts, rounding to nearest. */
965  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
966  s->thread_context[i]->end_mb_y =
967  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
968  }
969  } else {
970  if ((ret = init_duplicate_context(s)) < 0)
971  return ret;
972  s->start_mb_y = 0;
973  s->end_mb_y = s->mb_height;
974  }
975  s->slice_context_count = nb_slices;
976 // }
977 
978  return 0;
979 }
980 
981 /**
982  * Frees and resets MpegEncContext fields depending on the resolution.
983  * Is used during resolution changes to avoid a full reinitialization of the
984  * codec.
985  */
987 {
988  int i, j, k;
989 
990  av_freep(&s->mb_type);
997  s->p_mv_table = NULL;
998  s->b_forw_mv_table = NULL;
999  s->b_back_mv_table = NULL;
1002  s->b_direct_mv_table = NULL;
1003  for (i = 0; i < 2; i++) {
1004  for (j = 0; j < 2; j++) {
1005  for (k = 0; k < 2; k++) {
1006  av_freep(&s->b_field_mv_table_base[i][j][k]);
1007  s->b_field_mv_table[i][j][k] = NULL;
1008  }
1009  av_freep(&s->b_field_select_table[i][j]);
1010  av_freep(&s->p_field_mv_table_base[i][j]);
1011  s->p_field_mv_table[i][j] = NULL;
1012  }
1014  }
1015 
1016  av_freep(&s->dc_val_base);
1018  av_freep(&s->mbintra_table);
1019  av_freep(&s->cbp_table);
1020  av_freep(&s->pred_dir_table);
1021 
1022  av_freep(&s->mbskip_table);
1023 
1025  av_freep(&s->er.er_temp_buffer);
1026  av_freep(&s->mb_index2xy);
1027  av_freep(&s->lambda_table);
1028 
1029  av_freep(&s->cplx_tab);
1030  av_freep(&s->bits_tab);
1031 
1032  s->linesize = s->uvlinesize = 0;
1033 }
1034 
1036 {
1037  int i, err = 0;
1038 
1039  if (!s->context_initialized)
1040  return AVERROR(EINVAL);
1041 
1042  if (s->slice_context_count > 1) {
1043  for (i = 0; i < s->slice_context_count; i++) {
1045  }
1046  for (i = 1; i < s->slice_context_count; i++) {
1047  av_freep(&s->thread_context[i]);
1048  }
1049  } else
1051 
1052  free_context_frame(s);
1053 
1054  if (s->picture)
1055  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1056  s->picture[i].needs_realloc = 1;
1057  }
1058 
1059  s->last_picture_ptr =
1060  s->next_picture_ptr =
1062 
1063  // init
1065  s->mb_height = (s->height + 31) / 32 * 2;
1066  else
1067  s->mb_height = (s->height + 15) / 16;
1068 
1069  if ((s->width || s->height) &&
1070  (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1071  return err;
1072 
1073  if ((err = init_context_frame(s)))
1074  return err;
1075 
1076  memset(s->thread_context, 0, sizeof(s->thread_context));
1077  s->thread_context[0] = s;
1078 
1079  if (s->width && s->height) {
1080  int nb_slices = s->slice_context_count;
1081  if (nb_slices > 1) {
1082  for (i = 0; i < nb_slices; i++) {
1083  if (i) {
1084  s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
1085  if (!s->thread_context[i]) {
1086  return AVERROR(ENOMEM);
1087  }
1088  }
1089  if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1090  return err;
1091  s->thread_context[i]->start_mb_y =
1092  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1093  s->thread_context[i]->end_mb_y =
1094  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1095  }
1096  } else {
1097  err = init_duplicate_context(s);
1098  if (err < 0)
1099  return err;
1100  s->start_mb_y = 0;
1101  s->end_mb_y = s->mb_height;
1102  }
1103  s->slice_context_count = nb_slices;
1104  }
1105 
1106  return 0;
1107 }
1108 
1109 /* init common structure for both encoder and decoder */
1111 {
1112  int i;
1113 
1114  if (!s)
1115  return;
1116 
1117  if (s->slice_context_count > 1) {
1118  for (i = 0; i < s->slice_context_count; i++) {
1120  }
1121  for (i = 1; i < s->slice_context_count; i++) {
1122  av_freep(&s->thread_context[i]);
1123  }
1124  s->slice_context_count = 1;
1125  } else free_duplicate_context(s);
1126 
1128  s->parse_context.buffer_size = 0;
1129 
1132 
1133  if (!s->avctx)
1134  return;
1135 
1136  if (s->picture) {
1137  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1139  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1140  av_frame_free(&s->picture[i].f);
1141  }
1142  }
1143  av_freep(&s->picture);
1156 
1157  free_context_frame(s);
1158 
1159  s->context_initialized = 0;
1160  s->last_picture_ptr =
1161  s->next_picture_ptr =
1163  s->linesize = s->uvlinesize = 0;
1164 }
1165 
1166 
1167 static void gray_frame(AVFrame *frame)
1168 {
1169  int i, h_chroma_shift, v_chroma_shift;
1170 
1171  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1172 
1173  for(i=0; i<frame->height; i++)
1174  memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1175  for(i=0; i<AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1176  memset(frame->data[1] + frame->linesize[1]*i,
1177  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
1178  memset(frame->data[2] + frame->linesize[2]*i,
1179  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
1180  }
1181 }
1182 
1183 /**
1184  * generic function called after decoding
1185  * the header and before a frame is decoded.
1186  */
1188 {
1189  int i, ret;
1190  Picture *pic;
1191  s->mb_skipped = 0;
1192 
1193  if (!ff_thread_can_start_frame(avctx)) {
1194  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1195  return -1;
1196  }
1197 
1198  /* mark & release old frames */
1199  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1201  s->last_picture_ptr->f->buf[0]) {
1203  }
1204 
1205  /* release forgotten pictures */
1206  /* if (MPEG-124 / H.263) */
1207  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1208  if (&s->picture[i] != s->last_picture_ptr &&
1209  &s->picture[i] != s->next_picture_ptr &&
1210  s->picture[i].reference && !s->picture[i].needs_realloc) {
1211  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1212  }
1213  }
1214 
1218 
1219  /* release non reference frames */
1220  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1221  if (!s->picture[i].reference)
1222  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1223  }
1224 
1225  if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1226  // we already have an unused image
1227  // (maybe it was set before reading the header)
1228  pic = s->current_picture_ptr;
1229  } else {
1230  i = ff_find_unused_picture(s->avctx, s->picture, 0);
1231  if (i < 0) {
1232  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1233  return i;
1234  }
1235  pic = &s->picture[i];
1236  }
1237 
1238  pic->reference = 0;
1239  if (!s->droppable) {
1240  if (s->pict_type != AV_PICTURE_TYPE_B)
1241  pic->reference = 3;
1242  }
1243 
1245 
1246  if (alloc_picture(s, pic) < 0)
1247  return -1;
1248 
1249  s->current_picture_ptr = pic;
1250  // FIXME use only the vars from current_pic
1252  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1254  if (s->picture_structure != PICT_FRAME)
1257  }
1261 
1263  // if (s->avctx->flags && AV_CODEC_FLAG_QSCALE)
1264  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1266 
1267  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1268  s->current_picture_ptr)) < 0)
1269  return ret;
1270 
1271  if (s->pict_type != AV_PICTURE_TYPE_B) {
1273  if (!s->droppable)
1275  }
1276  ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1278  s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1279  s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1281  s->pict_type, s->droppable);
1282 
1283  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1284  (s->pict_type != AV_PICTURE_TYPE_I)) {
1285  int h_chroma_shift, v_chroma_shift;
1287  &h_chroma_shift, &v_chroma_shift);
1289  av_log(avctx, AV_LOG_DEBUG,
1290  "allocating dummy last picture for B frame\n");
1291  else if (s->pict_type != AV_PICTURE_TYPE_I)
1292  av_log(avctx, AV_LOG_ERROR,
1293  "warning: first frame is no keyframe\n");
1294 
1295  /* Allocate a dummy frame */
1296  i = ff_find_unused_picture(s->avctx, s->picture, 0);
1297  if (i < 0) {
1298  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1299  return i;
1300  }
1301  s->last_picture_ptr = &s->picture[i];
1302 
1303  s->last_picture_ptr->reference = 3;
1304  s->last_picture_ptr->f->key_frame = 0;
1306 
1307  if (alloc_picture(s, s->last_picture_ptr) < 0) {
1308  s->last_picture_ptr = NULL;
1309  return -1;
1310  }
1311 
1312  if (!avctx->hwaccel) {
1313  for(i=0; i<avctx->height; i++)
1314  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1315  0x80, avctx->width);
1316  if (s->last_picture_ptr->f->data[2]) {
1317  for(i=0; i<AV_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1318  memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1319  0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1320  memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1321  0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1322  }
1323  }
1324 
1326  for(i=0; i<avctx->height; i++)
1327  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1328  }
1329  }
1330 
1331  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1332  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1333  }
1334  if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1335  s->pict_type == AV_PICTURE_TYPE_B) {
1336  /* Allocate a dummy frame */
1337  i = ff_find_unused_picture(s->avctx, s->picture, 0);
1338  if (i < 0) {
1339  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1340  return i;
1341  }
1342  s->next_picture_ptr = &s->picture[i];
1343 
1344  s->next_picture_ptr->reference = 3;
1345  s->next_picture_ptr->f->key_frame = 0;
1347 
1348  if (alloc_picture(s, s->next_picture_ptr) < 0) {
1349  s->next_picture_ptr = NULL;
1350  return -1;
1351  }
1352  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1353  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1354  }
1355 
1356 #if 0 // BUFREF-FIXME
1357  memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1358  memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
1359 #endif
1360  if (s->last_picture_ptr) {
1361  if (s->last_picture_ptr->f->buf[0] &&
1362  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1363  s->last_picture_ptr)) < 0)
1364  return ret;
1365  }
1366  if (s->next_picture_ptr) {
1367  if (s->next_picture_ptr->f->buf[0] &&
1368  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1369  s->next_picture_ptr)) < 0)
1370  return ret;
1371  }
1372 
1374  s->last_picture_ptr->f->buf[0]));
1375 
1376  if (s->picture_structure!= PICT_FRAME) {
1377  int i;
1378  for (i = 0; i < 4; i++) {
1380  s->current_picture.f->data[i] +=
1381  s->current_picture.f->linesize[i];
1382  }
1383  s->current_picture.f->linesize[i] *= 2;
1384  s->last_picture.f->linesize[i] *= 2;
1385  s->next_picture.f->linesize[i] *= 2;
1386  }
1387  }
1388 
1389  /* set dequantizer, we can't do it during init as
1390  * it might change for MPEG-4 and we can't do it in the header
1391  * decode as init is not called for MPEG-4 there yet */
1392  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1395  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1398  } else {
1401  }
1402 
1403  if (s->avctx->debug & FF_DEBUG_NOMC) {
1405  }
1406 
1407  return 0;
1408 }
1409 
1410 /* called after a frame has been decoded. */
1412 {
1413  emms_c();
1414 
1415  if (s->current_picture.reference)
1417 }
1418 
1420 {
1422  p->qscale_table, p->motion_val, &s->low_delay,
1423  s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
1424 }
1425 
1427 {
1429  int offset = 2*s->mb_stride + 1;
1430  if(!ref)
1431  return AVERROR(ENOMEM);
1432  av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
1433  ref->size -= offset;
1434  ref->data += offset;
1435  return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
1436 }
1437 
1439  uint8_t *dest, uint8_t *src,
1440  int field_based, int field_select,
1441  int src_x, int src_y,
1442  int width, int height, ptrdiff_t stride,
1443  int h_edge_pos, int v_edge_pos,
1444  int w, int h, h264_chroma_mc_func *pix_op,
1445  int motion_x, int motion_y)
1446 {
1447  const int lowres = s->avctx->lowres;
1448  const int op_index = FFMIN(lowres, 3);
1449  const int s_mask = (2 << lowres) - 1;
1450  int emu = 0;
1451  int sx, sy;
1452 
1453  if (s->quarter_sample) {
1454  motion_x /= 2;
1455  motion_y /= 2;
1456  }
1457 
1458  sx = motion_x & s_mask;
1459  sy = motion_y & s_mask;
1460  src_x += motion_x >> lowres + 1;
1461  src_y += motion_y >> lowres + 1;
1462 
1463  src += src_y * stride + src_x;
1464 
1465  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
1466  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1468  s->linesize, s->linesize,
1469  w + 1, (h + 1) << field_based,
1470  src_x, src_y << field_based,
1471  h_edge_pos, v_edge_pos);
1472  src = s->sc.edge_emu_buffer;
1473  emu = 1;
1474  }
1475 
1476  sx = (sx << 2) >> lowres;
1477  sy = (sy << 2) >> lowres;
1478  if (field_select)
1479  src += s->linesize;
1480  pix_op[op_index](dest, src, stride, h, sx, sy);
1481  return emu;
1482 }
1483 
1484 /* apply one mpeg motion vector to the three components */
1486  uint8_t *dest_y,
1487  uint8_t *dest_cb,
1488  uint8_t *dest_cr,
1489  int field_based,
1490  int bottom_field,
1491  int field_select,
1492  uint8_t **ref_picture,
1493  h264_chroma_mc_func *pix_op,
1494  int motion_x, int motion_y,
1495  int h, int mb_y)
1496 {
1497  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1498  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
1499  ptrdiff_t uvlinesize, linesize;
1500  const int lowres = s->avctx->lowres;
1501  const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
1502  const int block_s = 8>>lowres;
1503  const int s_mask = (2 << lowres) - 1;
1504  const int h_edge_pos = s->h_edge_pos >> lowres;
1505  const int v_edge_pos = s->v_edge_pos >> lowres;
1506  linesize = s->current_picture.f->linesize[0] << field_based;
1507  uvlinesize = s->current_picture.f->linesize[1] << field_based;
1508 
1509  // FIXME obviously not perfect but qpel will not work in lowres anyway
1510  if (s->quarter_sample) {
1511  motion_x /= 2;
1512  motion_y /= 2;
1513  }
1514 
1515  if(field_based){
1516  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
1517  }
1518 
1519  sx = motion_x & s_mask;
1520  sy = motion_y & s_mask;
1521  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
1522  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
1523 
1524  if (s->out_format == FMT_H263) {
1525  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
1526  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
1527  uvsrc_x = src_x >> 1;
1528  uvsrc_y = src_y >> 1;
1529  } else if (s->out_format == FMT_H261) {
1530  // even chroma mv's are full pel in H261
1531  mx = motion_x / 4;
1532  my = motion_y / 4;
1533  uvsx = (2 * mx) & s_mask;
1534  uvsy = (2 * my) & s_mask;
1535  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
1536  uvsrc_y = mb_y * block_s + (my >> lowres);
1537  } else {
1538  if(s->chroma_y_shift){
1539  mx = motion_x / 2;
1540  my = motion_y / 2;
1541  uvsx = mx & s_mask;
1542  uvsy = my & s_mask;
1543  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
1544  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
1545  } else {
1546  if(s->chroma_x_shift){
1547  //Chroma422
1548  mx = motion_x / 2;
1549  uvsx = mx & s_mask;
1550  uvsy = motion_y & s_mask;
1551  uvsrc_y = src_y;
1552  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1553  } else {
1554  //Chroma444
1555  uvsx = motion_x & s_mask;
1556  uvsy = motion_y & s_mask;
1557  uvsrc_x = src_x;
1558  uvsrc_y = src_y;
1559  }
1560  }
1561  }
1562 
1563  ptr_y = ref_picture[0] + src_y * linesize + src_x;
1564  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1565  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
1566 
1567  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
1568  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1569  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
1570  linesize >> field_based, linesize >> field_based,
1571  17, 17 + field_based,
1572  src_x, src_y << field_based, h_edge_pos,
1573  v_edge_pos);
1574  ptr_y = s->sc.edge_emu_buffer;
1575  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1576  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
1577  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
1578  if (s->workaround_bugs & FF_BUG_IEDGE)
1579  vbuf -= s->uvlinesize;
1580  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
1581  uvlinesize >> field_based, uvlinesize >> field_based,
1582  9, 9 + field_based,
1583  uvsrc_x, uvsrc_y << field_based,
1584  h_edge_pos >> 1, v_edge_pos >> 1);
1585  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
1586  uvlinesize >> field_based,uvlinesize >> field_based,
1587  9, 9 + field_based,
1588  uvsrc_x, uvsrc_y << field_based,
1589  h_edge_pos >> 1, v_edge_pos >> 1);
1590  ptr_cb = ubuf;
1591  ptr_cr = vbuf;
1592  }
1593  }
1594 
1595  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
1596  if (bottom_field) {
1597  dest_y += s->linesize;
1598  dest_cb += s->uvlinesize;
1599  dest_cr += s->uvlinesize;
1600  }
1601 
1602  if (field_select) {
1603  ptr_y += s->linesize;
1604  ptr_cb += s->uvlinesize;
1605  ptr_cr += s->uvlinesize;
1606  }
1607 
1608  sx = (sx << 2) >> lowres;
1609  sy = (sy << 2) >> lowres;
1610  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
1611 
1612  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1613  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
1614  uvsx = (uvsx << 2) >> lowres;
1615  uvsy = (uvsy << 2) >> lowres;
1616  if (hc) {
1617  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
1618  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
1619  }
1620  }
1621  // FIXME h261 lowres loop filter
1622 }
1623 
1625  uint8_t *dest_cb, uint8_t *dest_cr,
1626  uint8_t **ref_picture,
1627  h264_chroma_mc_func * pix_op,
1628  int mx, int my)
1629 {
1630  const int lowres = s->avctx->lowres;
1631  const int op_index = FFMIN(lowres, 3);
1632  const int block_s = 8 >> lowres;
1633  const int s_mask = (2 << lowres) - 1;
1634  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
1635  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
1636  int emu = 0, src_x, src_y, sx, sy;
1637  ptrdiff_t offset;
1638  uint8_t *ptr;
1639 
1640  if (s->quarter_sample) {
1641  mx /= 2;
1642  my /= 2;
1643  }
1644 
1645  /* In case of 8X8, we construct a single chroma motion vector
1646  with a special rounding */
1647  mx = ff_h263_round_chroma(mx);
1648  my = ff_h263_round_chroma(my);
1649 
1650  sx = mx & s_mask;
1651  sy = my & s_mask;
1652  src_x = s->mb_x * block_s + (mx >> lowres + 1);
1653  src_y = s->mb_y * block_s + (my >> lowres + 1);
1654 
1655  offset = src_y * s->uvlinesize + src_x;
1656  ptr = ref_picture[1] + offset;
1657  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
1658  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
1660  s->uvlinesize, s->uvlinesize,
1661  9, 9,
1662  src_x, src_y, h_edge_pos, v_edge_pos);
1663  ptr = s->sc.edge_emu_buffer;
1664  emu = 1;
1665  }
1666  sx = (sx << 2) >> lowres;
1667  sy = (sy << 2) >> lowres;
1668  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
1669 
1670  ptr = ref_picture[2] + offset;
1671  if (emu) {
1673  s->uvlinesize, s->uvlinesize,
1674  9, 9,
1675  src_x, src_y, h_edge_pos, v_edge_pos);
1676  ptr = s->sc.edge_emu_buffer;
1677  }
1678  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1679 }
1680 
1681 /**
1682  * motion compensation of a single macroblock
1683  * @param s context
1684  * @param dest_y luma destination pointer
1685  * @param dest_cb chroma cb/u destination pointer
1686  * @param dest_cr chroma cr/v destination pointer
1687  * @param dir direction (0->forward, 1->backward)
1688  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1689  * @param pix_op halfpel motion compensation function (average or put normally)
1690  * the motion vectors are taken from s->mv and the MV type from s->mv_type
1691  */
1692 static inline void MPV_motion_lowres(MpegEncContext *s,
1693  uint8_t *dest_y, uint8_t *dest_cb,
1694  uint8_t *dest_cr,
1695  int dir, uint8_t **ref_picture,
1696  h264_chroma_mc_func *pix_op)
1697 {
1698  int mx, my;
1699  int mb_x, mb_y, i;
1700  const int lowres = s->avctx->lowres;
     /* in lowres mode an 8x8 "block" shrinks to (8 >> lowres) pixels per side */
1701  const int block_s = 8 >>lowres;
1702 
1703  mb_x = s->mb_x;
1704  mb_y = s->mb_y;
1705 
     /* dispatch on how the macroblock's motion vectors are partitioned */
1706  switch (s->mv_type) {
1707  case MV_TYPE_16X16:
     /* a single vector for the whole macroblock (2 * block_s luma rows) */
1708  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1709  0, 0, 0,
1710  ref_picture, pix_op,
1711  s->mv[dir][0][0], s->mv[dir][0][1],
1712  2 * block_s, mb_y);
1713  break;
1714  case MV_TYPE_8X8:
     /* four independent luma vectors; their sum (mx, my) is later folded
      * into one chroma vector by chroma_4mv_motion_lowres() */
1715  mx = 0;
1716  my = 0;
1717  for (i = 0; i < 4; i++) {
     /* (i & 1, i >> 1) selects the quadrant inside the macroblock */
1718  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
1719  s->linesize) * block_s,
1720  ref_picture[0], 0, 0,
1721  (2 * mb_x + (i & 1)) * block_s,
1722  (2 * mb_y + (i >> 1)) * block_s,
1723  s->width, s->height, s->linesize,
1724  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1725  block_s, block_s, pix_op,
1726  s->mv[dir][i][0], s->mv[dir][i][1]);
1727 
1728  mx += s->mv[dir][i][0];
1729  my += s->mv[dir][i][1];
1730  }
1731 
     /* chroma is skipped entirely in gray (luma-only) decoding mode */
1732  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
1733  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
1734  pix_op, mx, my);
1735  break;
1736  case MV_TYPE_FIELD:
1737  if (s->picture_structure == PICT_FRAME) {
1738  /* top field */
1739  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1740  1, 0, s->field_select[dir][0],
1741  ref_picture, pix_op,
1742  s->mv[dir][0][0], s->mv[dir][0][1],
1743  block_s, mb_y);
1744  /* bottom field */
1745  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1746  1, 1, s->field_select[dir][1],
1747  ref_picture, pix_op,
1748  s->mv[dir][1][0], s->mv[dir][1][1],
1749  block_s, mb_y);
1750  } else {
     /* field picture referencing the opposite parity of the current
      * frame: switch to the partially-decoded current picture */
1751  if (s->picture_structure != s->field_select[dir][0] + 1 &&
1752  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
1753  ref_picture = s->current_picture_ptr->f->data;
1754 
1755  }
1756  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1757  0, 0, s->field_select[dir][0],
1758  ref_picture, pix_op,
1759  s->mv[dir][0][0],
1760  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
1761  }
1762  break;
1763  case MV_TYPE_16X8:
     /* two vectors, one per 16x8 half of the macroblock */
1764  for (i = 0; i < 2; i++) {
1765  uint8_t **ref2picture;
1766 
1767  if (s->picture_structure == s->field_select[dir][i] + 1 ||
1768  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
1769  ref2picture = ref_picture;
1770  } else {
1771  ref2picture = s->current_picture_ptr->f->data;
1772  }
1773 
1774  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1775  0, 0, s->field_select[dir][i],
1776  ref2picture, pix_op,
1777  s->mv[dir][i][0], s->mv[dir][i][1] +
1778  2 * block_s * i, block_s, mb_y >> 1);
1779 
     /* advance destinations to the lower 16x8 half */
1780  dest_y += 2 * block_s * s->linesize;
1781  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
1782  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
1783  }
1784  break;
1785  case MV_TYPE_DMV:
     /* dual-prime: predictions from both field parities are combined */
1786  if (s->picture_structure == PICT_FRAME) {
1787  for (i = 0; i < 2; i++) {
1788  int j;
1789  for (j = 0; j < 2; j++) {
1790  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1791  1, j, j ^ i,
1792  ref_picture, pix_op,
1793  s->mv[dir][2 * i + j][0],
1794  s->mv[dir][2 * i + j][1],
1795  block_s, mb_y);
1796  }
     /* NOTE(review): original line 1797 is elided in this rendered
      * listing — presumably it switches pix_op to the averaging
      * variant after the first (put) pass; confirm against the
      * complete source file. */
1798  }
1799  } else {
1800  for (i = 0; i < 2; i++) {
1801  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1802  0, 0, s->picture_structure != i + 1,
1803  ref_picture, pix_op,
1804  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
1805  2 * block_s, mb_y >> 1);
1806 
1807  // after put we make avg of the same block
     /* NOTE(review): original line 1808 (the statement implementing
      * the comment above, presumably reassigning pix_op to an
      * averaging variant) is elided in this rendered listing —
      * confirm against the complete source file. */
1809 
1810  // opposite parity is always in the same
1811  // frame if this is second field
1812  if (!s->first_field) {
1813  ref_picture = s->current_picture_ptr->f->data;
1814  }
1815  }
1816  }
1817  break;
1818  default:
     /* all legal mv_type values are handled above */
1819  av_assert2(0);
1820  }
1821 }
1822 
1823 /**
1824  * find the lowest MB row referenced in the MVs
1825  */
1827 {
1828  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1829  int my, off, i, mvs;
1830 
1831  if (s->picture_structure != PICT_FRAME || s->mcsel)
1832  goto unhandled;
1833 
1834  switch (s->mv_type) {
1835  case MV_TYPE_16X16:
1836  mvs = 1;
1837  break;
1838  case MV_TYPE_16X8:
1839  mvs = 2;
1840  break;
1841  case MV_TYPE_8X8:
1842  mvs = 4;
1843  break;
1844  default:
1845  goto unhandled;
1846  }
1847 
1848  for (i = 0; i < mvs; i++) {
1849  my = s->mv[dir][i][1];
1850  my_max = FFMAX(my_max, my);
1851  my_min = FFMIN(my_min, my);
1852  }
1853 
1854  off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
1855 
1856  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
1857 unhandled:
1858  return s->mb_height-1;
1859 }
1860 
1861 /* put block[] to dest[] */
1862 static inline void put_dct(MpegEncContext *s,
1863  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1864 {
1865  s->dct_unquantize_intra(s, block, i, qscale);
1866  s->idsp.idct_put(dest, line_size, block);
1867 }
1868 
1869 /* add block[] to dest[] */
1870 static inline void add_dct(MpegEncContext *s,
1871  int16_t *block, int i, uint8_t *dest, int line_size)
1872 {
1873  if (s->block_last_index[i] >= 0) {
1874  s->idsp.idct_add(dest, line_size, block);
1875  }
1876 }
1877 
1878 static inline void add_dequant_dct(MpegEncContext *s,
1879  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1880 {
1881  if (s->block_last_index[i] >= 0) {
1882  s->dct_unquantize_inter(s, block, i, qscale);
1883 
1884  s->idsp.idct_add(dest, line_size, block);
1885  }
1886 }
1887 
1888 /**
1889  * Clean dc, ac, coded_block for the current non-intra MB.
1890  */
1892 {
1893  int wrap = s->b8_stride;
1894  int xy = s->block_index[0];
1895 
1896  s->dc_val[0][xy ] =
1897  s->dc_val[0][xy + 1 ] =
1898  s->dc_val[0][xy + wrap] =
1899  s->dc_val[0][xy + 1 + wrap] = 1024;
1900  /* ac pred */
1901  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1902  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1903  if (s->msmpeg4_version>=3) {
1904  s->coded_block[xy ] =
1905  s->coded_block[xy + 1 ] =
1906  s->coded_block[xy + wrap] =
1907  s->coded_block[xy + 1 + wrap] = 0;
1908  }
1909  /* chroma */
1910  wrap = s->mb_stride;
1911  xy = s->mb_x + s->mb_y * wrap;
1912  s->dc_val[1][xy] =
1913  s->dc_val[2][xy] = 1024;
1914  /* ac pred */
1915  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1916  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1917 
1918  s->mbintra_table[xy]= 0;
1919 }
1920 
1921 /* generic function called after a macroblock has been parsed by the
1922  decoder or after it has been encoded by the encoder.
1923 
1924  Important variables used:
1925  s->mb_intra : true if intra macroblock
1926  s->mv_dir : motion vector direction
1927  s->mv_type : motion vector type
1928  s->mv : motion vector
1929  s->interlaced_dct : true if interlaced dct used (mpeg2)
1930  */
1931 static av_always_inline
1933  int lowres_flag, int is_mpeg12)
1934 {
1935  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1936 
1937  if (CONFIG_XVMC &&
1938  s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
1939  s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
1940  return;
1941  }
1942 
1943  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1944  /* print DCT coefficients */
1945  int i,j;
1946  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1947  for(i=0; i<6; i++){
1948  for(j=0; j<64; j++){
1949  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1950  block[i][s->idsp.idct_permutation[j]]);
1951  }
1952  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1953  }
1954  }
1955 
1956  s->current_picture.qscale_table[mb_xy] = s->qscale;
1957 
1958  /* update DC predictors for P macroblocks */
1959  if (!s->mb_intra) {
1960  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1961  if(s->mbintra_table[mb_xy])
1963  } else {
1964  s->last_dc[0] =
1965  s->last_dc[1] =
1966  s->last_dc[2] = 128 << s->intra_dc_precision;
1967  }
1968  }
1969  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1970  s->mbintra_table[mb_xy]=1;
1971 
1973  !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
1974  s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
1975  uint8_t *dest_y, *dest_cb, *dest_cr;
1976  int dct_linesize, dct_offset;
1977  op_pixels_func (*op_pix)[4];
1978  qpel_mc_func (*op_qpix)[16];
1979  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1980  const int uvlinesize = s->current_picture.f->linesize[1];
1981  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
1982  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
1983 
1984  /* avoid copy if macroblock skipped in last frame too */
1985  /* skip only during decoding as we might trash the buffers during encoding a bit */
1986  if(!s->encoding){
1987  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1988 
1989  if (s->mb_skipped) {
1990  s->mb_skipped= 0;
1992  *mbskip_ptr = 1;
1993  } else if(!s->current_picture.reference) {
1994  *mbskip_ptr = 1;
1995  } else{
1996  *mbskip_ptr = 0; /* not skipped */
1997  }
1998  }
1999 
2000  dct_linesize = linesize << s->interlaced_dct;
2001  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2002 
2003  if(readable){
2004  dest_y= s->dest[0];
2005  dest_cb= s->dest[1];
2006  dest_cr= s->dest[2];
2007  }else{
2008  dest_y = s->sc.b_scratchpad;
2009  dest_cb= s->sc.b_scratchpad+16*linesize;
2010  dest_cr= s->sc.b_scratchpad+32*linesize;
2011  }
2012 
2013  if (!s->mb_intra) {
2014  /* motion handling */
2015  /* decoding or more than one mb_type (MC was already done otherwise) */
2016  if(!s->encoding){
2017 
2018  if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2019  if (s->mv_dir & MV_DIR_FORWARD) {
2021  lowest_referenced_row(s, 0),
2022  0);
2023  }
2024  if (s->mv_dir & MV_DIR_BACKWARD) {
2026  lowest_referenced_row(s, 1),
2027  0);
2028  }
2029  }
2030 
2031  if(lowres_flag){
2033 
2034  if (s->mv_dir & MV_DIR_FORWARD) {
2035  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
2037  }
2038  if (s->mv_dir & MV_DIR_BACKWARD) {
2039  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
2040  }
2041  }else{
2042  op_qpix = s->me.qpel_put;
2043  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2044  op_pix = s->hdsp.put_pixels_tab;
2045  }else{
2046  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2047  }
2048  if (s->mv_dir & MV_DIR_FORWARD) {
2049  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2050  op_pix = s->hdsp.avg_pixels_tab;
2051  op_qpix= s->me.qpel_avg;
2052  }
2053  if (s->mv_dir & MV_DIR_BACKWARD) {
2054  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2055  }
2056  }
2057  }
2058 
2059  /* skip dequant / idct if we are really late ;) */
2060  if(s->avctx->skip_idct){
2063  || s->avctx->skip_idct >= AVDISCARD_ALL)
2064  goto skip_idct;
2065  }
2066 
2067  /* add dct residue */
2069  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2070  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2071  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2072  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2073  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2074 
2075  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2076  if (s->chroma_y_shift){
2077  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2078  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2079  }else{
2080  dct_linesize >>= 1;
2081  dct_offset >>=1;
2082  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2083  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2084  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2085  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2086  }
2087  }
2088  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2089  add_dct(s, block[0], 0, dest_y , dct_linesize);
2090  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2091  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2092  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2093 
2094  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2095  if(s->chroma_y_shift){//Chroma420
2096  add_dct(s, block[4], 4, dest_cb, uvlinesize);
2097  add_dct(s, block[5], 5, dest_cr, uvlinesize);
2098  }else{
2099  //chroma422
2100  dct_linesize = uvlinesize << s->interlaced_dct;
2101  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2102 
2103  add_dct(s, block[4], 4, dest_cb, dct_linesize);
2104  add_dct(s, block[5], 5, dest_cr, dct_linesize);
2105  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2106  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2107  if(!s->chroma_x_shift){//Chroma444
2108  add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2109  add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2110  add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2111  add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2112  }
2113  }
2114  }//fi gray
2115  }
2116  else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2117  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2118  }
2119  } else {
2120  /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
2121  TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
2122  if (s->avctx->bits_per_raw_sample > 8){
2123  const int act_block_size = block_size * 2;
2124 
2125  if(s->dpcm_direction == 0) {
2126  s->idsp.idct_put(dest_y, dct_linesize, (int16_t*)(*s->block32)[0]);
2127  s->idsp.idct_put(dest_y + act_block_size, dct_linesize, (int16_t*)(*s->block32)[1]);
2128  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, (int16_t*)(*s->block32)[2]);
2129  s->idsp.idct_put(dest_y + dct_offset + act_block_size, dct_linesize, (int16_t*)(*s->block32)[3]);
2130 
2131  dct_linesize = uvlinesize << s->interlaced_dct;
2132  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2133 
2134  s->idsp.idct_put(dest_cb, dct_linesize, (int16_t*)(*s->block32)[4]);
2135  s->idsp.idct_put(dest_cr, dct_linesize, (int16_t*)(*s->block32)[5]);
2136  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, (int16_t*)(*s->block32)[6]);
2137  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, (int16_t*)(*s->block32)[7]);
2138  if(!s->chroma_x_shift){//Chroma444
2139  s->idsp.idct_put(dest_cb + act_block_size, dct_linesize, (int16_t*)(*s->block32)[8]);
2140  s->idsp.idct_put(dest_cr + act_block_size, dct_linesize, (int16_t*)(*s->block32)[9]);
2141  s->idsp.idct_put(dest_cb + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[10]);
2142  s->idsp.idct_put(dest_cr + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[11]);
2143  }
2144  } else if(s->dpcm_direction == 1) {
2145  int i, w, h;
2146  uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
2147  int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
2148  for(i = 0; i < 3; i++) {
2149  int idx = 0;
2150  int vsub = i ? s->chroma_y_shift : 0;
2151  int hsub = i ? s->chroma_x_shift : 0;
2152  for(h = 0; h < (16 >> vsub); h++){
2153  for(w = 0; w < (16 >> hsub); w++)
2154  dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
2155  dest_pcm[i] += linesize[i] / 2;
2156  }
2157  }
2158  } else if(s->dpcm_direction == -1) {
2159  int i, w, h;
2160  uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
2161  int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
2162  for(i = 0; i < 3; i++) {
2163  int idx = 0;
2164  int vsub = i ? s->chroma_y_shift : 0;
2165  int hsub = i ? s->chroma_x_shift : 0;
2166  dest_pcm[i] += (linesize[i] / 2) * ((16 >> vsub) - 1);
2167  for(h = (16 >> vsub)-1; h >= 1; h--){
2168  for(w = (16 >> hsub)-1; w >= 1; w--)
2169  dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
2170  dest_pcm[i] -= linesize[i] / 2;
2171  }
2172  }
2173  }
2174  }
2175  /* dct only in intra block */
2177  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2178  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2179  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2180  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2181 
2182  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2183  if(s->chroma_y_shift){
2184  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2185  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2186  }else{
2187  dct_offset >>=1;
2188  dct_linesize >>=1;
2189  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2190  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2191  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2192  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2193  }
2194  }
2195  }else{
2196  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
2197  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2198  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
2199  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2200 
2201  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2202  if(s->chroma_y_shift){
2203  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
2204  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
2205  }else{
2206 
2207  dct_linesize = uvlinesize << s->interlaced_dct;
2208  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2209 
2210  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
2211  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
2212  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2213  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2214  if(!s->chroma_x_shift){//Chroma444
2215  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2216  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2217  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2218  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
2219  }
2220  }
2221  }//gray
2222  }
2223  }
2224 skip_idct:
2225  if(!readable){
2226  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2227  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2228  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2229  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2230  }
2231  }
2232  }
2233 }
2234 
2236 {
2237 #if !CONFIG_SMALL
2238  if(s->out_format == FMT_MPEG1) {
2239  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 1);
2240  else mpv_reconstruct_mb_internal(s, block, 0, 1);
2241  } else
2242 #endif
2243  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 0);
2244  else mpv_reconstruct_mb_internal(s, block, 0, 0);
2245 }
2246 
2248 {
2251  s->first_field, s->low_delay);
2252 }
2253 
     /* Initialize per-macroblock state for the macroblock at
      * (s->mb_x, s->mb_y): the six block_index entries and the three
      * s->dest[] plane pointers. */
2254 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2255  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2256  const int uvlinesize = s->current_picture.f->linesize[1];
     /* log2 of the macroblock width in bytes: 16 px (or 32 bytes for
      * >8-bit samples), halved once per lowres level */
2257  const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
2258  const int height_of_mb = 4 - s->avctx->lowres;
2259 
     /* indexes 0-3: the four luma 8x8 blocks, addressed in b8_stride
      * units; 4-5 use mb_stride and address the chroma planes */
2260  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2261  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2262  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2263  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2264  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2265  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2266  //block_index is not used by mpeg2, so it is not affected by chroma_format
2267 
     /* (s->mb_x - 1U): dest pointers start one macroblock to the left
      * of the current column; the shift converts MBs to bytes */
2268  s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << width_of_mb);
2269  s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
2270  s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
2271 
     /* NOTE(review): the condition guarding this block (original line
      * 2272) is elided in this rendered listing — confirm against the
      * complete source file. */
2273  {
2274  if(s->picture_structure==PICT_FRAME){
2275  s->dest[0] += s->mb_y * linesize << height_of_mb;
2276  s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
2277  s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
2278  }else{
     /* field picture: mb_y counts field rows, so halve it */
2279  s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb;
2280  s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
2281  s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
     /* NOTE(review): original line 2282 is elided in this rendered
      * listing — confirm against the complete source file. */
2283  }
2284  }
2285 }
2286 
2288  int i;
2289  MpegEncContext *s = avctx->priv_data;
2290 
2291  if (!s || !s->picture)
2292  return;
2293 
2294  for (i = 0; i < MAX_PICTURE_COUNT; i++)
2295  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2297 
2301 
2302  s->mb_x= s->mb_y= 0;
2303  s->closed_gop= 0;
2304 
2305  s->parse_context.state= -1;
2307  s->parse_context.overread= 0;
2309  s->parse_context.index= 0;
2310  s->parse_context.last_index= 0;
2311  s->bitstream_buffer_size=0;
2312  s->pp_time=0;
2313 }
2314 
2315 /**
2316  * set qscale and update qscale dependent variables.
2317  */
2318 void ff_set_qscale(MpegEncContext * s, int qscale)
2319 {
     /* clamp the requested quantizer to the legal range [1, 31] */
2320  if (qscale < 1)
2321  qscale = 1;
2322  else if (qscale > 31)
2323  qscale = 31;
2324 
2325  s->qscale = qscale;
     /* chroma uses its own (codec-dependent) qscale mapping table */
2326  s->chroma_qscale= s->chroma_qscale_table[qscale];
2327 
     /* refresh the qscale-dependent luma DC scale.
      * NOTE(review): the matching chroma DC-scale update (original line
      * 2329) is elided in this rendered listing — confirm against the
      * complete source file. */
2328  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2330 }
2331 
2333 {
2336 }
int last_time_base
Definition: mpegvideo.h:388
int bitstream_buffer_size
Definition: mpegvideo.h:416
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:52
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:342
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
Definition: frame.c:55
IDCTDSPContext idsp
Definition: mpegvideo.h:230
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
static int init_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:357
int ff_thread_can_start_frame(AVCodecContext *avctx)
const struct AVCodec * codec
Definition: avcodec.h:540
int16_t(* b_bidir_back_mv_table_base)[2]
Definition: mpegvideo.h:244
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
Definition: mpegvideo_arm.c:43
discard all frames except keyframes
Definition: avcodec.h:235
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2254
int picture_number
Definition: mpegvideo.h:127
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2.c:83
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
Definition: mpegvideo.c:126
ScanTable intra_v_scantable
Definition: mpegvideo.h:93
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
Definition: mpegvideodsp.c:110
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame encoding.
Definition: mpegvideo.h:248
int dpcm_direction
Definition: mpegvideo.h:516
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:153
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:188
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:36
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:719
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:534
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpegvideo.c:1932
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo.c:1624
uint8_t * coded_block_base
Definition: mpegvideo.h:191
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:505
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:154
int16_t(*[3] ac_val)[16]
used for MPEG-4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:194
MJPEG encoder.
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:132
h264_chroma_mc_func put_h264_chroma_pixels_tab[4]
Definition: h264chroma.h:28
#define me
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:275
static void gray_frame(AVFrame *frame)
Definition: mpegvideo.c:1167
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:438
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
Definition: mpegpicture.h:85
uint8_t * bitstream_buffer
Definition: mpegvideo.h:415
enum AVCodecID codec_id
Definition: mpegvideo.h:112
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:60
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:140
int field_picture
whether or not the picture was encoded in separate fields
Definition: mpegpicture.h:79
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:741
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced P-frame encoding.
Definition: mpegvideo.h:254
int16_t(* p_mv_table_base)[2]
Definition: mpegvideo.h:240
int studio_profile
Definition: mpegvideo.h:384
uint8_t raster_end[64]
Definition: idctdsp.h:34
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo.c:1826
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
uint32_t * score_map
map to store the scores
Definition: motion_est.h:59
mpegvideo header.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
discard all
Definition: avcodec.h:236
uint8_t permutated[64]
Definition: idctdsp.h:33
static void free_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:414
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1762
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:411
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:353
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:133
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1695
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them.reget_buffer() and buffer age optimizations no longer work.*The contents of buffers must not be written to after ff_thread_report_progress() has been called on them.This includes draw_edges().Porting codecs to frame threading
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:51
int frame_start_found
Definition: parser.h:34
int qscale
QP.
Definition: mpegvideo.h:204
int h263_aic
Advanced INTRA Coding (AIC)
Definition: mpegvideo.h:87
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame encoding.
Definition: mpegvideo.h:250
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, &#39;draw_horiz_band&#39; is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:766
enum AVPictureType last_picture
Definition: movenc.c:68
int chroma_x_shift
Definition: mpegvideo.h:489
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:114
int field_select[2][2]
Definition: mpegvideo.h:277
int block_wrap[6]
Definition: mpegvideo.h:294
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:81
Macro definitions for various function/variable attributes.
int16_t(* b_back_mv_table_base)[2]
Definition: mpegvideo.h:242
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
Definition: mpegvideo.c:436
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1891
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:2247
int context_reinit
Definition: mpegvideo.h:567
const uint8_t ff_mpeg1_dc_scale_table[128]
Definition: mpegvideodata.c:33
int16_t * dc_val_base
Definition: mpegvideo.h:186
ScratchpadContext sc
Definition: mpegvideo.h:202
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define ME_MAP_SIZE
Definition: motion_est.h:38
#define av_cold
Definition: attributes.h:88
av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
enum OutputFormat out_format
output format
Definition: mpegvideo.h:104
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
#define f(width, name)
Definition: cbs_vp9.c:255
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo.c:1035
int noise_reduction
Definition: mpegvideo.h:585
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
uint8_t * pred_dir_table
used to store pred_dir for partitioned decoding
Definition: mpegvideo.h:200
Multithreading support functions.
int frame_skip_threshold
Definition: mpegvideo.h:579
qpel_mc_func(* qpel_put)[16]
Definition: motion_est.h:91
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:452
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
Definition: mpegvideo.h:284
int interlaced_dct
Definition: mpegvideo.h:494
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:180
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:438
int intra_dc_precision
Definition: mpegvideo.h:467
quarterpel DSP functions
void ff_mpv_common_init_ppc(MpegEncContext *s)
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
#define height
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:251
float * cplx_tab
Definition: mpegvideo.h:563
#define ff_dlog(a,...)
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:392
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:461
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:329
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:129
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1770
int mpeg4_studio_profile
Definition: idctdsp.h:99
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
Definition: mpegvideo.h:120
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
int16_t(*[2][2] p_field_mv_table_base)[2]
Definition: mpegvideo.h:246
#define FF_BUG_IEDGE
Definition: avcodec.h:1580
#define av_log(a,...)
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2318
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:269
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:102
ThreadFrame tf
Definition: mpegpicture.h:47
#define U(x)
Definition: vp56_arith.h:37
#define src
Definition: vp8dsp.c:254
int16_t * dc_val[3]
used for MPEG-4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:187
enum AVCodecID id
Definition: codec.h:204
int h263_plus
H.263+ headers.
Definition: mpegvideo.h:109
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:156
unsigned int buffer_size
Definition: parser.h:32
int width
Definition: frame.h:372
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:185
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo.c:1870
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:195
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
int chroma_y_shift
Definition: mpegvideo.h:490
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:405
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegpicture.h:37
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
ERContext er
Definition: mpegvideo.h:569
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1809
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:219
int reference
Definition: mpegpicture.h:87
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:140
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:611
#define wrap(func)
Definition: neontest.h:65
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1862
simple assert() macros that are a bit more flexible than ISO C assert().
int overread_index
the index into ParseContext.buffer of the overread bytes
Definition: parser.h:36
#define PICT_TOP_FIELD
Definition: mpegutils.h:37
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:52
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:401
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:291
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:406
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:257
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
Definition: mem.c:285
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1110
#define FFMAX(a, b)
Definition: common.h:94
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
Definition: mpegvideo.c:454
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:2287
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:126
int * lambda_table
Definition: mpegvideo.h:208
uint8_t * error_status_table
const uint8_t ff_alternate_horizontal_scan[64]
Definition: mpegvideodata.c:84
int ff_mpeg_er_init(MpegEncContext *s)
Definition: mpeg_er.c:100
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
common internal API header
#define MAX_THREADS
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:317
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
Definition: mpegvideo.c:1426
int progressive_frame
Definition: mpegvideo.h:492
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:397
#define UPDATE_PICTURE(pic)
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:333
int top_field_first
Definition: mpegvideo.h:469
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1801
uint8_t * er_temp_buffer
int overread
the number of bytes which where irreversibly read from the next frame
Definition: parser.h:35
#define FFMIN(a, b)
Definition: common.h:96
int last_index
Definition: parser.h:31
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed B-frames
Definition: mpegvideo.h:360
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:174
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:174
#define width
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:524
int width
picture width / height.
Definition: avcodec.h:704
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for B-frame encodin...
Definition: mpegvideo.h:196
uint8_t w
Definition: llviddspenc.c:38
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:53
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:184
Picture.
Definition: mpegpicture.h:45
int alternate_scan
Definition: mpegvideo.h:474
unsigned int allocated_bitstream_buffer_size
Definition: mpegvideo.h:417
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
int16_t(* ac_val_base)[16]
Definition: mpegvideo.h:193
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:312
void(* idct_add)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:79
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1802
int16_t(*[2][2][2] b_field_mv_table_base)[2]
Definition: mpegvideo.h:247
int16_t(* b_forw_mv_table_base)[2]
Definition: mpegvideo.h:241
static int alloc_picture(MpegEncContext *s, Picture *pic)
Definition: mpegvideo.c:349
int16_t(*[12] pblocks)[64]
Definition: mpegvideo.h:509
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:86
MotionEstContext me
Definition: mpegvideo.h:282
int frame_skip_factor
Definition: mpegvideo.h:580
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:96
av_cold void ff_mpv_common_init_mips(MpegEncContext *s)
int mb_decision
macroblock decision mode
Definition: avcodec.h:1019
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:198
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:72
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:530
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:103
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo.c:491
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:51
void ff_mpv_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
Definition: mpegvideo.c:667
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:1790
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:293
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
Definition: mpegvideo.h:297
int first_field
is 1 for the first field of a field picture 0 otherwise
Definition: mpegvideo.h:495
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:387
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
static void clear_context(MpegEncContext *s)
Definition: mpegvideo.c:802
AVBufferRef * qscale_table_buf
Definition: mpegpicture.h:49
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:263
int16_t(* b_bidir_forw_mv_table_base)[2]
Definition: mpegvideo.h:243
int coded_picture_number
picture number in bitstream order
Definition: frame.h:428
uint16_t inter_matrix[64]
Definition: mpegvideo.h:302
uint8_t * buffer
Definition: parser.h:29
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:155
Libavcodec external API header.
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:134
BlockDSPContext bdsp
Definition: mpegvideo.h:226
enum AVDiscard skip_idct
Skip IDCT/dequantization for selected frames.
Definition: avcodec.h:2009
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
int debug
debug
Definition: avcodec.h:1616
main external API structure.
Definition: avcodec.h:531
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:231
ScanTable intra_scantable
Definition: mpegvideo.h:91
uint8_t * data
The data buffer.
Definition: buffer.h:89
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
Definition: mpegvideo.h:192
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:100
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> (&#39;D&#39;<<24) + (&#39;C&#39;<<16) + (&#39;B&#39;<<8) + &#39;A&#39;).
Definition: avcodec.h:556
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:110
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:528
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:268
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
Definition: mpegvideo.c:1419
uint32_t state
contains the last few bytes in MSB order
Definition: parser.h:33
Picture * picture
main picture buffer
Definition: mpegvideo.h:136
int progressive_sequence
Definition: mpegvideo.h:459
int coded_height
Definition: avcodec.h:719
ScanTable intra_h_scantable
Definition: mpegvideo.h:92
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:82
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame encoding.
Definition: mpegvideo.h:255
uint8_t * cbp_table
used to store cbp, ac_pred for partitioned decoding
Definition: mpegvideo.h:199
int closed_gop
MPEG1/2 GOP is closed.
Definition: mpegvideo.h:211
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
Definition: mpegpicture.c:58
unsigned int avpriv_toupper4(unsigned int x)
Definition: utils.c:1890
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1628
struct AVFrame * f
Definition: mpegpicture.h:46
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:1022
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:532
int context_initialized
Definition: mpegvideo.h:124
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:135
#define s1
Definition: regdef.h:38
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:101
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo.c:1187
int f_code
forward MV resolution
Definition: mpegvideo.h:238
#define COPY(a)
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
int max_b_frames
max number of B-frames for encoding
Definition: mpegvideo.h:115
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:212
int size
Size of data in bytes.
Definition: buffer.h:93
int h263_pred
use MPEG-4/H.263 ac/dc predictions
Definition: mpegvideo.h:105
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:252
static int init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:687
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:256
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame encoding.
Definition: mpegvideo.h:253
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:189
uint8_t level
Definition: svq3.c:205
qpel_mc_func(* qpel_avg)[16]
Definition: motion_est.h:92
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame encoding.
Definition: mpegvideo.h:249
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:131
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:208
MpegEncContext.
Definition: mpegvideo.h:81
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:183
int8_t * qscale_table
Definition: mpegpicture.h:50
struct AVCodecContext * avctx
Definition: mpegvideo.h:98
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:294
A reference to a data buffer.
Definition: buffer.h:81
discard all non reference
Definition: avcodec.h:232
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:538
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
int
MpegVideoDSPContext mdsp
Definition: mpegvideo.h:232
int(* dct_error_sum)[64]
Definition: mpegvideo.h:332
common internal api header.
int32_t(* block32)[12][64]
Definition: mpegvideo.h:515
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:130
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Definition: mpegvideo.c:672
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
const uint8_t ff_default_chroma_qscale_table[32]
Definition: mpegvideodata.c:21
uint8_t * dest[3]
Definition: mpegvideo.h:295
static av_cold int dct_init(MpegEncContext *s)
Definition: mpegvideo.c:282
int last_pict_type
Definition: mpegvideo.h:214
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:242
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:162
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:182
Bi-dir predicted.
Definition: avutil.h:276
int index
Definition: parser.h:30
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1565
uint8_t * b_scratchpad
scratchpad used for writing into write only buffers
Definition: mpegpicture.h:39
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (H.263)
Definition: mpegvideo.h:190
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:95
uint32_t * map
map to avoid duplicate evaluations
Definition: motion_est.h:58
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:466
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
static int lowres
Definition: ffplay.c:336
H264ChromaContext h264chroma
Definition: mpegvideo.h:228
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:512
h264_chroma_mc_func avg_h264_chroma_pixels_tab[4]
Definition: h264chroma.h:29
int slices
Number of slices.
Definition: avcodec.h:1182
void * priv_data
Definition: avcodec.h:558
#define PICT_FRAME
Definition: mpegutils.h:39
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:884
int picture_structure
Definition: mpegvideo.h:464
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
VideoDSPContext vdsp
Definition: mpegvideo.h:236
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:466
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1411
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:536
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:270
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2235
uint8_t * obmc_scratchpad
Definition: mpegpicture.h:38
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:511
ParseContext parse_context
Definition: mpegvideo.h:362
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1878
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:168
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:392
#define FF_DEBUG_NOMC
Definition: avcodec.h:1641
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo.c:1485
int chroma_qscale
chroma QP
Definition: mpegvideo.h:205
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:644
static void free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution.
Definition: mpegvideo.c:986
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo.c:1438
int height
Definition: frame.h:372
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:300
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:56
#define av_freep(p)
int16_t(* dpcm_macroblock)[3][256]
Definition: mpegvideo.h:517
void(* decode_mb)(struct MpegEncContext *s)
Called for every Macroblock in a slice.
Definition: avcodec.h:2543
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
Definition: mpegvideo.h:119
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
Definition: mpegvideo.h:90
#define av_always_inline
Definition: attributes.h:45
uint8_t * temp
Definition: motion_est.h:56
#define FFSWAP(type, a, b)
Definition: common.h:99
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:141
#define stride
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
int16_t(* b_direct_mv_table_base)[2]
Definition: mpegvideo.h:245
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:239
float * bits_tab
Definition: mpegvideo.h:563
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:526
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:91
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo.c:2332
static void hsub(htype *dst, const htype *src, int bins)
Definition: vf_median.c:75
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo.c:1692
int i
Definition: input.c:407
Predicted.
Definition: avutil.h:275
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
HpelDSPContext hdsp
Definition: mpegvideo.h:229