/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/motion_vector.h"
#include "libavutil/timer.h"
#include "avcodec.h"
#include "blockdsp.h"
#include "h264chroma.h"
#include "idctdsp.h"
#include "internal.h"
#include "mathops.h"
#include "mpeg_er.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mpegvideodata.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "qpeldsp.h"
#include "thread.h"
#include "wmv2.h"
#include <limits.h>

static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    /* XXX: only MPEG-1 */
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}
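
/* Worked example (a sketch, not part of the build): the "(level - 1) | 1"
 * step above rounds each dequantized coefficient to an odd value, the
 * MPEG-1 "oddification" used to limit encoder/decoder IDCT mismatch.
 * The demo values below are arbitrary assumptions. */
#if 0
static int mpeg1_intra_dequant_one(int level, int qscale, int matrix_entry)
{
    int v = (level * qscale * matrix_entry) >> 3; /* same scaling as above */
    return (v - 1) | 1;                           /* force the result odd  */
}
/* e.g. level 10, qscale 4, matrix_entry 16: (10*4*16)>>3 = 80 -> 79 */
#endif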

static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}

static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
        }
    }
}

static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    sum += block[0];
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
            sum+=level;
        }
    }
    block[63]^=sum&1;
}

static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
            }
            block[j] = level;
            sum+=level;
        }
    }
    block[63]^=sum&1;
}

static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n]>=0 || s->h263_aic);

    qmul = qscale << 1;

    if (!s->h263_aic) {
        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
        qadd = (qscale - 1) | 1;
    }else{
        qadd = 0;
    }
    if(s->ac_pred)
        nCoeffs=63;
    else
        nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];

    for(i=1; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}

static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n]>=0);

    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;

    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

    for(i=0; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}
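
/* A sketch (not part of the build) of the H.263 reconstruction above:
 * |level| is scaled by qmul = 2*qscale and offset by qadd = (qscale-1)|1,
 * with the sign reapplied afterwards. qadd is forced odd so the mapping
 * stays symmetric around zero. Demo values are assumptions. */
#if 0
static int h263_dequant_one(int level, int qscale)
{
    int qmul = qscale << 1;      /* 2*qscale   */
    int qadd = (qscale - 1) | 1; /* odd offset */
    if (!level)
        return 0;
    return level < 0 ? level * qmul - qadd
                     : level * qmul + qadd;
}
/* e.g. level 3, qscale 5: 3*10 + 5 = 35; level -3 gives -35 */
#endif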


static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while(h--)
        memset(dst + h*linesize, 128, 16);
}

static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while(h--)
        memset(dst + h*linesize, 128, 8);
}

/* init common dct for both encoder and decoder */
static av_cold int dct_init(MpegEncContext *s)
{
    ff_blockdsp_init(&s->bdsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_mpegvideodsp_init(&s->mdsp);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        int i;
        for (i=0; i<4; i++) {
            s->hdsp.avg_pixels_tab[0][i] = gray16;
            s->hdsp.put_pixels_tab[0][i] = gray16;
            s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;

            s->hdsp.avg_pixels_tab[1][i] = gray8;
            s->hdsp.put_pixels_tab[1][i] = gray8;
            s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
        }
    }

    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    if (HAVE_INTRINSICS_NEON)
        ff_mpv_common_init_neon(s);

    if (ARCH_ALPHA)
        ff_mpv_common_init_axp(s);
    if (ARCH_ARM)
        ff_mpv_common_init_arm(s);
    if (ARCH_PPC)
        ff_mpv_common_init_ppc(s);
    if (ARCH_X86)
        ff_mpv_common_init_x86(s);
    if (ARCH_MIPS)
        ff_mpv_common_init_mips(s);

    return 0;
}

av_cold void ff_mpv_idct_init(MpegEncContext *s)
{
    if (s->codec_id == AV_CODEC_ID_MPEG4)
        s->idsp.mpeg4_studio_profile = s->studio_profile;
    ff_idctdsp_init(&s->idsp, s->avctx);

    /* load & permute scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
}
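
/* Sketch (not part of the build) of what ff_init_scantable() is assumed to
 * compute for the tables loaded above: the scan order is composed with the
 * IDCT's coefficient permutation, so coefficients read in scan order land
 * where this particular IDCT implementation expects them. */
#if 0
static void scantable_permute_sketch(uint8_t permutated[64],
                                     const uint8_t idct_permutation[64],
                                     const uint8_t scantable[64])
{
    for (int i = 0; i < 64; i++)
        permutated[i] = idct_permutation[scantable[i]];
}
#endif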

static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 0,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
}

static int init_duplicate_context(MpegEncContext *s)
{
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    s->sc.edge_emu_buffer =
    s->me.scratchpad      =
    s->me.temp            =
    s->sc.rd_scratchpad   =
    s->sc.b_scratchpad    =
    s->sc.obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    FF_ALLOCZ_OR_GOTO(s->avctx, s->block32, sizeof(*s->block32), fail)
    s->dpcm_direction = 0;
    FF_ALLOCZ_OR_GOTO(s->avctx, s->dpcm_macroblock, sizeof(*s->dpcm_macroblock), fail)

    if (s->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_mpv_common_end()
}

static void free_duplicate_context(MpegEncContext *s)
{
    if (!s)
        return;

    av_freep(&s->sc.edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    s->me.temp            =
    s->sc.rd_scratchpad   =
    s->sc.b_scratchpad    =
    s->sc.obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->block32);
    av_freep(&s->dpcm_macroblock);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}

static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(sc.edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(sc.rd_scratchpad);
    COPY(sc.b_scratchpad);
    COPY(sc.obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(block32);
    COPY(dpcm_macroblock);
    COPY(dpcm_direction);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}

int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
    }
    if (!dst->sc.edge_emu_buffer &&
        (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
                                       &dst->sc, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
    return 0;
}
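
/* The COPY()/memcpy()/COPY() dance above is a generic "bulk-copy shared
 * state, keep per-thread pointers" pattern. A minimal freestanding sketch
 * (not part of the build) with a hypothetical two-field context: */
#if 0
typedef struct Ctx {
    int   shared_param;  /* safe to overwrite wholesale      */
    void *private_buf;   /* must remain unique per duplicate */
} Ctx;

static void clone_ctx(Ctx *dst, const Ctx *src)
{
    void *keep = dst->private_buf; /* back up the per-copy pointer */
    *dst = *src;                   /* bulk-copy everything else    */
    dst->private_buf = keep;       /* restore the backup           */
}
#endif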

int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    int i, ret;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src)
        return 0;

    av_assert0(s != s1);

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        int err;
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        if (s1->context_initialized){
//             s->picture_range_start += MAX_PICTURE_COUNT;
//             s->picture_range_end   += MAX_PICTURE_COUNT;
            ff_mpv_idct_init(s);
            if((err = ff_mpv_common_init(s)) < 0){
                memset(s, 0, sizeof(MpegEncContext));
                s->avctx = dst;
                return err;
            }
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->context_reinit = 0;
        s->height = s1->height;
        s->width  = s1->width;
        if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
            return ret;
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->quarter_sample       = s1->quarter_sample;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;

    av_assert0(!s->picture || s->picture != s1->picture);
    if(s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
            if (s1->picture && s1->picture[i].f->buf[0] &&
                (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
                return ret;
        }

#define UPDATE_PICTURE(pic)\
do {\
    ff_mpeg_unref_picture(s->avctx, &s->pic);\
    if (s1->pic.f && s1->pic.f->buf[0])\
        ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
    else\
        ret = ff_update_picture_tables(&s->pic, &s1->pic);\
    if (ret < 0)\
        return ret;\
} while (0)

    UPDATE_PICTURE(current_picture);
    UPDATE_PICTURE(last_picture);
    UPDATE_PICTURE(next_picture);

#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    ((pic && pic >= old_ctx->picture &&                   \
      pic < old_ctx->picture + MAX_PICTURE_COUNT) ?       \
        &new_ctx->picture[pic - old_ctx->picture] : NULL)

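/* REBASE_PICTURE translates a pointer into one context's picture[] array
 * into the matching slot of another context via index arithmetic; anything
 * outside the array maps to NULL. A freestanding sketch of the same idea
 * (hypothetical element type, not part of the build): */
#if 0
static int *rebase_sketch(int *p, int *old_base, int *new_base, int count)
{
    /* meaningful only when p points into old_base[0..count) */
    return (p && p >= old_base && p < old_base + count)
               ? new_base + (p - old_base) : NULL;
}
#endif
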
    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    // MPEG-4 timing info
    memcpy(&s->last_time_base, &s1->last_time_base,
           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
           (char *) &s1->last_time_base);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            AV_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size) {
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
            if (!s->bitstream_buffer) {
                s->bitstream_buffer_size = 0;
                return AVERROR(ENOMEM);
            }
        }
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               AV_INPUT_BUFFER_PADDING_SIZE);
    }

    // linesize-dependent scratch buffer allocation
    if (!s->sc.edge_emu_buffer)
        if (s1->linesize) {
            if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
                                        &s->sc, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
        }

    // MPEG-2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
    }

    return 0;
}

/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
void ff_mpv_common_defaults(MpegEncContext *s)
{
    s->y_dc_scale_table     =
    s->c_dc_scale_table     = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table  = ff_default_chroma_qscale_table;
    s->progressive_frame    = 1;
    s->progressive_sequence = 1;
    s->picture_structure    = PICT_FRAME;

    s->coded_picture_number = 0;
    s->picture_number       = 0;

    s->f_code               = 1;
    s->b_code               = 1;

    s->slice_context_count  = 1;
}

/**
 * Set the given MpegEncContext to defaults for decoding.
 * The changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_mpv_decode_defaults(MpegEncContext *s)
{
    ff_mpv_common_defaults(s);
}

void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
{
    s->avctx           = avctx;
    s->width           = avctx->coded_width;
    s->height          = avctx->coded_height;
    s->codec_id        = avctx->codec->id;
    s->workaround_bugs = avctx->workaround_bugs;

    /* convert fourcc to upper case */
    s->codec_tag       = avpriv_toupper4(avctx->codec_tag);
}

/**
 * Initialize and allocate MpegEncContext fields dependent on the resolution.
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                      fail); // error resilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,     mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table            = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base     + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);

    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is an intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for a quicker MPEG-4 slice_end detection

    return ff_mpeg_er_init(s);
fail:
    return AVERROR(ENOMEM);
}
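
/* Worked example of the geometry set up above, assuming a 720x576
 * progressive frame: mb_width = (720+15)/16 = 45, mb_height = 36,
 * mb_stride = 46 (one padding column), b8_stride = 91. mb_index2xy then
 * maps a linear macroblock number onto the padded stride, as this sketch
 * (not part of the build) recomputes: */
#if 0
static int mb_index_to_xy(int index, int mb_width, int mb_stride)
{
    int x = index % mb_width;
    int y = index / mb_width;
    return x + y * mb_stride; /* same values init_context_frame() tabulates */
}
#endif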

static void clear_context(MpegEncContext *s)
{
    int i, j, k;

    memset(&s->next_picture, 0, sizeof(s->next_picture));
    memset(&s->last_picture, 0, sizeof(s->last_picture));
    memset(&s->current_picture, 0, sizeof(s->current_picture));
    memset(&s->new_picture, 0, sizeof(s->new_picture));

    memset(s->thread_context, 0, sizeof(s->thread_context));

    s->me.map = NULL;
    s->me.score_map = NULL;
    s->dct_error_sum = NULL;
    s->block = NULL;
    s->blocks = NULL;
    s->block32 = NULL;
    memset(s->pblocks, 0, sizeof(s->pblocks));
    s->dpcm_direction = 0;
    s->dpcm_macroblock = NULL;
    s->ac_val_base = NULL;
    s->ac_val[0] =
    s->ac_val[1] =
    s->ac_val[2] = NULL;
    s->sc.edge_emu_buffer = NULL;
    s->me.scratchpad = NULL;
    s->me.temp =
    s->sc.rd_scratchpad =
    s->sc.b_scratchpad =
    s->sc.obmc_scratchpad = NULL;


    s->bitstream_buffer = NULL;
    s->allocated_bitstream_buffer_size = 0;
    s->picture = NULL;
    s->mb_type = NULL;
    s->p_mv_table_base = NULL;
    s->b_forw_mv_table_base = NULL;
    s->b_back_mv_table_base = NULL;
    s->b_bidir_forw_mv_table_base = NULL;
    s->b_bidir_back_mv_table_base = NULL;
    s->b_direct_mv_table_base = NULL;
    s->p_mv_table = NULL;
    s->b_forw_mv_table = NULL;
    s->b_back_mv_table = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                s->b_field_mv_table_base[i][j][k] = NULL;
                s->b_field_mv_table[i][j][k] = NULL;
            }
            s->b_field_select_table[i][j] = NULL;
            s->p_field_mv_table_base[i][j] = NULL;
            s->p_field_mv_table[i][j] = NULL;
        }
        s->p_field_select_table[i] = NULL;
    }

    s->dc_val_base = NULL;
    s->coded_block_base = NULL;
    s->mbintra_table = NULL;
    s->cbp_table = NULL;
    s->pred_dir_table = NULL;

    s->mbskip_table = NULL;

    s->er.error_status_table = NULL;
    s->er.er_temp_buffer = NULL;
    s->mb_index2xy = NULL;
    s->lambda_table = NULL;

    s->cplx_tab = NULL;
    s->bits_tab = NULL;
}

/**
 * Initialize the common structure for both encoder and decoder;
 * this assumes that some variables like width/height are already set.
 */
av_cold int ff_mpv_common_init(MpegEncContext *s)
{
    int i, ret;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    clear_context(s);

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    dct_init(s);

    /* set chroma shifts */
    ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (ret)
        return ret;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      MAX_PICTURE_COUNT * sizeof(Picture), fail);
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        s->picture[i].f = av_frame_alloc();
        if (!s->picture[i].f)
            goto fail;
    }
    s->next_picture.f = av_frame_alloc();
    if (!s->next_picture.f)
        goto fail;
    s->last_picture.f = av_frame_alloc();
    if (!s->last_picture.f)
        goto fail;
    s->current_picture.f = av_frame_alloc();
    if (!s->current_picture.f)
        goto fail;
    s->new_picture.f = av_frame_alloc();
    if (!s->new_picture.f)
        goto fail;

    if (init_context_frame(s))
        goto fail;

    s->parse_context.state = -1;

    s->context_initialized = 1;
    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0]   = s;

//     if (s->width && s->height) {
    if (nb_slices > 1) {
        for (i = 0; i < nb_slices; i++) {
            if (i) {
                s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
                if (!s->thread_context[i])
                    goto fail;
            }
            if (init_duplicate_context(s->thread_context[i]) < 0)
                goto fail;
            s->thread_context[i]->start_mb_y =
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
            s->thread_context[i]->end_mb_y   =
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        }
    } else {
        if (init_duplicate_context(s) < 0)
            goto fail;
        s->start_mb_y = 0;
        s->end_mb_y   = s->mb_height;
    }
    s->slice_context_count = nb_slices;
//     }

    return 0;
 fail:
    ff_mpv_common_end(s);
    return -1;
}
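
/* The start_mb_y/end_mb_y assignments above split macroblock rows across
 * slice threads with rounding to the nearest boundary. Sketch (not part
 * of the build) of the partition: */
#if 0
static void slice_bounds_sketch(int mb_height, int nb_slices, int i,
                                int *start, int *end)
{
    *start = (mb_height *  i      + nb_slices / 2) / nb_slices;
    *end   = (mb_height * (i + 1) + nb_slices / 2) / nb_slices;
}
/* e.g. 36 rows over 4 slices: [0,9) [9,18) [18,27) [27,36) */
#endif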

/**
 * Frees and resets MpegEncContext fields depending on the resolution.
 * Used during resolution changes to avoid a full reinitialization of the
 * codec.
 */
static void free_context_frame(MpegEncContext *s)
{
    int i, j, k;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);

    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    s->linesize = s->uvlinesize = 0;
}

int ff_mpv_common_frame_size_change(MpegEncContext *s)
{
    int i, err = 0;

    if (!s->context_initialized)
        return AVERROR(EINVAL);

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
    } else
        free_duplicate_context(s);

    free_context_frame(s);

    if (s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            s->picture[i].needs_realloc = 1;
        }

    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;

    // init
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
        goto fail;

    /* set chroma shifts */
    err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (err < 0)
        return err;

    if ((err = init_context_frame(s)))
        goto fail;

    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0]   = s;

    if (s->width && s->height) {
        int nb_slices = s->slice_context_count;
        if (nb_slices > 1) {
            for (i = 0; i < nb_slices; i++) {
                if (i) {
                    s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
                    if (!s->thread_context[i]) {
                        err = AVERROR(ENOMEM);
                        goto fail;
                    }
                }
                if ((err = init_duplicate_context(s->thread_context[i])) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
            }
        } else {
            err = init_duplicate_context(s);
            if (err < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
        s->slice_context_count = nb_slices;
    }

    return 0;
 fail:
    free_context_frame(s);
    return err;
}

/* free the common structure for both encoder and decoder */
void ff_mpv_common_end(MpegEncContext *s)
{
    int i;

    if (!s)
        return;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    if (s->picture) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_free_picture_tables(&s->picture[i]);
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
            av_frame_free(&s->picture[i].f);
        }
    }
    av_freep(&s->picture);
    ff_free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    av_frame_free(&s->last_picture.f);
    ff_free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    av_frame_free(&s->current_picture.f);
    ff_free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);
    av_frame_free(&s->next_picture.f);
    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);
    av_frame_free(&s->new_picture.f);

    free_context_frame(s);

    s->context_initialized      = 0;
    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;
    s->linesize = s->uvlinesize = 0;
}


static void gray_frame(AVFrame *frame)
{
    int i, h_chroma_shift, v_chroma_shift;

    av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);

    for(i=0; i<frame->height; i++)
        memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
    for(i=0; i<AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
        memset(frame->data[1] + frame->linesize[1]*i,
               0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
        memset(frame->data[2] + frame->linesize[2]*i,
               0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
    }
}

/**
 * generic function called after decoding
 * the header and before a frame is decoded.
 */
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i, ret;
    Picture *pic;
    s->mb_skipped = 0;

    if (!ff_thread_can_start_frame(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return -1;
    }

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    /* release forgotten pictures */
    /* if (MPEG-124 / H.263) */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (&s->picture[i] != s->last_picture_ptr &&
            &s->picture[i] != s->next_picture_ptr &&
            s->picture[i].reference && !s->picture[i].needs_realloc) {
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
        }
    }

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);

    /* release non reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
    }

    if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
        // we already have an unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
    } else {
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        pic = &s->picture[i];
    }

    pic->reference = 0;
    if (!s->droppable) {
        if (s->pict_type != AV_PICTURE_TYPE_B)
            pic->reference = 3;
    }

    pic->f->coded_picture_number = s->coded_picture_number++;

    if (alloc_picture(s, pic, 0) < 0)
        return -1;

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f->top_field_first = s->top_field_first;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f->top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    }
    s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
                                                  !s->progressive_sequence;
    s->current_picture_ptr->field_picture       = s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f->pict_type = s->pict_type;
    // if (s->avctx->flags && AV_CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
            s->pict_type, s->droppable);

    if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
        (s->pict_type != AV_PICTURE_TYPE_I)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
            av_log(avctx, AV_LOG_DEBUG,
                   "allocating dummy last picture for B frame\n");
        else if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");

        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->last_picture_ptr = &s->picture[i];

        s->last_picture_ptr->reference    = 3;
        s->last_picture_ptr->f->key_frame = 0;
        s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->last_picture_ptr, 0) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }

        if (!avctx->hwaccel) {
            for(i=0; i<avctx->height; i++)
                memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
                       0x80, avctx->width);
            if (s->last_picture_ptr->f->data[2]) {
                for(i=0; i<AV_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
                    memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                    memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                }
            }

            if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
                for(i=0; i<avctx->height; i++)
                    memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
            }
        }

        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    }
    if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->next_picture_ptr = &s->picture[i];

        s->next_picture_ptr->reference    = 3;
        s->next_picture_ptr->f->key_frame = 0;
        s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->next_picture_ptr, 0) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
    }

#if 0 // BUFREF-FIXME
    memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
    memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
#endif
    if (s->last_picture_ptr) {
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                     s->last_picture_ptr->f->buf[0]));

    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i]    *= 2;
            s->next_picture.f->linesize[i]    *= 2;
        }
    }

    /* set dequantizer, we can't do it during init as
     * it might change for MPEG-4 and we can't do it in the header
     * decode as init is not called for MPEG-4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        gray_frame(s->current_picture_ptr->f);
    }

    return 0;
}

/* called after a frame has been decoded. */
void ff_mpv_frame_end(MpegEncContext *s)
{
    emms_c();

    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}

void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
{
    ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
                         p->qscale_table, p->motion_val, &s->low_delay,
                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
}

int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
{
    AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
    int offset = 2*s->mb_stride + 1;
    if(!ref)
        return AVERROR(ENOMEM);
    av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
    ref->size -= offset;
    ref->data += offset;
    return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
}

static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, ptrdiff_t stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres   = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 3);
    const int s_mask   = (2 << lowres) - 1;
    int emu = 0;
    int sx, sy;

    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y << field_based,
                                 h_edge_pos, v_edge_pos);
        src = s->sc.edge_emu_buffer;
        emu = 1;
    }

    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
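
/* In lowres decoding every pixel coordinate is scaled down by 2^lowres
 * while motion vectors keep full half-pel precision, so the integer part
 * is mv >> (lowres + 1) and the sub-pel phase is mv & ((2 << lowres) - 1),
 * exactly as computed above. A sketch (not part of the build): */
#if 0
static void split_lowres_mv(int mv, int lowres, int *whole, int *frac)
{
    *whole = mv >> (lowres + 1);       /* whole lowres pixels */
    *frac  = mv & ((2 << lowres) - 1); /* sub-pel phase       */
}
/* e.g. mv = 13, lowres = 1: whole 3, frac 1 */
#endif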

/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t uvlinesize, linesize;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 3);
    const int block_s    = 8>>lowres;
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y =    mb_y * block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s               + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if(s->chroma_x_shift){
                //Chroma422
                mx      = motion_x / 2;
                uvsx    = mx & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
                //Chroma444
                uvsx    = motion_x & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) || uvsrc_y<0 ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based, h_edge_pos,
                                 v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}

static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                            uint8_t *dest_cb, uint8_t *dest_cr,
                                            uint8_t **ref_picture,
                                            h264_chroma_mc_func * pix_op,
                                            int mx, int my)
{
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 3);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
1707 
1708 /**
1709  * motion compensation of a single macroblock
1710  * @param s context
1711  * @param dest_y luma destination pointer
1712  * @param dest_cb chroma cb/u destination pointer
1713  * @param dest_cr chroma cr/v destination pointer
1714  * @param dir direction (0->forward, 1->backward)
1715  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1716  * @param pix_op halfpel motion compensation function (average or put normally)
1717  * the motion vectors are taken from s->mv and the MV type from s->mv_type
1718  */
1719 static inline void MPV_motion_lowres(MpegEncContext *s,
1720  uint8_t *dest_y, uint8_t *dest_cb,
1721  uint8_t *dest_cr,
1722  int dir, uint8_t **ref_picture,
1723  h264_chroma_mc_func *pix_op)
1724 {
1725  int mx, my;
1726  int mb_x, mb_y, i;
1727  const int lowres = s->avctx->lowres;
1728  const int block_s = 8 >>lowres;
1729 
1730  mb_x = s->mb_x;
1731  mb_y = s->mb_y;
1732 
1733  switch (s->mv_type) {
1734  case MV_TYPE_16X16:
1735  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1736  0, 0, 0,
1737  ref_picture, pix_op,
1738  s->mv[dir][0][0], s->mv[dir][0][1],
1739  2 * block_s, mb_y);
1740  break;
1741  case MV_TYPE_8X8:
1742  mx = 0;
1743  my = 0;
1744  for (i = 0; i < 4; i++) {
1745  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
1746  s->linesize) * block_s,
1747  ref_picture[0], 0, 0,
1748  (2 * mb_x + (i & 1)) * block_s,
1749  (2 * mb_y + (i >> 1)) * block_s,
1750  s->width, s->height, s->linesize,
1751  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1752  block_s, block_s, pix_op,
1753  s->mv[dir][i][0], s->mv[dir][i][1]);
1754 
1755  mx += s->mv[dir][i][0];
1756  my += s->mv[dir][i][1];
1757  }
1758 
1759  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
1760  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
1761  pix_op, mx, my);
1762  break;
1763  case MV_TYPE_FIELD:
1764  if (s->picture_structure == PICT_FRAME) {
1765  /* top field */
1766  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1767  1, 0, s->field_select[dir][0],
1768  ref_picture, pix_op,
1769  s->mv[dir][0][0], s->mv[dir][0][1],
1770  block_s, mb_y);
1771  /* bottom field */
1772  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1773  1, 1, s->field_select[dir][1],
1774  ref_picture, pix_op,
1775  s->mv[dir][1][0], s->mv[dir][1][1],
1776  block_s, mb_y);
1777  } else {
1778  if (s->picture_structure != s->field_select[dir][0] + 1 &&
1779  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
1780  ref_picture = s->current_picture_ptr->f->data;
1781 
1782  }
1783  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1784  0, 0, s->field_select[dir][0],
1785  ref_picture, pix_op,
1786  s->mv[dir][0][0],
1787  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
1788  }
1789  break;
1790  case MV_TYPE_16X8:
1791  for (i = 0; i < 2; i++) {
1792  uint8_t **ref2picture;
1793 
1794  if (s->picture_structure == s->field_select[dir][i] + 1 ||
1795  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
1796  ref2picture = ref_picture;
1797  } else {
1798  ref2picture = s->current_picture_ptr->f->data;
1799  }
1800 
1801  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1802  0, 0, s->field_select[dir][i],
1803  ref2picture, pix_op,
1804  s->mv[dir][i][0], s->mv[dir][i][1] +
1805  2 * block_s * i, block_s, mb_y >> 1);
1806 
1807  dest_y += 2 * block_s * s->linesize;
1808  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
1809  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
1810  }
1811  break;
1812  case MV_TYPE_DMV:
1813  if (s->picture_structure == PICT_FRAME) {
1814  for (i = 0; i < 2; i++) {
1815  int j;
1816  for (j = 0; j < 2; j++) {
1817  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1818  1, j, j ^ i,
1819  ref_picture, pix_op,
1820  s->mv[dir][2 * i + j][0],
1821  s->mv[dir][2 * i + j][1],
1822  block_s, mb_y);
1823  }
1824  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
1825  }
1826  } else {
1827  for (i = 0; i < 2; i++) {
1828  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1829  0, 0, s->picture_structure != i + 1,
1830  ref_picture, pix_op,
1831  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
1832  2 * block_s, mb_y >> 1);
1833 
1834  // after put we make avg of the same block
1835  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
1836 
1837  // opposite parity is always in the same
1838  // frame if this is second field
1839  if (!s->first_field) {
1840  ref_picture = s->current_picture_ptr->f->data;
1841  }
1842  }
1843  }
1844  break;
1845  default:
1846  av_assert2(0);
1847  }
1848 }
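
/* Editorial sketch, not part of mpegvideo.c: geometry under lowres decoding.
 * In MPV_motion_lowres() above, block_s = 8 >> lowres halves every sub-block
 * side per lowres level, and the frame edge positions are shifted by the same
 * amount, so motion compensation runs on a proportionally shrunken grid.
 * lowres_block_side() is a hypothetical helper for illustration only. */
static inline int lowres_block_side(int lowres)
{
    return 8 >> lowres;   /* 8 at lowres 0, 4 at lowres 1, 2 at lowres 2 */
}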
1849 
1850 /**
1851  * find the lowest MB row referenced in the MVs
1852  */
1853 static int lowest_referenced_row(MpegEncContext *s, int dir)
1854 {
1855  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1856  int my, off, i, mvs;
1857 
1858  if (s->picture_structure != PICT_FRAME || s->mcsel)
1859  goto unhandled;
1860 
1861  switch (s->mv_type) {
1862  case MV_TYPE_16X16:
1863  mvs = 1;
1864  break;
1865  case MV_TYPE_16X8:
1866  mvs = 2;
1867  break;
1868  case MV_TYPE_8X8:
1869  mvs = 4;
1870  break;
1871  default:
1872  goto unhandled;
1873  }
1874 
1875  for (i = 0; i < mvs; i++) {
1876  my = s->mv[dir][i][1];
1877  my_max = FFMAX(my_max, my);
1878  my_min = FFMIN(my_min, my);
1879  }
1880 
1881  off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
1882 
1883  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
1884 unhandled:
1885  return s->mb_height-1;
1886 }
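
/* Editorial sketch, not part of mpegvideo.c: the row computation above in
 * isolation. Vertical MVs are stored in half-pel units for most codecs and
 * quarter-pel for MPEG-4 qpel; qpel_shift = !quarter_sample normalizes both
 * to quarter-pel units. 64 quarter-pel steps span 16 luma pixels, one
 * macroblock row, so "(v + 63) >> 6" is a ceiling division by 64. */
static int mv_to_mb_row_offset(int my_extreme, int quarter_sample)
{
    int qpel_shift = !quarter_sample;            /* half-pel -> quarter-pel */
    return ((my_extreme << qpel_shift) + 63) >> 6;
}
/* Example: my_extreme = 40 half-pel (20 pixels) -> (80 + 63) >> 6 = 2 rows. */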
1887 
1888 /* put block[] to dest[] */
1889 static inline void put_dct(MpegEncContext *s,
1890  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1891 {
1892  s->dct_unquantize_intra(s, block, i, qscale);
1893  s->idsp.idct_put(dest, line_size, block);
1894 }
1895 
1896 /* add block[] to dest[] */
1897 static inline void add_dct(MpegEncContext *s,
1898  int16_t *block, int i, uint8_t *dest, int line_size)
1899 {
1900  if (s->block_last_index[i] >= 0) {
1901  s->idsp.idct_add(dest, line_size, block);
1902  }
1903 }
1904 
1905 static inline void add_dequant_dct(MpegEncContext *s,
1906  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1907 {
1908  if (s->block_last_index[i] >= 0) {
1909  s->dct_unquantize_inter(s, block, i, qscale);
1910 
1911  s->idsp.idct_add(dest, line_size, block);
1912  }
1913 }
1914 
1915 /**
1916  * Clean dc, ac, coded_block for the current non-intra MB.
1917  */
1918 void ff_clean_intra_table_entries(MpegEncContext *s)
1919 {
1920  int wrap = s->b8_stride;
1921  int xy = s->block_index[0];
1922 
1923  s->dc_val[0][xy ] =
1924  s->dc_val[0][xy + 1 ] =
1925  s->dc_val[0][xy + wrap] =
1926  s->dc_val[0][xy + 1 + wrap] = 1024;
1927  /* ac pred */
1928  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1929  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1930  if (s->msmpeg4_version>=3) {
1931  s->coded_block[xy ] =
1932  s->coded_block[xy + 1 ] =
1933  s->coded_block[xy + wrap] =
1934  s->coded_block[xy + 1 + wrap] = 0;
1935  }
1936  /* chroma */
1937  wrap = s->mb_stride;
1938  xy = s->mb_x + s->mb_y * wrap;
1939  s->dc_val[1][xy] =
1940  s->dc_val[2][xy] = 1024;
1941  /* ac pred */
1942  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1943  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1944 
1945  s->mbintra_table[xy]= 0;
1946 }
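
/* Editorial sketch, not part of mpegvideo.c: how the four luma DC slots of
 * one macroblock are addressed above. dc_val[0] is a per-8x8-block plane
 * with stride b8_stride; xy = block_index[0] is the top-left block, so the
 * macroblock covers xy, xy+1, xy+wrap and xy+1+wrap. 1024 is the DC
 * predictor reset value of the H.263/MPEG-4 prediction scheme. */
static void reset_luma_dc_example(int16_t *dc_plane, int xy, int wrap)
{
    dc_plane[xy]            = 1024;   /* top-left 8x8 block  */
    dc_plane[xy + 1]        = 1024;   /* top-right           */
    dc_plane[xy + wrap]     = 1024;   /* bottom-left         */
    dc_plane[xy + 1 + wrap] = 1024;   /* bottom-right        */
}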
1947 
1948 /* generic function called after a macroblock has been parsed by the
1949  decoder or after it has been encoded by the encoder.
1950 
1951  Important variables used:
1952  s->mb_intra : true if intra macroblock
1953  s->mv_dir : motion vector direction
1954  s->mv_type : motion vector type
1955  s->mv : motion vector
1956  s->interlaced_dct : true if interlaced dct used (mpeg2)
1957  */
1958 static av_always_inline
1959 void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
1960  int lowres_flag, int is_mpeg12)
1961 {
1962  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1963 
1964  if (CONFIG_XVMC &&
1965  s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
1966  s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
1967  return;
1968  }
1969 
1970  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1971  /* print DCT coefficients */
1972  int i,j;
1973  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1974  for(i=0; i<6; i++){
1975  for(j=0; j<64; j++){
1976  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1977  block[i][s->idsp.idct_permutation[j]]);
1978  }
1979  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1980  }
1981  }
1982 
1983  s->current_picture.qscale_table[mb_xy] = s->qscale;
1984 
1985  /* update DC predictors for P macroblocks */
1986  if (!s->mb_intra) {
1987  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1988  if(s->mbintra_table[mb_xy])
1989  ff_clean_intra_table_entries(s);
1990  } else {
1991  s->last_dc[0] =
1992  s->last_dc[1] =
1993  s->last_dc[2] = 128 << s->intra_dc_precision;
1994  }
1995  }
1996  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1997  s->mbintra_table[mb_xy]=1;
1998 
1999  if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
2000  !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
2001  s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
2002  uint8_t *dest_y, *dest_cb, *dest_cr;
2003  int dct_linesize, dct_offset;
2004  op_pixels_func (*op_pix)[4];
2005  qpel_mc_func (*op_qpix)[16];
2006  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2007  const int uvlinesize = s->current_picture.f->linesize[1];
2008  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2009  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2010 
2011  /* avoid copy if macroblock skipped in last frame too */
2012  /* skip only during decoding as we might trash the buffers during encoding a bit */
2013  if(!s->encoding){
2014  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2015 
2016  if (s->mb_skipped) {
2017  s->mb_skipped= 0;
2018  av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2019  *mbskip_ptr = 1;
2020  } else if(!s->current_picture.reference) {
2021  *mbskip_ptr = 1;
2022  } else{
2023  *mbskip_ptr = 0; /* not skipped */
2024  }
2025  }
2026 
2027  dct_linesize = linesize << s->interlaced_dct;
2028  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2029 
2030  if(readable){
2031  dest_y= s->dest[0];
2032  dest_cb= s->dest[1];
2033  dest_cr= s->dest[2];
2034  }else{
2035  dest_y = s->sc.b_scratchpad;
2036  dest_cb= s->sc.b_scratchpad+16*linesize;
2037  dest_cr= s->sc.b_scratchpad+32*linesize;
2038  }
2039 
2040  if (!s->mb_intra) {
2041  /* motion handling */
2042  /* decoding or more than one mb_type (MC was already done otherwise) */
2043  if(!s->encoding){
2044 
2045  if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2046  if (s->mv_dir & MV_DIR_FORWARD) {
2047  ff_thread_await_progress(&s->last_picture_ptr->tf,
2048  lowest_referenced_row(s, 0),
2049  0);
2050  }
2051  if (s->mv_dir & MV_DIR_BACKWARD) {
2052  ff_thread_await_progress(&s->next_picture_ptr->tf,
2053  lowest_referenced_row(s, 1),
2054  0);
2055  }
2056  }
2057 
2058  if(lowres_flag){
2059  h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2060 
2061  if (s->mv_dir & MV_DIR_FORWARD) {
2062  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
2063  op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2064  }
2065  if (s->mv_dir & MV_DIR_BACKWARD) {
2066  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
2067  }
2068  }else{
2069  op_qpix = s->me.qpel_put;
2070  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2071  op_pix = s->hdsp.put_pixels_tab;
2072  }else{
2073  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2074  }
2075  if (s->mv_dir & MV_DIR_FORWARD) {
2076  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2077  op_pix = s->hdsp.avg_pixels_tab;
2078  op_qpix= s->me.qpel_avg;
2079  }
2080  if (s->mv_dir & MV_DIR_BACKWARD) {
2081  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2082  }
2083  }
2084  }
2085 
2086  /* skip dequant / idct if we are really late ;) */
2087  if(s->avctx->skip_idct){
2088  if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2089  ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2090  || s->avctx->skip_idct >= AVDISCARD_ALL)
2091  goto skip_idct;
2092  }
2093 
2094  /* add dct residue */
2095  if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2096  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2097  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2098  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2099  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2100  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2101 
2102  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2103  if (s->chroma_y_shift){
2104  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2105  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2106  }else{
2107  dct_linesize >>= 1;
2108  dct_offset >>=1;
2109  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2110  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2111  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2112  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2113  }
2114  }
2115  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2116  add_dct(s, block[0], 0, dest_y , dct_linesize);
2117  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2118  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2119  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2120 
2121  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2122  if(s->chroma_y_shift){//Chroma420
2123  add_dct(s, block[4], 4, dest_cb, uvlinesize);
2124  add_dct(s, block[5], 5, dest_cr, uvlinesize);
2125  }else{
2126  //chroma422
2127  dct_linesize = uvlinesize << s->interlaced_dct;
2128  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2129 
2130  add_dct(s, block[4], 4, dest_cb, dct_linesize);
2131  add_dct(s, block[5], 5, dest_cr, dct_linesize);
2132  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2133  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2134  if(!s->chroma_x_shift){//Chroma444
2135  add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2136  add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2137  add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2138  add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2139  }
2140  }
2141  }//fi gray
2142  }
2143  else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2144  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2145  }
2146  } else {
2147  /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
2148  TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
2149  if (s->avctx->bits_per_raw_sample > 8){
2150  const int act_block_size = block_size * 2;
2151 
2152  if(s->dpcm_direction == 0) {
2153  s->idsp.idct_put(dest_y, dct_linesize, (int16_t*)(*s->block32)[0]);
2154  s->idsp.idct_put(dest_y + act_block_size, dct_linesize, (int16_t*)(*s->block32)[1]);
2155  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, (int16_t*)(*s->block32)[2]);
2156  s->idsp.idct_put(dest_y + dct_offset + act_block_size, dct_linesize, (int16_t*)(*s->block32)[3]);
2157 
2158  dct_linesize = uvlinesize << s->interlaced_dct;
2159  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2160 
2161  s->idsp.idct_put(dest_cb, dct_linesize, (int16_t*)(*s->block32)[4]);
2162  s->idsp.idct_put(dest_cr, dct_linesize, (int16_t*)(*s->block32)[5]);
2163  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, (int16_t*)(*s->block32)[6]);
2164  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, (int16_t*)(*s->block32)[7]);
2165  if(!s->chroma_x_shift){//Chroma444
2166  s->idsp.idct_put(dest_cb + act_block_size, dct_linesize, (int16_t*)(*s->block32)[8]);
2167  s->idsp.idct_put(dest_cr + act_block_size, dct_linesize, (int16_t*)(*s->block32)[9]);
2168  s->idsp.idct_put(dest_cb + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[10]);
2169  s->idsp.idct_put(dest_cr + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[11]);
2170  }
2171  } else if(s->dpcm_direction == 1) {
2172  int i, w, h;
2173  uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
2174  int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
2175  for(i = 0; i < 3; i++) {
2176  int idx = 0;
2177  int vsub = i ? s->chroma_y_shift : 0;
2178  int hsub = i ? s->chroma_x_shift : 0;
2179  for(h = 0; h < (16 >> vsub); h++){
2180  for(w = 0; w < (16 >> hsub); w++)
2181  dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
2182  dest_pcm[i] += linesize[i] / 2;
2183  }
2184  }
2185  } else if(s->dpcm_direction == -1) {
2186  int i, w, h;
2187  uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
2188  int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
2189  for(i = 0; i < 3; i++) {
2190  int idx = 0;
2191  int vsub = i ? s->chroma_y_shift : 0;
2192  int hsub = i ? s->chroma_x_shift : 0;
2193  dest_pcm[i] += (linesize[i] / 2) * ((16 >> vsub) - 1);
2194  for(h = (16 >> vsub)-1; h >= 1; h--){
2195  for(w = (16 >> hsub)-1; w >= 1; w--)
2196  dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
2197  dest_pcm[i] -= linesize[i] / 2;
2198  }
2199  }
2200  }
2201  }
2202  /* dct only in intra block */
2203  else if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2204  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2205  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2206  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2207  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2208 
2209  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2210  if(s->chroma_y_shift){
2211  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2212  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2213  }else{
2214  dct_offset >>=1;
2215  dct_linesize >>=1;
2216  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2217  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2218  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2219  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2220  }
2221  }
2222  }else{
2223  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
2224  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2225  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
2226  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2227 
2228  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2229  if(s->chroma_y_shift){
2230  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
2231  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
2232  }else{
2233 
2234  dct_linesize = uvlinesize << s->interlaced_dct;
2235  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2236 
2237  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
2238  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
2239  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2240  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2241  if(!s->chroma_x_shift){//Chroma444
2242  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2243  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2244  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2245  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
2246  }
2247  }
2248  }//gray
2249  }
2250  }
2251 skip_idct:
2252  if(!readable){
2253  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2254  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2255  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2256  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2257  }
2258  }
2259  }
2260 }
2261 
2262 void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
2263 {
2264 #if !CONFIG_SMALL
2265  if(s->out_format == FMT_MPEG1) {
2266  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 1);
2267  else mpv_reconstruct_mb_internal(s, block, 0, 1);
2268  } else
2269 #endif
2270  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 0);
2271  else mpv_reconstruct_mb_internal(s, block, 0, 0);
2272 }
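
/* Editorial sketch, not part of mpegvideo.c: the dispatch pattern used by
 * ff_mpv_reconstruct_mb() above. Because the internal worker is
 * av_always_inline and is called only with literal 0/1 arguments, the
 * compiler constant-folds the lowres_flag/is_mpeg12 branches and emits up
 * to four specialized bodies, while CONFIG_SMALL drops the MPEG-1/2
 * specializations to save code size. work() below is a stand-in. */
static inline void work(int lowres_flag, int is_mpeg12)
{
    /* branches on the two flags fold away in each specialization */
}

static void dispatch(int lowres, int is_mpeg12)
{
    if (is_mpeg12) {
        if (lowres) work(1, 1); else work(0, 1);  /* MPEG-1/2 variants */
    } else {
        if (lowres) work(1, 0); else work(0, 0);  /* generic variants  */
    }
}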
2273 
2274 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2275 {
2276  ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
2277  s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
2278  s->first_field, s->low_delay);
2279 }
2280 
2281 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2282  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2283  const int uvlinesize = s->current_picture.f->linesize[1];
2284  const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
2285  const int height_of_mb = 4 - s->avctx->lowres;
2286 
2287  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2288  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2289  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2290  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2291  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2292  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2293  //block_index is not used by mpeg2, so it is not affected by chroma_format
2294 
2295  s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << width_of_mb);
2296  s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
2297  s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
2298 
2299  if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2300  {
2301  if(s->picture_structure==PICT_FRAME){
2302  s->dest[0] += s->mb_y * linesize << height_of_mb;
2303  s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
2304  s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
2305  }else{
2306  s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb;
2307  s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
2308  s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
2309  av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2310  }
2311  }
2312 }
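
/* Editorial sketch, not part of mpegvideo.c: the shift amount used by
 * ff_init_block_index() above. width_of_mb is log2 of the macroblock width
 * in bytes: 16 bytes (shift 4) for 8-bit samples, 32 bytes (shift 5) when a
 * sample takes two bytes, minus one per lowres level; dest[0] then advances
 * one macroblock width per mb_x step. mb_width_shift() is hypothetical. */
static int mb_width_shift(int bits_per_raw_sample, int lowres)
{
    return (4 + (bits_per_raw_sample > 8)) - lowres;  /* 8-bit, lowres 0 -> 4 */
}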
2313 
2314 void ff_mpeg_flush(AVCodecContext *avctx){
2315  int i;
2316  MpegEncContext *s = avctx->priv_data;
2317 
2318  if (!s || !s->picture)
2319  return;
2320 
2321  for (i = 0; i < MAX_PICTURE_COUNT; i++)
2322  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2323  s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2324 
2325  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
2326  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
2327  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
2328 
2329  s->mb_x= s->mb_y= 0;
2330  s->closed_gop= 0;
2331 
2332  s->parse_context.state= -1;
2333  s->parse_context.frame_start_found= 0;
2334  s->parse_context.overread= 0;
2335  s->parse_context.overread_index= 0;
2336  s->parse_context.index= 0;
2337  s->parse_context.last_index= 0;
2338  s->bitstream_buffer_size=0;
2339  s->pp_time=0;
2340 }
2341 
2342 /**
2343  * set qscale and update qscale dependent variables.
2344  */
2345 void ff_set_qscale(MpegEncContext * s, int qscale)
2346 {
2347  if (qscale < 1)
2348  qscale = 1;
2349  else if (qscale > 31)
2350  qscale = 31;
2351 
2352  s->qscale = qscale;
2353  s->chroma_qscale= s->chroma_qscale_table[qscale];
2354 
2355  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2356  s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2357 }
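
/* Editorial sketch, not part of mpegvideo.c: ff_set_qscale() clamps to the
 * 5-bit quantizer range 1..31 shared by MPEG-1/2 and the H.263 family, then
 * derives the chroma quantizer and both DC scalers from per-codec lookup
 * tables. Minimal usage under those assumptions: */
static void set_qscale_example(MpegEncContext *s)
{
    ff_set_qscale(s, 40);   /* out of range: stored as s->qscale == 31 */
    ff_set_qscale(s, 0);    /* out of range: stored as s->qscale == 1  */
}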
2358 
2359 void ff_mpv_report_decode_progress(MpegEncContext *s)
2360 {
2361  if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2362  ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2363 }