FFmpeg
mpegvideo_dec.c
Go to the documentation of this file.
1 /*
2  * Common mpeg video decoding code
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <limits.h>
24 
25 #include "config_components.h"
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/imgutils.h"
29 #include "libavutil/internal.h"
31 
32 #include "avcodec.h"
33 #include "h264chroma.h"
34 #include "internal.h"
35 #include "mpegutils.h"
36 #include "mpegvideo.h"
37 #include "mpegvideodec.h"
38 #include "mpeg4videodec.h"
39 #include "threadframe.h"
40 #include "wmv2dec.h"
41 
{
    /* NOTE(review): the function signature line is missing from this extract;
     * judging by the body this is the decoder-side context init receiving a
     * MpegEncContext *s and an AVCodecContext *avctx — confirm upstream. */

    s->avctx           = avctx;
    /* Use coded dimensions (full encoded size), not display dimensions. */
    s->width           = avctx->coded_width;
    s->height          = avctx->coded_height;
    s->codec_id        = avctx->codec->id;
    s->workaround_bugs = avctx->workaround_bugs;

    /* convert fourcc to upper case */
    s->codec_tag = ff_toupper4(avctx->codec_tag);

    /* 8-bit chroma MC functions; only used by the lowres code paths. */
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
}
57 
                                  const AVCodecContext *src)
{
    /* NOTE(review): the first signature line is missing from this extract;
     * this is the frame-threading context update: dst receives the decoding
     * state of src. Returns 0 on success, a negative AVERROR on failure. */
    MpegEncContext *const s1 = src->priv_data;
    MpegEncContext *const s  = dst->priv_data;
    int ret;

    if (dst == src)
        return 0;

    av_assert0(s != s1);

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit

    /* First update of an uninitialized destination: clone the whole source
     * context, then re-own the fields that must remain per-context. */
    if (!s->context_initialized) {
        void *private_ctx = s->private_ctx;
        int err;
        memcpy(s, s1, sizeof(*s));

        s->avctx                 = dst;
        s->private_ctx           = private_ctx;
        /* The bitstream buffer is per-context; do not share s1's pointer. */
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        if (s1->context_initialized) {
            /* NOTE(review): a line is missing here in this extract
             * (before the common-init call). */
            if ((err = ff_mpv_common_init(s)) < 0) {
                /* Init failed: wipe the copied state so we keep no dangling
                 * pointers into s1's buffers. */
                memset(s, 0, sizeof(*s));
                s->avctx       = dst;
                s->private_ctx = private_ctx;
                memcpy(&s->h264chroma, &s1->h264chroma, sizeof(s->h264chroma));
                return err;
            }
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->height = s1->height;
        s->width  = s1->width;
        /* NOTE(review): the call whose result is assigned to ret is missing
         * from this extract (presumably a frame-size-change handler). */
        return ret;
    }

    s->quarter_sample = s1->quarter_sample;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;

    /* Re-reference every allocated picture of the source context. */
    av_assert0(!s->picture || s->picture != s1->picture);
    if (s->picture)
        for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
            if (s1->picture && s1->picture[i].f->buf[0] &&
                (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
                return ret;
        }

#define UPDATE_PICTURE(pic)\
do {\
    ff_mpeg_unref_picture(s->avctx, &s->pic);\
    if (s1->pic.f && s1->pic.f->buf[0])\
        ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
    else\
        ret = ff_update_picture_tables(&s->pic, &s1->pic);\
    if (ret < 0)\
        return ret;\
} while (0)

    /* NOTE(review): the UPDATE_PICTURE() invocations are missing from this
     * extract (presumably for current/last/next picture). */

/* Translate a Picture pointer of the old context into the matching slot of
 * the new context; NULL when it does not point into old_ctx->picture[]. */
#define REBASE_PICTURE(pic, new_ctx, old_ctx) \
    ((pic && pic >= old_ctx->picture && \
      pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
        &new_ctx->picture[pic - old_ctx->picture] : NULL)

    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->workaround_bugs   = s1->workaround_bugs;
    s->padding_bug_score = s1->padding_bug_score;

    // MPEG-4 timing info: copy the contiguous struct range from
    // last_time_base up to and including pb_field_time in one memcpy.
    memcpy(&s->last_time_base, &s1->last_time_base,
           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
           (char *) &s1->last_time_base);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed = s1->divx_packed;

    /* Copy any pending bitstream data, growing our buffer if needed. */
    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            AV_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size) {
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
            if (!s->bitstream_buffer) {
                s->bitstream_buffer_size = 0;
                return AVERROR(ENOMEM);
            }
        }
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        /* NOTE(review): the final (padding size) argument line of this
         * memset is missing from this extract. */
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
    }

    // linesize-dependent scratch buffer allocation
    if (!s->sc.edge_emu_buffer)
        if (s1->linesize) {
            if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
                                        &s->sc, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
        }

    // MPEG-2/interlacing info: again a contiguous field-range memcpy, from
    // progressive_sequence up to (but not including) rtp_mode.
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    return 0;
}
194 
{
    /* NOTE(review): the signature line is missing from this extract; this is
     * the frame-size-change / context-reinit handler, returning 0 on success
     * or a negative AVERROR (with context_reinit left set on failure). */
    int err = 0;

    if (!s->context_initialized)
        return AVERROR(EINVAL);

    /* NOTE(review): a line is missing here in this extract (presumably
     * freeing the per-frame context state before re-initializing it). */

    /* Force reallocation of every picture at its next use. */
    if (s->picture)
        for (int i = 0; i < MAX_PICTURE_COUNT; i++)
            s->picture[i].needs_realloc = 1;

    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;

    if ((s->width || s->height) &&
        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
        goto fail;

    /* set chroma shifts */
    err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (err < 0)
        goto fail;

    if ((err = ff_mpv_init_context_frame(s)))
        goto fail;

    /* Slice-threading contexts are rebuilt below; clear stale pointers. */
    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0] = s;

    if (s->width && s->height) {
        /* NOTE(review): the call assigned to err is missing from this
         * extract (presumably per-thread duplicate-context init). */
        if (err < 0)
            goto fail;
    }
    s->context_reinit = 0;

    return 0;
 fail:
    /* NOTE(review): a cleanup call is missing here in this extract. */
    s->context_reinit = 1;
    return err;
}
242 
{
    /* NOTE(review): the signature line is missing from this extract;
     * allocates the buffers of one Picture using the context's current
     * geometry, and updates s->linesize / s->uvlinesize as a side effect. */
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, 0,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
}
250 
251 static void gray_frame(AVFrame *frame)
252 {
253  int h_chroma_shift, v_chroma_shift;
254 
255  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
256 
257  for (int i = 0; i < frame->height; i++)
258  memset(frame->data[0] + frame->linesize[0] * i, 0x80, frame->width);
259  for (int i = 0; i < AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
260  memset(frame->data[1] + frame->linesize[1] * i,
261  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
262  memset(frame->data[2] + frame->linesize[2] * i,
263  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
264  }
265 }
266 
/**
 * generic function called after decoding
 * the header and before a frame is decoded.
 */
{
    /* NOTE(review): the signature line is missing from this extract
     * (a MpegEncContext *s and an AVCodecContext *avctx are in scope). */
    Picture *pic;
    int idx, ret;

    s->mb_skipped = 0;

    /* NOTE(review): the condition line of this check is missing from this
     * extract (a frame-threading "can start frame" test). */
        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return -1;
    }

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    /* release non reference/forgotten frames */
    for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference ||
            (&s->picture[i] != s->last_picture_ptr &&
             &s->picture[i] != s->next_picture_ptr &&
             !s->picture[i].needs_realloc)) {
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
        }
    }

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);

    /* Reuse a pre-set, not-yet-allocated image, or grab a free slot. */
    if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
        // we already have an unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
    } else {
        idx = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (idx < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return idx;
        }
        pic = &s->picture[idx];
    }

    /* reference == 3 marks the frame as referenced by both fields. */
    pic->reference = 0;
    if (!s->droppable) {
        if (s->pict_type != AV_PICTURE_TYPE_B)
            pic->reference = 3;
    }

#if FF_API_FRAME_PICTURE_NUMBER
    /* NOTE(review): the deprecation-warning guard lines surrounding this
     * assignment are missing from this extract. */
    pic->f->coded_picture_number = s->coded_picture_number++;
#endif

    if (alloc_picture(s, pic) < 0)
        return -1;

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f->top_field_first = s->top_field_first;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f->top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    }
    s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
                                                  !s->progressive_sequence;
    s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f->pict_type = s->pict_type;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* Advance the reference chain (last <- next <- current) for non-B
     * frames; droppable frames never become references. */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
            s->pict_type, s->droppable);

    /* Missing forward reference for a non-I frame: synthesize a gray dummy
     * frame so prediction has something valid to read from. */
    if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
        (s->pict_type != AV_PICTURE_TYPE_I)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
            /* NOTE(review): the av_log() call line is missing from this extract. */
                   "allocating dummy last picture for B frame\n");
        else if (s->pict_type != AV_PICTURE_TYPE_I)
            /* NOTE(review): the av_log() call line is missing from this extract. */
                   "warning: first frame is no keyframe\n");

        /* Allocate a dummy frame */
        idx = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (idx < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return idx;
        }
        s->last_picture_ptr = &s->picture[idx];

        s->last_picture_ptr->reference    = 3;
        s->last_picture_ptr->f->key_frame = 0;
        s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->last_picture_ptr) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }

        if (!avctx->hwaccel) {
            /* Fill the dummy frame with mid-gray (0x80); chroma only when
             * the planes exist (grayscale formats have none). */
            for (int i = 0; i < avctx->height; i++)
                memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
                       0x80, avctx->width);
            if (s->last_picture_ptr->f->data[2]) {
                for (int i = 0; i < AV_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
                    memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                    memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                }
            }

            /* For FLV1/H.263 use luma 16 instead of mid-gray. */
            if (s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263) {
                for (int i = 0; i < avctx->height; i++)
                    memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0] * i,
                           16, avctx->width);
            }
        }

        /* Mark both fields as fully decoded so waiting threads proceed. */
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    }
    /* Same dummy-frame handling for a missing backward reference of a B
     * frame. */
    if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        idx = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (idx < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return idx;
        }
        s->next_picture_ptr = &s->picture[idx];

        s->next_picture_ptr->reference    = 3;
        s->next_picture_ptr->f->key_frame = 0;
        s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->next_picture_ptr) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
    }

#if 0 // BUFREF-FIXME
    memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
    memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
#endif
    if (s->last_picture_ptr) {
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                     s->last_picture_ptr->f->buf[0]));

    /* Field pictures: offset the data pointers to the requested field and
     * double the linesizes so consecutive lines belong to the same field. */
    if (s->picture_structure != PICT_FRAME) {
        for (int i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] = FF_PTR_ADD(s->current_picture.f->data[i],
                                                           s->current_picture.f->linesize[i]);
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i]    *= 2;
            s->next_picture.f->linesize[i]    *= 2;
        }
    }

    /* set dequantizer, we can't do it during init as
     * it might change for MPEG-4 and we can't do it in the header
     * decode as init is not called for MPEG-4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->avctx->debug & FF_DEBUG_NOMC)
        gray_frame(s->current_picture_ptr->f);

    return 0;
}
489 
/* called after a frame has been decoded. */
{
    /* NOTE(review): the signature line is missing from this extract. */
    emms_c();

    /* If this frame is a reference, mark it fully decoded so other
     * frame threads can stop waiting on it. */
    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}
498 
/* Dump per-MB debug info (MB types, qscale table, motion vectors) of
 * picture p onto/alongside the output frame pict. */
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
{
    ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
                         p->qscale_table, p->motion_val,
                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
}
505 
/* Export the per-macroblock quantizer table of picture p as
 * AVVideoEncParams side data on frame f (when the user requested it).
 * Returns 0 on success or AVERROR(ENOMEM). */
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
{
    AVVideoEncParams *par;
    /* MPEG-1-style qscale is stored at half scale; multiply back up. */
    int mult = (qp_type == FF_MPV_QSCALE_TYPE_MPEG1) ? 2 : 1;
    unsigned int nb_mb = p->alloc_mb_height * p->alloc_mb_width;

    if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
        return 0;

    /* NOTE(review): the side-data allocation call assigning par is missing
     * from this extract; nb_mb is presumably passed to it. */
    if (!par)
        return AVERROR(ENOMEM);

    /* One 16x16 block entry per macroblock with its scaled QP. */
    for (unsigned y = 0; y < p->alloc_mb_height; y++)
        for (unsigned x = 0; x < p->alloc_mb_width; x++) {
            const unsigned int block_idx = y * p->alloc_mb_width + x;
            const unsigned int     mb_xy = y * p->alloc_mb_stride + x;
            AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);

            b->src_x = x * 16;
            b->src_y = y * 16;
            b->w     = 16;
            b->h     = 16;

            b->delta_qp = p->qscale_table[mb_xy] * mult;
        }

    return 0;
}
535 
{
    /* NOTE(review): the signature line is missing from this extract; this
     * forwards a band of freshly decoded rows (y .. y+h) to the user's
     * draw_horiz_band callback via the shared helper. */
    ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
                       s->last_picture_ptr ? s->last_picture_ptr->f : NULL,
                       y, h, s->picture_structure,
                       s->first_field, s->low_delay);
}
543 
{
    /* NOTE(review): the signature line is missing from this extract; this
     * is the decoder flush callback taking an AVCodecContext *avctx. */
    MpegEncContext *const s = avctx->priv_data;

    if (!s->picture)
        return;

    /* Drop every picture reference and reset the decode position. */
    for (int i = 0; i < MAX_PICTURE_COUNT; i++)
        ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
    s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);

    s->mb_x = s->mb_y = 0;

    /* Discard buffered bitstream data and MPEG-4 PP timing. */
    s->bitstream_buffer_size = 0;
    s->pp_time = 0;
}
564 
{
    /* NOTE(review): the signature line is missing from this extract.
     * Reports row-level decoding progress of the current frame to other
     * frame threads; skipped for B-frames, partitioned frames, and after
     * an error (when earlier rows may still change). */
    if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
        ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
}
570 
571 
                              uint8_t *dest, const uint8_t *src,
                              int field_based, int field_select,
                              int src_x, int src_y,
                              int width, int height, ptrdiff_t stride,
                              int h_edge_pos, int v_edge_pos,
                              int w, int h, const h264_chroma_mc_func *pix_op,
                              int motion_x, int motion_y)
{
    /* NOTE(review): the first signature line (return type, name and the
     * context parameter) is missing from this extract; this is the lowres
     * half-pel luma MC helper. Returns 1 if edge emulation was used. */
    const int lowres   = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 3);
    /* Mask of the fractional MV bits that remain at this lowres level. */
    const int s_mask   = (2 << lowres) - 1;
    int emu = 0;
    int sx, sy;

    /* Quarter-pel MVs are approximated as half-pel in lowres. */
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    /* Source area crosses the picture edge: emulate it into scratch. */
    if ((unsigned)src_x > FFMAX( h_edge_pos                  - (!!sx) - w, 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y * (1 << field_based),
        /* NOTE(review): the edge-position argument line of this call is
         * missing from this extract. */
        src = s->sc.edge_emu_buffer;
        emu = 1;
    }

    /* Rescale sub-pel offsets to the 1/8-pel grid of the MC functions. */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
617 
/* apply one mpeg motion vector to the three components */
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based,
                               int bottom_field,
                               int field_select,
                               uint8_t *const *ref_picture,
                               const h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y,
                               int h, int mb_y)
{
    /* NOTE(review): the first signature line (name and context parameter)
     * is missing from this extract. */
    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t uvlinesize, linesize;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres - 1 + s->chroma_x_shift, 3);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    /* Field-based MC walks every second line of the frame buffer. */
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if (field_based) {
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    /* Derive chroma source position and sub-pel per output format. */
    if (s->out_format == FMT_H263) {
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
    } else {
        if (s->chroma_y_shift) {
            // Chroma 4:2:0: both axes subsampled.
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if (s->chroma_x_shift) {
                //Chroma422
                mx      = motion_x / 2;
                uvsx    = mx & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
                //Chroma444
                uvsx    = motion_x & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* Source crosses the picture edge: emulate all three planes into the
     * shared scratch buffer. */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y * (1 << field_based), h_edge_pos,
                                 v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     uvlinesize >> field_based,uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    /* Rescale sub-pel offsets to the 1/8-pel grid of the MC functions. */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}
757 
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t *const *ref_picture,
                                     const h264_chroma_mc_func * pix_op,
                                     int mx, int my)
{
    /* NOTE(review): the first signature line is missing from this extract;
     * this applies the single rounded chroma MV used for 8x8-partitioned
     * macroblocks (mx/my are the sums of the four luma MVs). */
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 3);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    /* Chroma planes are subsampled, hence the extra >> 1 of the edges. */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    const uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    /* Edge-emulate the Cb plane when needed; remember the decision so the
     * Cr plane (same coordinates) can reuse it below. */
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    /* Rescale sub-pel offsets to the 1/8-pel grid of the MC functions. */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
814 
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t *const *ref_picture,
                                     const h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* One vector for the whole macroblock. */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* Four luma vectors; chroma gets one averaged vector below. */
        mx = 0;
        my = 0;
        for (int i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                                            s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* Field picture: for the second field, the opposite-parity
             * reference lives in the current frame, not in ref_picture. */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f->data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* Two vectors, one per 16x8 half of the macroblock. */
        for (int i = 0; i < 2; i++) {
            uint8_t *const *ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* Dual prime: put the first prediction, then average in the second
         * (pix_op is switched to the avg table after the first pass). */
        if (s->picture_structure == PICT_FRAME) {
            for (int i = 0; i < 2; i++) {
                for (int j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (int i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
955 
/**
 * find the lowest MB row referenced in the MVs
 */
{
    /* NOTE(review): the signature line is missing from this extract; this
     * takes the context and a prediction direction `dir` and returns a
     * macroblock row index clipped to [0, mb_height - 1]. */
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int off, mvs;

    /* Field pictures and GMC (mcsel) are not analyzed; assume the worst
     * case, i.e. the last MB row. */
    if (s->picture_structure != PICT_FRAME || s->mcsel)
        goto unhandled;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    /* Track the extreme vertical MV components. */
    for (int i = 0; i < mvs; i++) {
        int my = s->mv[dir][i][1];
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    /* Scale to quarter-pel, then round up to MB rows (16 px = 64 qpel). */
    off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;

    return av_clip(s->mb_y + off, 0, s->mb_height - 1);
unhandled:
    return s->mb_height - 1;
}
993 
994 /* add block[] to dest[] */
995 static inline void add_dct(MpegEncContext *s,
996  int16_t *block, int i, uint8_t *dest, int line_size)
997 {
998  if (s->block_last_index[i] >= 0) {
999  s->idsp.idct_add(dest, line_size, block);
1000  }
1001 }
1002 
#define IS_ENCODER 0
/* NOTE(review): a line is missing here in this extract (presumably the
 * #include of the shared reconstruct-MB template). */

{
    /* NOTE(review): the signature line is missing from this extract; this
     * is the decoder entry point reconstructing one macroblock from the 6
     * (4 luma + 2 chroma) coefficient blocks. */
    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 64; j++) {
                /* Print in the IDCT's native (permuted) coefficient order. */
                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->idsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    if (!s->avctx->lowres) {
#if !CONFIG_SMALL
        if (s->out_format == FMT_MPEG1)
            /* NOTE(review): the call line is missing from this extract. */
        else
            /* NOTE(review): the call line is missing from this extract. */
#else
        /* NOTE(review): the call line is missing from this extract. */
#endif
    } else
        /* NOTE(review): the lowres call line is missing from this extract. */
}
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:98
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:682
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:51
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1405
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:82
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:262
av_clip
#define av_clip
Definition: common.h:95
ff_mpeg_framesize_alloc
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
Definition: mpegpicture.c:87
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1311
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:506
ff_mpv_init_context_frame
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:539
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:521
MAY_BE_MPEG12
#define MAY_BE_MPEG12
Definition: mpv_reconstruct_mb_template.c:24
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:264
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:885
NOT_MPEG12
#define NOT_MPEG12
Definition: mpv_reconstruct_mb_template.c:23
mpv_reconstruct_mb_template.c
Picture::alloc_mb_width
int alloc_mb_width
mb_width used to allocate tables
Definition: mpegpicture.h:65
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:170
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:565
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
last_picture
enum AVPictureType last_picture
Definition: movenc.c:69
b
#define b
Definition: input.c:41
MpegEncContext::next_picture
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:158
ff_toupper4
unsigned int ff_toupper4(unsigned int x)
Definition: to_upper4.h:29
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:291
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:85
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Picture
Picture.
Definition: mpegpicture.h:46
mpegutils.h
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:266
AV_VIDEO_ENC_PARAMS_MPEG2
@ AV_VIDEO_ENC_PARAMS_MPEG2
Definition: video_enc_params.h:65
DEFINITELY_MPEG12
#define DEFINITELY_MPEG12
Definition: mpv_reconstruct_mb_template.c:25
gray_frame
static void gray_frame(AVFrame *frame)
Definition: mpegvideo_dec.c:251
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:1006
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:435
fail
#define fail()
Definition: checkasm.h:134
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:124
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2916
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo_dec.c:572
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:613
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:499
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:60
avassert.h
mpegvideodec.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:544
ff_mpv_common_frame_size_change
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:195
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:256
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture; frees the picture tables in case they need to be reallocated anyway.
Definition: mpegpicture.c:312
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:50
MpegEncContext::h_edge_pos
int h_edge_pos
Definition: mpegvideo.h:122
s1
#define s1
Definition: regdef.h:38
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
FMT_H261
@ FMT_H261
Definition: mpegutils.h:118
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
UPDATE_PICTURE
#define UPDATE_PICTURE(pic)
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
limits.h
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:77
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:453
threadframe.h
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:273
NULL
#define NULL
Definition: coverity.c:32
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpv_reconstruct_mb_template.c:56
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:284
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:342
AVFrame::coded_picture_number
attribute_deprecated int coded_picture_number
picture number in bitstream order
Definition: frame.h:459
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:99
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1326
lowres
static int lowres
Definition: ffplay.c:335
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:254
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::private_ctx
void * private_ctx
Definition: mpegvideo.h:88
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:263
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo_dec.c:995
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1364
f
f
Definition: af_crystalizer.c:122
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:301
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:371
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:122
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
h264chroma.h
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:536
Picture::alloc_mb_height
int alloc_mb_height
mb_height used to allocate tables
Definition: mpegpicture.h:66
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:271
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:544
height
#define height
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:265
Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:54
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo_dec.c:959
AVCodec::id
enum AVCodecID id
Definition: codec.h:198
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:125
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:42
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo_dec.c:619
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:284
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
AVCodecContext::height
int height
Definition: avcodec.h:598
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo_dec.c:826
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1373
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo_dec.c:758
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
Picture::qscale_table
int8_t * qscale_table
Definition: mpegpicture.h:51
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic)
Definition: mpegvideo_dec.c:243
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:394
ff_mpv_free_context_frame
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:760
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:58
AVCodecContext
main external API structure.
Definition: avcodec.h:426
Picture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:57
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:47
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:491
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:81
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:613
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
ff_mpv_init_duplicate_contexts
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:410
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:451
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:453
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:555
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:598
imgutils.h
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Definition: video_enc_params.h:143
h
h
Definition: vp9dsp_template.c:2038
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
Picture::alloc_mb_stride
int alloc_mb_stride
mb_stride used to allocate tables
Definition: mpegpicture.h:67
video_enc_params.h
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint8_t *mbskip_table, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:157