FFmpeg
mpegvideo_dec.c
Go to the documentation of this file.
1 /*
2  * Common mpeg video decoding code
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <limits.h>
24 
25 #include "config_components.h"
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/emms.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
32 
33 #include "avcodec.h"
34 #include "decode.h"
35 #include "h264chroma.h"
36 #include "internal.h"
37 #include "mpegutils.h"
38 #include "mpegvideo.h"
39 #include "mpegvideodec.h"
40 #include "mpeg4videodec.h"
41 #include "thread.h"
42 #include "threadframe.h"
43 #include "wmv2dec.h"
44 
/* Decoder-side one-time initialization of the MpegEncContext: caches the
 * coded dimensions, codec id and bug-workaround flags from the
 * AVCodecContext and normalizes the codec fourcc.
 * NOTE(review): the function signature (doc line 45) and doc lines 47/58
 * are missing from this extract -- confirm against the upstream source. */
46 {
48 
 /* Cache caller-supplied parameters in the shared context. */
49  s->avctx = avctx;
50  s->width = avctx->coded_width;
51  s->height = avctx->coded_height;
52  s->codec_id = avctx->codec->id;
53  s->workaround_bugs = avctx->workaround_bugs;
54 
55  /* convert fourcc to upper case */
56  s->codec_tag = ff_toupper4(avctx->codec_tag);
57 
59  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
60 }
61 
/* Copy decoding state from one frame-threading context (src/s1) into
 * another (dst/s): picture references, timing fields, bitstream buffer and
 * scratch buffers, so the destination thread can continue decoding.
 * NOTE(review): the first signature line (doc line 62), the resize call on
 * doc line 97 and the UPDATE_PICTURE invocations on doc lines 126-128 are
 * missing from this extract -- confirm against the upstream source. */
63  const AVCodecContext *src)
64 {
65  MpegEncContext *const s1 = src->priv_data;
66  MpegEncContext *const s = dst->priv_data;
67  int ret;
68 
69  if (dst == src)
70  return 0;
71 
72  av_assert0(s != s1);
73 
74  // FIXME can parameters change on I-frames?
75  // in that case dst may need a reinit
76  if (!s->context_initialized) {
 /* First-time setup: clone s1 wholesale, then restore the fields that
  * must stay per-context (private_ctx, avctx, bitstream buffer). */
77  void *private_ctx = s->private_ctx;
78  int err;
79  memcpy(s, s1, sizeof(*s));
80 
81  s->context_initialized = 0;
82  s->context_reinit = 0;
83  s->avctx = dst;
84  s->private_ctx = private_ctx;
85  s->bitstream_buffer = NULL;
86  s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
87 
88  if (s1->context_initialized) {
89  if ((err = ff_mpv_common_init(s)) < 0)
90  return err;
91  }
92  }
93 
 /* Dimension change or pending reinit in the source context. */
94  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
95  s->height = s1->height;
96  s->width = s1->width;
98  return ret;
99  }
100 
101  s->quarter_sample = s1->quarter_sample;
102 
103  s->coded_picture_number = s1->coded_picture_number;
104  s->picture_number = s1->picture_number;
105 
 /* Re-reference every allocated picture of s1 into s's picture array. */
106  av_assert0(!s->picture || s->picture != s1->picture);
107  if (s->picture)
108  for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
109  ff_mpeg_unref_picture(&s->picture[i]);
110  if (s1->picture && s1->picture[i].f->buf[0] &&
111  (ret = ff_mpeg_ref_picture(&s->picture[i], &s1->picture[i])) < 0)
112  return ret;
113  }
114 
115 #define UPDATE_PICTURE(pic)\
116 do {\
117  ff_mpeg_unref_picture(&s->pic);\
118  if (s1->pic.f && s1->pic.f->buf[0])\
119  ret = ff_mpeg_ref_picture(&s->pic, &s1->pic);\
120  else\
121  ret = ff_update_picture_tables(&s->pic, &s1->pic);\
122  if (ret < 0)\
123  return ret;\
124 } while (0)
125 
129 
 /* Translate a Picture pointer inside s1's picture array into the
  * corresponding slot of s's array; NULL when outside the array. */
130 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
131  ((pic && pic >= old_ctx->picture && \
132  pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
133  &new_ctx->picture[pic - old_ctx->picture] : NULL)
134 
135  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
136  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
137  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
138 
139  // Error/bug resilience
140  s->workaround_bugs = s1->workaround_bugs;
141  s->padding_bug_score = s1->padding_bug_score;
142 
 // MPEG-4 timing info: bulk-copies the field range
 // [last_time_base, pb_field_time] by address arithmetic.
143  // MPEG-4 timing info
144  memcpy(&s->last_time_base, &s1->last_time_base,
145  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
146  (char *) &s1->last_time_base);
147 
148  // B-frame info
149  s->max_b_frames = s1->max_b_frames;
150  s->low_delay = s1->low_delay;
151  s->droppable = s1->droppable;
152 
153  // DivX handling (doesn't work)
154  s->divx_packed = s1->divx_packed;
155 
156  if (s1->bitstream_buffer) {
157  av_fast_padded_malloc(&s->bitstream_buffer,
158  &s->allocated_bitstream_buffer_size,
159  s1->bitstream_buffer_size);
160  if (!s->bitstream_buffer) {
161  s->bitstream_buffer_size = 0;
162  return AVERROR(ENOMEM);
163  }
164  s->bitstream_buffer_size = s1->bitstream_buffer_size;
165  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
166  s1->bitstream_buffer_size);
167  }
168 
169  // linesize-dependent scratch buffer allocation
170  if (!s->sc.edge_emu_buffer)
171  if (s1->linesize) {
172  if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
173  &s->sc, s1->linesize) < 0) {
174  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
175  "scratch buffers.\n");
176  return AVERROR(ENOMEM);
177  }
178  } else {
179  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
180  "be allocated due to unknown size.\n");
181  }
182 
 // MPEG-2/interlacing info: bulk copy of the field range
 // [progressive_sequence, rtp_mode).
183  // MPEG-2/interlacing info
184  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
185  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
186 
187  return 0;
188 }
189 
/* Re-initialize the frame-size-dependent parts of an already-initialized
 * context (after a mid-stream resolution change): drops the cached picture
 * pointers, revalidates dimensions, recomputes chroma shifts and rebuilds
 * the per-frame tables. On failure, context_reinit stays set so the next
 * call retries.
 * NOTE(review): the signature (doc line 190) and doc lines 197, 225, 233
 * are missing from this extract -- confirm against the upstream source. */
191 {
192  int err = 0;
193 
194  if (!s->context_initialized)
195  return AVERROR(EINVAL);
196 
198 
 /* Force reallocation of every picture at its next use. */
199  if (s->picture)
200  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
201  s->picture[i].needs_realloc = 1;
202 
203  s->last_picture_ptr =
204  s->next_picture_ptr =
205  s->current_picture_ptr = NULL;
206 
207  if ((s->width || s->height) &&
208  (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
209  goto fail;
210 
211  /* set chroma shifts */
212  err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
213  &s->chroma_x_shift,
214  &s->chroma_y_shift);
215  if (err < 0)
216  goto fail;
217 
218  if ((err = ff_mpv_init_context_frame(s)))
219  goto fail;
220 
221  memset(s->thread_context, 0, sizeof(s->thread_context));
222  s->thread_context[0] = s;
223 
224  if (s->width && s->height) {
226  if (err < 0)
227  goto fail;
228  }
229  s->context_reinit = 0;
230 
231  return 0;
232  fail:
234  s->context_reinit = 1;
235  return err;
236 }
237 
/* Allocate the frame buffer and decoding side tables for one Picture.
 * Returns 0 on success, a negative error code on failure.
 * NOTE(review): the signature (doc line 238) and doc lines 247-250, 256,
 * 261 and 269 (the get_buffer branch, default-buffer call, hwaccel private
 * allocation and failure unref) are missing from this extract -- confirm
 * against the upstream source. */
239 {
240  AVCodecContext *avctx = s->avctx;
241  int ret;
242 
 /* The ThreadFrame wraps the same AVFrame as the Picture. */
243  pic->tf.f = pic->f;
244 
245  /* WM Image / Screen codecs allocate internal buffers with different
246  * dimensions / colorspaces; ignore user-defined callbacks for these. */
251  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
252  } else {
253  pic->f->width = avctx->width;
254  pic->f->height = avctx->height;
255  pic->f->format = avctx->pix_fmt;
257  }
258  if (ret < 0)
259  goto fail;
260 
262  if (ret < 0)
263  goto fail;
264 
 /* Final step: allocate MB/QP/motion tables and record line sizes. */
265  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, s->out_format,
266  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
267  &s->linesize, &s->uvlinesize);
268 fail:
270  return ret;
271 }
272 
273 static void color_frame(AVFrame *frame, int luma)
274 {
275  int h_chroma_shift, v_chroma_shift;
276 
277  for (int i = 0; i < frame->height; i++)
278  memset(frame->data[0] + frame->linesize[0] * i, luma, frame->width);
279 
280  if (!frame->data[1])
281  return;
282  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
283  for (int i = 0; i < AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
284  memset(frame->data[1] + frame->linesize[1] * i,
285  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
286  memset(frame->data[2] + frame->linesize[2] * i,
287  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
288  }
289 }
290 
291 /**
292  * generic function called after decoding
293  * the header and before a frame is decoded.
294  */
/* NOTE(review): the signature (doc line 295), the SETUP-state condition on
 * doc line 302, the deprecation-warning guards (doc lines 348/350) and the
 * av_log call heads on doc lines 394/397 are missing from this extract --
 * confirm against the upstream source. */
296 {
297  Picture *pic;
298  int idx, ret;
299 
300  s->mb_skipped = 0;
301 
303  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
304  return -1;
305  }
306 
307  /* mark & release old frames */
308  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
309  s->last_picture_ptr != s->next_picture_ptr &&
310  s->last_picture_ptr->f->buf[0]) {
311  ff_mpeg_unref_picture(s->last_picture_ptr);
312  }
313 
314  /* release non reference/forgotten frames */
315  for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
316  if (!s->picture[i].reference ||
317  (&s->picture[i] != s->last_picture_ptr &&
318  &s->picture[i] != s->next_picture_ptr &&
319  !s->picture[i].needs_realloc)) {
320  ff_mpeg_unref_picture(&s->picture[i]);
321  }
322  }
323 
324  ff_mpeg_unref_picture(&s->current_picture);
325  ff_mpeg_unref_picture(&s->last_picture);
326  ff_mpeg_unref_picture(&s->next_picture);
327 
 /* Pick the slot that will hold the frame being decoded. */
328  if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
329  // we already have an unused image
330  // (maybe it was set before reading the header)
331  pic = s->current_picture_ptr;
332  } else {
333  idx = ff_find_unused_picture(s->avctx, s->picture, 0);
334  if (idx < 0) {
335  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
336  return idx;
337  }
338  pic = &s->picture[idx];
339  }
340 
 /* Non-B, non-droppable frames become references (value 3 = both fields). */
341  pic->reference = 0;
342  if (!s->droppable) {
343  if (s->pict_type != AV_PICTURE_TYPE_B)
344  pic->reference = 3;
345  }
346 
347 #if FF_API_FRAME_PICTURE_NUMBER
349  pic->f->coded_picture_number = s->coded_picture_number++;
351 #endif
352 
353  if (alloc_picture(s, pic) < 0)
354  return -1;
355 
356  s->current_picture_ptr = pic;
357  // FIXME use only the vars from current_pic
 /* Propagate interlacing metadata into the output frame flags. */
358  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !!s->top_field_first;
359  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
360  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
361  if (s->picture_structure != PICT_FRAME)
362  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST *
363  ((s->picture_structure == PICT_TOP_FIELD) == s->first_field);
364  }
365  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_INTERLACED * (!s->progressive_frame &&
366  !s->progressive_sequence);
367  s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
368 
369  s->current_picture_ptr->f->pict_type = s->pict_type;
370  if (s->pict_type == AV_PICTURE_TYPE_I)
371  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_KEY;
372  else
373  s->current_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
374 
375  if ((ret = ff_mpeg_ref_picture(&s->current_picture,
376  s->current_picture_ptr)) < 0)
377  return ret;
378 
 /* Rotate the reference pointers: next becomes last, current becomes next. */
379  if (s->pict_type != AV_PICTURE_TYPE_B) {
380  s->last_picture_ptr = s->next_picture_ptr;
381  if (!s->droppable)
382  s->next_picture_ptr = s->current_picture_ptr;
383  }
384  ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
385  s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
386  s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
387  s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
388  s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
389  s->pict_type, s->droppable);
390 
 /* Missing last reference (broken stream / stream start): synthesize one. */
391  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
392  (s->pict_type != AV_PICTURE_TYPE_I)) {
393  if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
395  "allocating dummy last picture for B frame\n");
396  else if (s->pict_type != AV_PICTURE_TYPE_I)
398  "warning: first frame is no keyframe\n");
399 
400  /* Allocate a dummy frame */
401  idx = ff_find_unused_picture(s->avctx, s->picture, 0);
402  if (idx < 0) {
403  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
404  return idx;
405  }
406  s->last_picture_ptr = &s->picture[idx];
407 
408  s->last_picture_ptr->reference = 3;
409  s->last_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
410  s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
411 
412  if (alloc_picture(s, s->last_picture_ptr) < 0) {
413  s->last_picture_ptr = NULL;
414  return -1;
415  }
416 
 /* Grey fill; FLV1/H.263 use luma 16 ("black-ish"), others mid-grey. */
417  if (!avctx->hwaccel) {
418  int luma_val = s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263 ? 16 : 0x80;
419  color_frame(s->last_picture_ptr->f, luma_val);
420  }
421 
422  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
423  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
424  }
 /* Same for a missing next reference when decoding a B frame. */
425  if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
426  s->pict_type == AV_PICTURE_TYPE_B) {
427  /* Allocate a dummy frame */
428  idx = ff_find_unused_picture(s->avctx, s->picture, 0);
429  if (idx < 0) {
430  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
431  return idx;
432  }
433  s->next_picture_ptr = &s->picture[idx];
434 
435  s->next_picture_ptr->reference = 3;
436  s->next_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
437  s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
438 
439  if (alloc_picture(s, s->next_picture_ptr) < 0) {
440  s->next_picture_ptr = NULL;
441  return -1;
442  }
443  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
444  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
445  }
446 
447  if (s->last_picture_ptr) {
448  if (s->last_picture_ptr->f->buf[0] &&
449  (ret = ff_mpeg_ref_picture(&s->last_picture,
450  s->last_picture_ptr)) < 0)
451  return ret;
452  }
453  if (s->next_picture_ptr) {
454  if (s->next_picture_ptr->f->buf[0] &&
455  (ret = ff_mpeg_ref_picture(&s->next_picture,
456  s->next_picture_ptr)) < 0)
457  return ret;
458  }
459 
460  av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
461  s->last_picture_ptr->f->buf[0]));
462 
 /* Field pictures: double the line sizes so rows address one field only,
  * and start the bottom field one line down. */
463  if (s->picture_structure != PICT_FRAME) {
464  for (int i = 0; i < 4; i++) {
465  if (s->picture_structure == PICT_BOTTOM_FIELD) {
466  s->current_picture.f->data[i] = FF_PTR_ADD(s->current_picture.f->data[i],
467  s->current_picture.f->linesize[i]);
468  }
469  s->current_picture.f->linesize[i] *= 2;
470  s->last_picture.f->linesize[i] *= 2;
471  s->next_picture.f->linesize[i] *= 2;
472  }
473  }
474 
475  /* set dequantizer, we can't do it during init as
476  * it might change for MPEG-4 and we can't do it in the header
477  * decode as init is not called for MPEG-4 there yet */
478  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
479  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
480  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
481  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
482  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
483  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
484  } else {
485  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
486  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
487  }
488 
489  if (s->avctx->debug & FF_DEBUG_NOMC)
490  color_frame(s->current_picture_ptr->f, 0x80);
491 
492  return 0;
493 }
494 
495 /* called after a frame has been decoded. */
/* Clears the MMX state and, for reference frames, marks decoding progress
 * as complete so other frame threads may read from this picture.
 * NOTE(review): the signature (doc line 496) is missing from this extract. */
497 {
 /* Reset x87/MMX state after DSP code (no-op on non-x86). */
498  emms_c();
499 
500  if (s->current_picture.reference)
501  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
502 }
503 
504 void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
505 {
506  ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
507  p->qscale_table, p->motion_val,
508  s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
509 }
510 
/* Export the per-macroblock quantizer table of Picture p as
 * AVVideoEncParams side data on frame f, when the caller requested it via
 * AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS. Returns 0 on success or when
 * export is disabled, AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): the side-data creation call (doc line 520, assigning `par`)
 * is missing from this extract -- confirm against the upstream source. */
511 int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
512 {
513  AVVideoEncParams *par;
 /* MPEG-1 qscale is stored at half scale; double it for export. */
514  int mult = (qp_type == FF_MPV_QSCALE_TYPE_MPEG1) ? 2 : 1;
515  unsigned int nb_mb = p->alloc_mb_height * p->alloc_mb_width;
516 
517  if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
518  return 0;
519 
521  if (!par)
522  return AVERROR(ENOMEM);
523 
 /* One 16x16 block entry per macroblock, indexed row-major. */
524  for (unsigned y = 0; y < p->alloc_mb_height; y++)
525  for (unsigned x = 0; x < p->alloc_mb_width; x++) {
526  const unsigned int block_idx = y * p->alloc_mb_width + x;
527  const unsigned int mb_xy = y * p->alloc_mb_stride + x;
528  AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);
529 
530  b->src_x = x * 16;
531  b->src_y = y * 16;
532  b->w = 16;
533  b->h = 16;
534 
535  b->delta_qp = p->qscale_table[mb_xy] * mult;
536  }
537 
538  return 0;
539 }
540 
/* Notify the user of a completed horizontal band of the current picture
 * via the generic ff_draw_horiz_band() helper.
 * NOTE(review): the signature (doc line 541, presumably taking y and h) is
 * missing from this extract -- confirm against the upstream source. */
542 {
543  ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
544  s->last_picture_ptr ? s->last_picture_ptr->f : NULL,
545  y, h, s->picture_structure,
546  s->first_field, s->low_delay);
547 }
548 
/* Flush callback: drop every cached picture and reset per-frame decoding
 * state (used on seek). Safe to call before the picture array exists.
 * NOTE(review): the signature (doc line 549) is missing from this extract. */
550 {
551  MpegEncContext *const s = avctx->priv_data;
552 
553  if (!s->picture)
554  return;
555 
556  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
557  ff_mpeg_unref_picture(&s->picture[i]);
558  s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
559 
560  ff_mpeg_unref_picture(&s->current_picture);
561  ff_mpeg_unref_picture(&s->last_picture);
562  ff_mpeg_unref_picture(&s->next_picture);
563 
564  s->mb_x = s->mb_y = 0;
565 
566  s->bitstream_buffer_size = 0;
567  s->pp_time = 0;
568 }
569 
/* Report row-level decode progress to other frame threads, but only when
 * rows complete in order (not for B frames, partitioned frames, or after
 * an error, where later rows may still change).
 * NOTE(review): the signature (doc line 570) is missing from this extract. */
571 {
572  if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
573  ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
574 }
575 
576 
/* Half-pel motion compensation of one block at reduced resolution
 * (lowres). Returns 1 when the edge-emulation buffer was used, else 0.
 * NOTE(review): the first signature line (doc line 577) and the edge-pos
 * arguments of the emulated_edge_mc call (doc line 610) are missing from
 * this extract -- confirm against the upstream source. */
578  uint8_t *dest, const uint8_t *src,
579  int field_based, int field_select,
580  int src_x, int src_y,
581  int width, int height, ptrdiff_t stride,
582  int h_edge_pos, int v_edge_pos,
583  int w, int h, const h264_chroma_mc_func *pix_op,
584  int motion_x, int motion_y)
585 {
586  const int lowres = s->avctx->lowres;
587  const int op_index = FFMIN(lowres, 3);
 /* Sub-pel mask: keeps lowres+1 fractional bits of the MV. */
588  const int s_mask = (2 << lowres) - 1;
589  int emu = 0;
590  int sx, sy;
591 
 /* Quarter-pel MVs are halved to half-pel precision first. */
592  if (s->quarter_sample) {
593  motion_x /= 2;
594  motion_y /= 2;
595  }
596 
597  sx = motion_x & s_mask;
598  sy = motion_y & s_mask;
599  src_x += motion_x >> lowres + 1;
600  src_y += motion_y >> lowres + 1;
601 
602  src += src_y * stride + src_x;
603 
 /* Out-of-picture reads go through the edge-emulation scratch buffer. */
604  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
605  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
606  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
607  s->linesize, s->linesize,
608  w + 1, (h + 1) << field_based,
609  src_x, src_y * (1 << field_based),
611  src = s->sc.edge_emu_buffer;
612  emu = 1;
613  }
614 
 /* Rescale the sub-pel phase into the 1/8-pel range pix_op expects. */
615  sx = (sx << 2) >> lowres;
616  sy = (sy << 2) >> lowres;
617  if (field_select)
618  src += s->linesize;
619  pix_op[op_index](dest, src, stride, h, sx, sy);
620  return emu;
621 }
622 
623 /* apply one mpeg motion vector to the three components */
/* Lowres motion compensation of one MB: derives the luma and chroma source
 * positions and sub-pel phases for the active out_format (H.263 / H.261 /
 * MPEG with 4:2:0, 4:2:2 or 4:4:4 chroma), edge-emulates out-of-frame
 * reads, then runs pix_op on Y, Cb and Cr.
 * NOTE(review): the first signature line (doc line 624) is missing from
 * this extract -- confirm against the upstream source. */
625  uint8_t *dest_y,
626  uint8_t *dest_cb,
627  uint8_t *dest_cr,
628  int field_based,
629  int bottom_field,
630  int field_select,
631  uint8_t *const *ref_picture,
632  const h264_chroma_mc_func *pix_op,
633  int motion_x, int motion_y,
634  int h, int mb_y)
635 {
636  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
637  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
638  ptrdiff_t uvlinesize, linesize;
639  const int lowres = s->avctx->lowres;
640  const int op_index = FFMIN(lowres - 1 + s->chroma_x_shift, 3);
641  const int block_s = 8 >> lowres;
642  const int s_mask = (2 << lowres) - 1;
643  const int h_edge_pos = s->h_edge_pos >> lowres;
644  const int v_edge_pos = s->v_edge_pos >> lowres;
 /* Chroma row count: halved (rounded per field) for 4:2:0. */
645  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
646  linesize = s->current_picture.f->linesize[0] << field_based;
647  uvlinesize = s->current_picture.f->linesize[1] << field_based;
648 
649  // FIXME obviously not perfect but qpel will not work in lowres anyway
650  if (s->quarter_sample) {
651  motion_x /= 2;
652  motion_y /= 2;
653  }
654 
655  if (field_based) {
656  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
657  }
658 
659  sx = motion_x & s_mask;
660  sy = motion_y & s_mask;
661  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
662  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
663 
 /* Chroma position depends on the bitstream's chroma MV convention. */
664  if (s->out_format == FMT_H263) {
665  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
666  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
667  uvsrc_x = src_x >> 1;
668  uvsrc_y = src_y >> 1;
669  } else if (s->out_format == FMT_H261) {
670  // even chroma mv's are full pel in H261
671  mx = motion_x / 4;
672  my = motion_y / 4;
673  uvsx = (2 * mx) & s_mask;
674  uvsy = (2 * my) & s_mask;
675  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
676  uvsrc_y = mb_y * block_s + (my >> lowres);
677  } else {
678  if (s->chroma_y_shift) {
679  mx = motion_x / 2;
680  my = motion_y / 2;
681  uvsx = mx & s_mask;
682  uvsy = my & s_mask;
683  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
684  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
685  } else {
686  if (s->chroma_x_shift) {
687  //Chroma422
688  mx = motion_x / 2;
689  uvsx = mx & s_mask;
690  uvsy = motion_y & s_mask;
691  uvsrc_y = src_y;
692  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
693  } else {
694  //Chroma444
695  uvsx = motion_x & s_mask;
696  uvsy = motion_y & s_mask;
697  uvsrc_x = src_x;
698  uvsrc_y = src_y;
699  }
700  }
701  }
702 
703  ptr_y = ref_picture[0] + src_y * linesize + src_x;
704  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
705  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
706 
 /* Edge emulation for reads outside the decoded picture area. */
707  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
708  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - FFMAX(h, hc<<s->chroma_y_shift), 0)) {
709  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
710  linesize >> field_based, linesize >> field_based,
711  17, 17 + field_based,
712  src_x, src_y * (1 << field_based), h_edge_pos,
713  v_edge_pos);
714  ptr_y = s->sc.edge_emu_buffer;
715  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
716  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
717  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
718  if (s->workaround_bugs & FF_BUG_IEDGE)
719  vbuf -= s->uvlinesize;
720  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
721  uvlinesize >> field_based, uvlinesize >> field_based,
722  9, 9 + field_based,
723  uvsrc_x, uvsrc_y * (1 << field_based),
724  h_edge_pos >> 1, v_edge_pos >> 1);
725  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
726  uvlinesize >> field_based,uvlinesize >> field_based,
727  9, 9 + field_based,
728  uvsrc_x, uvsrc_y * (1 << field_based),
729  h_edge_pos >> 1, v_edge_pos >> 1);
730  ptr_cb = ubuf;
731  ptr_cr = vbuf;
732  }
733  }
734 
735  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
736  if (bottom_field) {
737  dest_y += s->linesize;
738  dest_cb += s->uvlinesize;
739  dest_cr += s->uvlinesize;
740  }
741 
742  if (field_select) {
743  ptr_y += s->linesize;
744  ptr_cb += s->uvlinesize;
745  ptr_cr += s->uvlinesize;
746  }
747 
 /* Rescale the sub-pel phase and run the MC kernels. */
748  sx = (sx << 2) >> lowres;
749  sy = (sy << 2) >> lowres;
750  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
751 
752  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
753  uvsx = (uvsx << 2) >> lowres;
754  uvsy = (uvsy << 2) >> lowres;
755  if (hc) {
756  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
757  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
758  }
759  }
760  // FIXME h261 lowres loop filter
761 }
762 
/* Lowres chroma motion compensation for the 8x8 (4MV) case: a single
 * chroma vector is derived from the averaged luma MV with the H.263
 * rounding rule, then applied to both Cb and Cr.
 * NOTE(review): the first signature line (doc line 763) is missing from
 * this extract -- confirm against the upstream source. */
764  uint8_t *dest_cb, uint8_t *dest_cr,
765  uint8_t *const *ref_picture,
766  const h264_chroma_mc_func * pix_op,
767  int mx, int my)
768 {
769  const int lowres = s->avctx->lowres;
770  const int op_index = FFMIN(lowres, 3);
771  const int block_s = 8 >> lowres;
772  const int s_mask = (2 << lowres) - 1;
773  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
774  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
775  int emu = 0, src_x, src_y, sx, sy;
776  ptrdiff_t offset;
777  const uint8_t *ptr;
778 
779  if (s->quarter_sample) {
780  mx /= 2;
781  my /= 2;
782  }
783 
784  /* In case of 8X8, we construct a single chroma motion vector
785  with a special rounding */
786  mx = ff_h263_round_chroma(mx);
787  my = ff_h263_round_chroma(my);
788 
789  sx = mx & s_mask;
790  sy = my & s_mask;
791  src_x = s->mb_x * block_s + (mx >> lowres + 1);
792  src_y = s->mb_y * block_s + (my >> lowres + 1);
793 
794  offset = src_y * s->uvlinesize + src_x;
795  ptr = ref_picture[1] + offset;
 /* Out-of-frame Cb reads go through the edge-emulation buffer; the same
  * emu decision is reused for Cr below (same offset and size). */
796  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
797  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
798  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
799  s->uvlinesize, s->uvlinesize,
800  9, 9,
801  src_x, src_y, h_edge_pos, v_edge_pos);
802  ptr = s->sc.edge_emu_buffer;
803  emu = 1;
804  }
805  sx = (sx << 2) >> lowres;
806  sy = (sy << 2) >> lowres;
807  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
808 
809  ptr = ref_picture[2] + offset;
810  if (emu) {
811  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
812  s->uvlinesize, s->uvlinesize,
813  9, 9,
814  src_x, src_y, h_edge_pos, v_edge_pos);
815  ptr = s->sc.edge_emu_buffer;
816  }
817  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
818 }
819 
820 /**
821  * motion compensation of a single macroblock
822  * @param s context
823  * @param dest_y luma destination pointer
824  * @param dest_cb chroma cb/u destination pointer
825  * @param dest_cr chroma cr/v destination pointer
826  * @param dir direction (0->forward, 1->backward)
827  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
828  * @param pix_op halfpel motion compensation function (average or put normally)
829  * the motion vectors are taken from s->mv and the MV type from s->mv_type
830  */
831 static inline void MPV_motion_lowres(MpegEncContext *s,
832  uint8_t *dest_y, uint8_t *dest_cb,
833  uint8_t *dest_cr,
834  int dir, uint8_t *const *ref_picture,
835  const h264_chroma_mc_func *pix_op)
836 {
837  int mx, my;
838  int mb_x, mb_y;
839  const int lowres = s->avctx->lowres;
840  const int block_s = 8 >>lowres;
841 
842  mb_x = s->mb_x;
843  mb_y = s->mb_y;
844 
 /* Dispatch on the MV layout of the current macroblock. */
845  switch (s->mv_type) {
846  case MV_TYPE_16X16:
847  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
848  0, 0, 0,
849  ref_picture, pix_op,
850  s->mv[dir][0][0], s->mv[dir][0][1],
851  2 * block_s, mb_y);
852  break;
853  case MV_TYPE_8X8:
 /* Four luma 8x8 blocks; chroma uses one averaged, rounded MV. */
854  mx = 0;
855  my = 0;
856  for (int i = 0; i < 4; i++) {
857  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
858  s->linesize) * block_s,
859  ref_picture[0], 0, 0,
860  (2 * mb_x + (i & 1)) * block_s,
861  (2 * mb_y + (i >> 1)) * block_s,
862  s->width, s->height, s->linesize,
863  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
864  block_s, block_s, pix_op,
865  s->mv[dir][i][0], s->mv[dir][i][1]);
866 
867  mx += s->mv[dir][i][0];
868  my += s->mv[dir][i][1];
869  }
870 
871  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
872  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
873  pix_op, mx, my);
874  break;
875  case MV_TYPE_FIELD:
876  if (s->picture_structure == PICT_FRAME) {
877  /* top field */
878  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
879  1, 0, s->field_select[dir][0],
880  ref_picture, pix_op,
881  s->mv[dir][0][0], s->mv[dir][0][1],
882  block_s, mb_y);
883  /* bottom field */
884  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
885  1, 1, s->field_select[dir][1],
886  ref_picture, pix_op,
887  s->mv[dir][1][0], s->mv[dir][1][1],
888  block_s, mb_y);
889  } else {
 /* Field picture: fall back to the current picture when the
  * selected reference is unavailable. */
890  if ( s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
891  || !ref_picture[0]) {
892  ref_picture = s->current_picture_ptr->f->data;
893  }
894  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
895  0, 0, s->field_select[dir][0],
896  ref_picture, pix_op,
897  s->mv[dir][0][0],
898  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
899  }
900  break;
901  case MV_TYPE_16X8:
902  for (int i = 0; i < 2; i++) {
903  uint8_t *const *ref2picture;
904 
905  if ((s->picture_structure == s->field_select[dir][i] + 1 ||
906  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) &&
907  ref_picture[0]) {
908  ref2picture = ref_picture;
909  } else {
910  ref2picture = s->current_picture_ptr->f->data;
911  }
912 
913  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
914  0, 0, s->field_select[dir][i],
915  ref2picture, pix_op,
916  s->mv[dir][i][0], s->mv[dir][i][1] +
917  2 * block_s * i, block_s, mb_y >> 1);
918 
919  dest_y += 2 * block_s * s->linesize;
920  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
921  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
922  }
923  break;
924  case MV_TYPE_DMV:
925  if (s->picture_structure == PICT_FRAME) {
926  for (int i = 0; i < 2; i++) {
927  for (int j = 0; j < 2; j++) {
928  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
929  1, j, j ^ i,
930  ref_picture, pix_op,
931  s->mv[dir][2 * i + j][0],
932  s->mv[dir][2 * i + j][1],
933  block_s, mb_y);
934  }
935  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
936  }
937  } else {
938  if (!ref_picture[0]) {
939  ref_picture = s->current_picture_ptr->f->data;
940  }
941  for (int i = 0; i < 2; i++) {
942  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
943  0, 0, s->picture_structure != i + 1,
944  ref_picture, pix_op,
945  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
946  2 * block_s, mb_y >> 1);
947 
948  // after put we make avg of the same block
949  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
950 
951  // opposite parity is always in the same
952  // frame if this is second field
953  if (!s->first_field) {
954  ref_picture = s->current_picture_ptr->f->data;
955  }
956  }
957  }
958  break;
959  default:
960  av_assert2(0);
961  }
962 }
963 
964 /**
965  * find the lowest MB row referenced in the MVs
966  */
/* Used to know how far the reference picture must be decoded before the
 * current MB can be motion-compensated (frame-threading sync point).
 * NOTE(review): the signature (doc line 967, presumably taking s and dir)
 * is missing from this extract -- confirm against the upstream source. */
968 {
969  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
970  int off, mvs;
971 
 /* Field pictures and GMC are not handled: assume the whole picture. */
972  if (s->picture_structure != PICT_FRAME || s->mcsel)
973  goto unhandled;
974 
975  switch (s->mv_type) {
976  case MV_TYPE_16X16:
977  mvs = 1;
978  break;
979  case MV_TYPE_16X8:
980  mvs = 2;
981  break;
982  case MV_TYPE_8X8:
983  mvs = 4;
984  break;
985  default:
986  goto unhandled;
987  }
988 
989  for (int i = 0; i < mvs; i++) {
990  int my = s->mv[dir][i][1];
991  my_max = FFMAX(my_max, my);
992  my_min = FFMIN(my_min, my);
993  }
994 
 /* Convert the largest vertical displacement to whole MB rows (ceil). */
995  off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;
996 
997  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
998 unhandled:
999  return s->mb_height - 1;
1000 }
1001 
1002 /* add block[] to dest[] */
1003 static inline void add_dct(MpegEncContext *s,
1004  int16_t *block, int i, uint8_t *dest, int line_size)
1005 {
1006  if (s->block_last_index[i] >= 0) {
1007  s->idsp.idct_add(dest, line_size, block);
1008  }
1009 }
1010 
1011 #define IS_ENCODER 0
/* Macroblock reconstruction dispatcher: optionally dumps DCT coefficients
 * for debugging, then hands off to the appropriate reconstruction path
 * (MPEG-1 vs. other formats, full resolution vs. lowres).
 * NOTE(review): the template include (doc line 1012), the function
 * signature (doc line 1014) and the actual reconstruction calls on doc
 * lines 1031/1033/1035/1038 are missing from this extract -- confirm
 * against the upstream source. */
1013 
1015 {
1016  if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
1017  /* print DCT coefficients */
1018  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
 /* 6 blocks per MB (4 luma + 2 chroma), dumped in IDCT permutation order. */
1019  for (int i = 0; i < 6; i++) {
1020  for (int j = 0; j < 64; j++) {
1021  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1022  block[i][s->idsp.idct_permutation[j]]);
1023  }
1024  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1025  }
1026  }
1027 
1028  if (!s->avctx->lowres) {
1029 #if !CONFIG_SMALL
1030  if (s->out_format == FMT_MPEG1)
1032  else
1034 #else
1036 #endif
1037  } else
1039 }
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:98
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:681
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:53
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1439
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:261
av_clip
#define av_clip
Definition: common.h:98
ff_mpeg_framesize_alloc
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
Definition: mpegpicture.c:87
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1345
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:511
ff_mpv_init_context_frame
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:539
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:521
MAY_BE_MPEG12
#define MAY_BE_MPEG12
Definition: mpv_reconstruct_mb_template.c:24
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:263
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:940
NOT_MPEG12
#define NOT_MPEG12
Definition: mpv_reconstruct_mb_template.c:23
mpv_reconstruct_mb_template.c
Picture::alloc_mb_width
int alloc_mb_width
mb_width used to allocate tables
Definition: mpegpicture.h:65
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:169
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:570
AVFrame::width
int width
Definition: frame.h:412
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
last_picture
enum AVPictureType last_picture
Definition: movenc.c:69
b
#define b
Definition: input.c:41
MpegEncContext::next_picture
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:157
ff_toupper4
unsigned int ff_toupper4(unsigned int x)
Definition: to_upper4.h:29
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:290
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:85
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Picture
Picture.
Definition: mpegpicture.h:46
mpegutils.h
thread.h
ThreadFrame::f
AVFrame * f
Definition: threadframe.h:28
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:265
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:641
AV_VIDEO_ENC_PARAMS_MPEG2
@ AV_VIDEO_ENC_PARAMS_MPEG2
Definition: video_enc_params.h:65
DEFINITELY_MPEG12
#define DEFINITELY_MPEG12
Definition: mpv_reconstruct_mb_template.c:25
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(Picture *pic)
Deallocate a picture; frees the picture tables in case they need to be reallocated anyway.
Definition: mpegpicture.c:253
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:1014
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:454
fail
#define fail()
Definition: checkasm.h:179
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:123
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2996
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo_dec.c:577
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:640
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:504
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:60
avassert.h
mpegvideodec.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:589
emms_c
#define emms_c()
Definition: emms.h:63
ff_mpv_common_frame_size_change
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:190
width
#define width
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:1839
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
MpegEncContext::h_edge_pos
int h_edge_pos
Definition: mpegvideo.h:121
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:425
s1
#define s1
Definition: regdef.h:38
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
FMT_H261
@ FMT_H261
Definition: mpegutils.h:118
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
UPDATE_PICTURE
#define UPDATE_PICTURE(pic)
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
decode.h
limits.h
AV_CODEC_ID_VC1IMAGE
@ AV_CODEC_ID_VC1IMAGE
Definition: codec_id.h:204
Picture::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
Definition: mpegpicture.h:70
frame
static AVFrame * frame
Definition: demux_decode.c:54
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:77
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:380
threadframe.h
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:272
NULL
#define NULL
Definition: coverity.c:32
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpv_reconstruct_mb_template.c:56
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:283
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:342
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(Picture *dst, Picture *src)
Definition: mpegpicture.c:304
AVFrame::coded_picture_number
attribute_deprecated int coded_picture_number
picture number in bitstream order
Definition: frame.h:474
Picture::tf
ThreadFrame tf
Definition: mpegpicture.h:48
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:90
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1360
lowres
static int lowres
Definition: ffplay.c:333
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::private_ctx
void * private_ctx
Definition: mpegvideo.h:88
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:262
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo_dec.c:1003
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1398
f
f
Definition: af_crystalizer.c:121
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:121
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
h264chroma.h
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:541
Picture::alloc_mb_height
int alloc_mb_height
mb_height used to allocate tables
Definition: mpegpicture.h:66
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:427
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:295
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:549
height
#define height
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:264
Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:54
color_frame
static void color_frame(AVFrame *frame, int luma)
Definition: mpegvideo_dec.c:273
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo_dec.c:967
AV_CODEC_ID_MSS2
@ AV_CODEC_ID_MSS2
Definition: codec_id.h:219
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
emms.h
avcodec_default_get_buffer2
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: get_buffer.c:260
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:124
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:45
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo_dec.c:624
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:283
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int encoding, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:203
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
AVCodecContext::height
int height
Definition: avcodec.h:625
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:662
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo_dec.c:831
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:984
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:636
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1407
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo_dec.c:763
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
Picture::qscale_table
int8_t * qscale_table
Definition: mpegpicture.h:51
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic)
Definition: mpegvideo_dec.c:238
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:415
ff_mpv_free_context_frame
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:758
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:62
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVFrame::height
int height
Definition: frame.h:412
Picture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:57
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:47
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:496
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:640
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
ff_mpv_init_duplicate_contexts
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:410
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:470
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:625
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
h
h
Definition: vp9dsp_template.c:2038
AV_CODEC_ID_WMV3IMAGE
@ AV_CODEC_ID_WMV3IMAGE
Definition: codec_id.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
Picture::alloc_mb_stride
int alloc_mb_stride
mb_stride used to allocate tables
Definition: mpegpicture.h:67
video_enc_params.h
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint8_t *mbskip_table, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:159