FFmpeg
mpegvideo_dec.c
Go to the documentation of this file.
1 /*
2  * Common mpeg video decoding code
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
#include <limits.h>

#include "config_components.h"

#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/video_enc_params.h"

#include "avcodec.h"
#include "h264chroma.h"
#include "internal.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mpegvideodec.h"
#include "mpeg4videodec.h"
#include "threadframe.h"
#include "wmv2dec.h"
41 
43 {
45 
46  s->avctx = avctx;
47  s->width = avctx->coded_width;
48  s->height = avctx->coded_height;
49  s->codec_id = avctx->codec->id;
50  s->workaround_bugs = avctx->workaround_bugs;
51 
52  /* convert fourcc to upper case */
53  s->codec_tag = ff_toupper4(avctx->codec_tag);
54 
55  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
56 }
57 
59  const AVCodecContext *src)
60 {
61  MpegEncContext *const s1 = src->priv_data;
62  MpegEncContext *const s = dst->priv_data;
63  int ret;
64 
65  if (dst == src)
66  return 0;
67 
68  av_assert0(s != s1);
69 
70  // FIXME can parameters change on I-frames?
71  // in that case dst may need a reinit
72  if (!s->context_initialized) {
73  void *private_ctx = s->private_ctx;
74  int err;
75  memcpy(s, s1, sizeof(*s));
76 
77  s->avctx = dst;
78  s->private_ctx = private_ctx;
79  s->bitstream_buffer = NULL;
80  s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
81 
82  if (s1->context_initialized) {
84  if ((err = ff_mpv_common_init(s)) < 0) {
85  memset(s, 0, sizeof(*s));
86  s->avctx = dst;
87  s->private_ctx = private_ctx;
88  memcpy(&s->h264chroma, &s1->h264chroma, sizeof(s->h264chroma));
89  return err;
90  }
91  }
92  }
93 
94  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
95  s->height = s1->height;
96  s->width = s1->width;
98  return ret;
99  }
100 
101  s->quarter_sample = s1->quarter_sample;
102 
103  s->coded_picture_number = s1->coded_picture_number;
104  s->picture_number = s1->picture_number;
105 
106  av_assert0(!s->picture || s->picture != s1->picture);
107  if (s->picture)
108  for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
109  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
110  if (s1->picture && s1->picture[i].f->buf[0] &&
111  (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
112  return ret;
113  }
114 
115 #define UPDATE_PICTURE(pic)\
116 do {\
117  ff_mpeg_unref_picture(s->avctx, &s->pic);\
118  if (s1->pic.f && s1->pic.f->buf[0])\
119  ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
120  else\
121  ret = ff_update_picture_tables(&s->pic, &s1->pic);\
122  if (ret < 0)\
123  return ret;\
124 } while (0)
125 
129 
130 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
131  ((pic && pic >= old_ctx->picture && \
132  pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
133  &new_ctx->picture[pic - old_ctx->picture] : NULL)
134 
135  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
136  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
137  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
138 
139  // Error/bug resilience
140  s->workaround_bugs = s1->workaround_bugs;
141  s->padding_bug_score = s1->padding_bug_score;
142 
143  // MPEG-4 timing info
144  memcpy(&s->last_time_base, &s1->last_time_base,
145  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
146  (char *) &s1->last_time_base);
147 
148  // B-frame info
149  s->max_b_frames = s1->max_b_frames;
150  s->low_delay = s1->low_delay;
151  s->droppable = s1->droppable;
152 
153  // DivX handling (doesn't work)
154  s->divx_packed = s1->divx_packed;
155 
156  if (s1->bitstream_buffer) {
157  if (s1->bitstream_buffer_size +
158  AV_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size) {
159  av_fast_malloc(&s->bitstream_buffer,
160  &s->allocated_bitstream_buffer_size,
161  s1->allocated_bitstream_buffer_size);
162  if (!s->bitstream_buffer) {
163  s->bitstream_buffer_size = 0;
164  return AVERROR(ENOMEM);
165  }
166  }
167  s->bitstream_buffer_size = s1->bitstream_buffer_size;
168  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
169  s1->bitstream_buffer_size);
170  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
172  }
173 
174  // linesize-dependent scratch buffer allocation
175  if (!s->sc.edge_emu_buffer)
176  if (s1->linesize) {
177  if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
178  &s->sc, s1->linesize) < 0) {
179  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
180  "scratch buffers.\n");
181  return AVERROR(ENOMEM);
182  }
183  } else {
184  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
185  "be allocated due to unknown size.\n");
186  }
187 
188  // MPEG-2/interlacing info
189  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
190  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
191 
192  return 0;
193 }
194 
196 {
197  int err = 0;
198 
199  if (!s->context_initialized)
200  return AVERROR(EINVAL);
201 
203 
204  if (s->picture)
205  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
206  s->picture[i].needs_realloc = 1;
207 
208  s->last_picture_ptr =
209  s->next_picture_ptr =
210  s->current_picture_ptr = NULL;
211 
212  if ((s->width || s->height) &&
213  (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
214  goto fail;
215 
216  /* set chroma shifts */
217  err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
218  &s->chroma_x_shift,
219  &s->chroma_y_shift);
220  if (err < 0)
221  goto fail;
222 
223  if ((err = ff_mpv_init_context_frame(s)))
224  goto fail;
225 
226  memset(s->thread_context, 0, sizeof(s->thread_context));
227  s->thread_context[0] = s;
228 
229  if (s->width && s->height) {
231  if (err < 0)
232  goto fail;
233  }
234  s->context_reinit = 0;
235 
236  return 0;
237  fail:
239  s->context_reinit = 1;
240  return err;
241 }
242 
244 {
245  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, 0,
246  s->chroma_x_shift, s->chroma_y_shift, s->out_format,
247  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
248  &s->linesize, &s->uvlinesize);
249 }
250 
251 static void gray_frame(AVFrame *frame)
252 {
253  int h_chroma_shift, v_chroma_shift;
254 
255  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
256 
257  for (int i = 0; i < frame->height; i++)
258  memset(frame->data[0] + frame->linesize[0] * i, 0x80, frame->width);
259  for (int i = 0; i < AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
260  memset(frame->data[1] + frame->linesize[1] * i,
261  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
262  memset(frame->data[2] + frame->linesize[2] * i,
263  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
264  }
265 }
266 
267 /**
268  * generic function called after decoding
269  * the header and before a frame is decoded.
270  */
272 {
273  Picture *pic;
274  int idx, ret;
275 
276  s->mb_skipped = 0;
277 
279  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
280  return -1;
281  }
282 
283  /* mark & release old frames */
284  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
285  s->last_picture_ptr != s->next_picture_ptr &&
286  s->last_picture_ptr->f->buf[0]) {
287  ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
288  }
289 
290  /* release non reference/forgotten frames */
291  for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
292  if (!s->picture[i].reference ||
293  (&s->picture[i] != s->last_picture_ptr &&
294  &s->picture[i] != s->next_picture_ptr &&
295  !s->picture[i].needs_realloc)) {
296  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
297  }
298  }
299 
300  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
301  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
302  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
303 
304  if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
305  // we already have an unused image
306  // (maybe it was set before reading the header)
307  pic = s->current_picture_ptr;
308  } else {
309  idx = ff_find_unused_picture(s->avctx, s->picture, 0);
310  if (idx < 0) {
311  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
312  return idx;
313  }
314  pic = &s->picture[idx];
315  }
316 
317  pic->reference = 0;
318  if (!s->droppable) {
319  if (s->pict_type != AV_PICTURE_TYPE_B)
320  pic->reference = 3;
321  }
322 
323  pic->f->coded_picture_number = s->coded_picture_number++;
324 
325  if (alloc_picture(s, pic) < 0)
326  return -1;
327 
328  s->current_picture_ptr = pic;
329  // FIXME use only the vars from current_pic
330  s->current_picture_ptr->f->top_field_first = s->top_field_first;
331  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
332  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
333  if (s->picture_structure != PICT_FRAME)
334  s->current_picture_ptr->f->top_field_first =
335  (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
336  }
337  s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
338  !s->progressive_sequence;
339  s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
340 
341  s->current_picture_ptr->f->pict_type = s->pict_type;
342  s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
343 
344  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
345  s->current_picture_ptr)) < 0)
346  return ret;
347 
348  if (s->pict_type != AV_PICTURE_TYPE_B) {
349  s->last_picture_ptr = s->next_picture_ptr;
350  if (!s->droppable)
351  s->next_picture_ptr = s->current_picture_ptr;
352  }
353  ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
354  s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
355  s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
356  s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
357  s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
358  s->pict_type, s->droppable);
359 
360  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
361  (s->pict_type != AV_PICTURE_TYPE_I)) {
362  int h_chroma_shift, v_chroma_shift;
363  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
364  &h_chroma_shift, &v_chroma_shift);
365  if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
367  "allocating dummy last picture for B frame\n");
368  else if (s->pict_type != AV_PICTURE_TYPE_I)
370  "warning: first frame is no keyframe\n");
371 
372  /* Allocate a dummy frame */
373  idx = ff_find_unused_picture(s->avctx, s->picture, 0);
374  if (idx < 0) {
375  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
376  return idx;
377  }
378  s->last_picture_ptr = &s->picture[idx];
379 
380  s->last_picture_ptr->reference = 3;
381  s->last_picture_ptr->f->key_frame = 0;
382  s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
383 
384  if (alloc_picture(s, s->last_picture_ptr) < 0) {
385  s->last_picture_ptr = NULL;
386  return -1;
387  }
388 
389  if (!avctx->hwaccel) {
390  for (int i = 0; i < avctx->height; i++)
391  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
392  0x80, avctx->width);
393  if (s->last_picture_ptr->f->data[2]) {
394  for (int i = 0; i < AV_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
395  memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
396  0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
397  memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
398  0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
399  }
400  }
401 
402  if (s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263) {
403  for (int i = 0; i < avctx->height; i++)
404  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0] * i,
405  16, avctx->width);
406  }
407  }
408 
409  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
410  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
411  }
412  if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
413  s->pict_type == AV_PICTURE_TYPE_B) {
414  /* Allocate a dummy frame */
415  idx = ff_find_unused_picture(s->avctx, s->picture, 0);
416  if (idx < 0) {
417  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
418  return idx;
419  }
420  s->next_picture_ptr = &s->picture[idx];
421 
422  s->next_picture_ptr->reference = 3;
423  s->next_picture_ptr->f->key_frame = 0;
424  s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
425 
426  if (alloc_picture(s, s->next_picture_ptr) < 0) {
427  s->next_picture_ptr = NULL;
428  return -1;
429  }
430  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
431  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
432  }
433 
434 #if 0 // BUFREF-FIXME
435  memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
436  memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
437 #endif
438  if (s->last_picture_ptr) {
439  if (s->last_picture_ptr->f->buf[0] &&
440  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
441  s->last_picture_ptr)) < 0)
442  return ret;
443  }
444  if (s->next_picture_ptr) {
445  if (s->next_picture_ptr->f->buf[0] &&
446  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
447  s->next_picture_ptr)) < 0)
448  return ret;
449  }
450 
451  av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
452  s->last_picture_ptr->f->buf[0]));
453 
454  if (s->picture_structure != PICT_FRAME) {
455  for (int i = 0; i < 4; i++) {
456  if (s->picture_structure == PICT_BOTTOM_FIELD) {
457  s->current_picture.f->data[i] = FF_PTR_ADD(s->current_picture.f->data[i],
458  s->current_picture.f->linesize[i]);
459  }
460  s->current_picture.f->linesize[i] *= 2;
461  s->last_picture.f->linesize[i] *= 2;
462  s->next_picture.f->linesize[i] *= 2;
463  }
464  }
465 
466  /* set dequantizer, we can't do it during init as
467  * it might change for MPEG-4 and we can't do it in the header
468  * decode as init is not called for MPEG-4 there yet */
469  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
470  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
471  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
472  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
473  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
474  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
475  } else {
476  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
477  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
478  }
479 
480  if (s->avctx->debug & FF_DEBUG_NOMC)
481  gray_frame(s->current_picture_ptr->f);
482 
483  return 0;
484 }
485 
486 /* called after a frame has been decoded. */
488 {
489  emms_c();
490 
491  if (s->current_picture.reference)
492  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
493 }
494 
/* Thin wrapper: dump per-macroblock debug info (MB types, qscale, motion
 * vectors) for picture p via the generic ff_print_debug_info2() helper. */
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
{
    ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
                         p->qscale_table, p->motion_val,
                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
}
501 
502 int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
503 {
504  AVVideoEncParams *par;
505  int mult = (qp_type == FF_MPV_QSCALE_TYPE_MPEG1) ? 2 : 1;
506  unsigned int nb_mb = p->alloc_mb_height * p->alloc_mb_width;
507 
508  if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
509  return 0;
510 
512  if (!par)
513  return AVERROR(ENOMEM);
514 
515  for (unsigned y = 0; y < p->alloc_mb_height; y++)
516  for (unsigned x = 0; x < p->alloc_mb_width; x++) {
517  const unsigned int block_idx = y * p->alloc_mb_width + x;
518  const unsigned int mb_xy = y * p->alloc_mb_stride + x;
519  AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);
520 
521  b->src_x = x * 16;
522  b->src_y = y * 16;
523  b->w = 16;
524  b->h = 16;
525 
526  b->delta_qp = p->qscale_table[mb_xy] * mult;
527  }
528 
529  return 0;
530 }
531 
533 {
534  ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
535  s->last_picture_ptr ? s->last_picture_ptr->f : NULL,
536  y, h, s->picture_structure,
537  s->first_field, s->low_delay);
538 }
539 
541 {
542  MpegEncContext *const s = avctx->priv_data;
543 
544  if (!s->picture)
545  return;
546 
547  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
548  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
549  s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
550 
551  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
552  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
553  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
554 
555  s->mb_x = s->mb_y = 0;
556 
557 #if FF_API_FLAG_TRUNCATED
558  s->parse_context.state = -1;
559  s->parse_context.frame_start_found = 0;
560  s->parse_context.overread = 0;
561  s->parse_context.overread_index = 0;
562  s->parse_context.index = 0;
563  s->parse_context.last_index = 0;
564 #endif
565  s->bitstream_buffer_size = 0;
566  s->pp_time = 0;
567 }
568 
570 {
571  if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
572  ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
573 }
574 
575 
577  uint8_t *dest, const uint8_t *src,
578  int field_based, int field_select,
579  int src_x, int src_y,
580  int width, int height, ptrdiff_t stride,
581  int h_edge_pos, int v_edge_pos,
582  int w, int h, const h264_chroma_mc_func *pix_op,
583  int motion_x, int motion_y)
584 {
585  const int lowres = s->avctx->lowres;
586  const int op_index = FFMIN(lowres, 3);
587  const int s_mask = (2 << lowres) - 1;
588  int emu = 0;
589  int sx, sy;
590 
591  if (s->quarter_sample) {
592  motion_x /= 2;
593  motion_y /= 2;
594  }
595 
596  sx = motion_x & s_mask;
597  sy = motion_y & s_mask;
598  src_x += motion_x >> lowres + 1;
599  src_y += motion_y >> lowres + 1;
600 
601  src += src_y * stride + src_x;
602 
603  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
604  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
605  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
606  s->linesize, s->linesize,
607  w + 1, (h + 1) << field_based,
608  src_x, src_y * (1 << field_based),
610  src = s->sc.edge_emu_buffer;
611  emu = 1;
612  }
613 
614  sx = (sx << 2) >> lowres;
615  sy = (sy << 2) >> lowres;
616  if (field_select)
617  src += s->linesize;
618  pix_op[op_index](dest, src, stride, h, sx, sy);
619  return emu;
620 }
621 
622 /* apply one mpeg motion vector to the three components */
624  uint8_t *dest_y,
625  uint8_t *dest_cb,
626  uint8_t *dest_cr,
627  int field_based,
628  int bottom_field,
629  int field_select,
630  uint8_t *const *ref_picture,
631  const h264_chroma_mc_func *pix_op,
632  int motion_x, int motion_y,
633  int h, int mb_y)
634 {
635  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
636  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
637  ptrdiff_t uvlinesize, linesize;
638  const int lowres = s->avctx->lowres;
639  const int op_index = FFMIN(lowres - 1 + s->chroma_x_shift, 3);
640  const int block_s = 8 >> lowres;
641  const int s_mask = (2 << lowres) - 1;
642  const int h_edge_pos = s->h_edge_pos >> lowres;
643  const int v_edge_pos = s->v_edge_pos >> lowres;
644  linesize = s->current_picture.f->linesize[0] << field_based;
645  uvlinesize = s->current_picture.f->linesize[1] << field_based;
646 
647  // FIXME obviously not perfect but qpel will not work in lowres anyway
648  if (s->quarter_sample) {
649  motion_x /= 2;
650  motion_y /= 2;
651  }
652 
653  if (field_based) {
654  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
655  }
656 
657  sx = motion_x & s_mask;
658  sy = motion_y & s_mask;
659  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
660  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
661 
662  if (s->out_format == FMT_H263) {
663  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
664  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
665  uvsrc_x = src_x >> 1;
666  uvsrc_y = src_y >> 1;
667  } else if (s->out_format == FMT_H261) {
668  // even chroma mv's are full pel in H261
669  mx = motion_x / 4;
670  my = motion_y / 4;
671  uvsx = (2 * mx) & s_mask;
672  uvsy = (2 * my) & s_mask;
673  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
674  uvsrc_y = mb_y * block_s + (my >> lowres);
675  } else {
676  if (s->chroma_y_shift) {
677  mx = motion_x / 2;
678  my = motion_y / 2;
679  uvsx = mx & s_mask;
680  uvsy = my & s_mask;
681  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
682  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
683  } else {
684  if (s->chroma_x_shift) {
685  //Chroma422
686  mx = motion_x / 2;
687  uvsx = mx & s_mask;
688  uvsy = motion_y & s_mask;
689  uvsrc_y = src_y;
690  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
691  } else {
692  //Chroma444
693  uvsx = motion_x & s_mask;
694  uvsy = motion_y & s_mask;
695  uvsrc_x = src_x;
696  uvsrc_y = src_y;
697  }
698  }
699  }
700 
701  ptr_y = ref_picture[0] + src_y * linesize + src_x;
702  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
703  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
704 
705  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
706  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
707  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
708  linesize >> field_based, linesize >> field_based,
709  17, 17 + field_based,
710  src_x, src_y * (1 << field_based), h_edge_pos,
711  v_edge_pos);
712  ptr_y = s->sc.edge_emu_buffer;
713  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
714  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
715  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
716  if (s->workaround_bugs & FF_BUG_IEDGE)
717  vbuf -= s->uvlinesize;
718  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
719  uvlinesize >> field_based, uvlinesize >> field_based,
720  9, 9 + field_based,
721  uvsrc_x, uvsrc_y * (1 << field_based),
722  h_edge_pos >> 1, v_edge_pos >> 1);
723  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
724  uvlinesize >> field_based,uvlinesize >> field_based,
725  9, 9 + field_based,
726  uvsrc_x, uvsrc_y * (1 << field_based),
727  h_edge_pos >> 1, v_edge_pos >> 1);
728  ptr_cb = ubuf;
729  ptr_cr = vbuf;
730  }
731  }
732 
733  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
734  if (bottom_field) {
735  dest_y += s->linesize;
736  dest_cb += s->uvlinesize;
737  dest_cr += s->uvlinesize;
738  }
739 
740  if (field_select) {
741  ptr_y += s->linesize;
742  ptr_cb += s->uvlinesize;
743  ptr_cr += s->uvlinesize;
744  }
745 
746  sx = (sx << 2) >> lowres;
747  sy = (sy << 2) >> lowres;
748  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
749 
750  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
751  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
752  uvsx = (uvsx << 2) >> lowres;
753  uvsy = (uvsy << 2) >> lowres;
754  if (hc) {
755  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
756  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
757  }
758  }
759  // FIXME h261 lowres loop filter
760 }
761 
763  uint8_t *dest_cb, uint8_t *dest_cr,
764  uint8_t *const *ref_picture,
765  const h264_chroma_mc_func * pix_op,
766  int mx, int my)
767 {
768  const int lowres = s->avctx->lowres;
769  const int op_index = FFMIN(lowres, 3);
770  const int block_s = 8 >> lowres;
771  const int s_mask = (2 << lowres) - 1;
772  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
773  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
774  int emu = 0, src_x, src_y, sx, sy;
775  ptrdiff_t offset;
776  const uint8_t *ptr;
777 
778  if (s->quarter_sample) {
779  mx /= 2;
780  my /= 2;
781  }
782 
783  /* In case of 8X8, we construct a single chroma motion vector
784  with a special rounding */
785  mx = ff_h263_round_chroma(mx);
786  my = ff_h263_round_chroma(my);
787 
788  sx = mx & s_mask;
789  sy = my & s_mask;
790  src_x = s->mb_x * block_s + (mx >> lowres + 1);
791  src_y = s->mb_y * block_s + (my >> lowres + 1);
792 
793  offset = src_y * s->uvlinesize + src_x;
794  ptr = ref_picture[1] + offset;
795  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
796  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
797  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
798  s->uvlinesize, s->uvlinesize,
799  9, 9,
800  src_x, src_y, h_edge_pos, v_edge_pos);
801  ptr = s->sc.edge_emu_buffer;
802  emu = 1;
803  }
804  sx = (sx << 2) >> lowres;
805  sy = (sy << 2) >> lowres;
806  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
807 
808  ptr = ref_picture[2] + offset;
809  if (emu) {
810  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
811  s->uvlinesize, s->uvlinesize,
812  9, 9,
813  src_x, src_y, h_edge_pos, v_edge_pos);
814  ptr = s->sc.edge_emu_buffer;
815  }
816  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
817 }
818 
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t *const *ref_picture,
                                     const h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    // Dispatch on the macroblock's motion-vector partitioning.
    switch (s->mv_type) {
    case MV_TYPE_16X16:
        // One vector for the whole macroblock.
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        // Four luma vectors; chroma uses their combined vector below.
        mx = 0;
        my = 0;
        for (int i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            // Field picture: the opposite-parity field of the current frame
            // may be the actual reference.
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f->data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        // Two vectors, each covering a 16x8 half of the macroblock.
        for (int i = 0; i < 2; i++) {
            uint8_t *const *ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s *  s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        // Dual-prime: predictions from both parities are averaged.
        if (s->picture_structure == PICT_FRAME) {
            for (int i = 0; i < 2; i++) {
                for (int j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (int i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
959 
960 /**
961  * find the lowest MB row referenced in the MVs
962  */
964 {
965  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
966  int off, mvs;
967 
968  if (s->picture_structure != PICT_FRAME || s->mcsel)
969  goto unhandled;
970 
971  switch (s->mv_type) {
972  case MV_TYPE_16X16:
973  mvs = 1;
974  break;
975  case MV_TYPE_16X8:
976  mvs = 2;
977  break;
978  case MV_TYPE_8X8:
979  mvs = 4;
980  break;
981  default:
982  goto unhandled;
983  }
984 
985  for (int i = 0; i < mvs; i++) {
986  int my = s->mv[dir][i][1];
987  my_max = FFMAX(my_max, my);
988  my_min = FFMIN(my_min, my);
989  }
990 
991  off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;
992 
993  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
994 unhandled:
995  return s->mb_height - 1;
996 }
997 
998 /* add block[] to dest[] */
999 static inline void add_dct(MpegEncContext *s,
1000  int16_t *block, int i, uint8_t *dest, int line_size)
1001 {
1002  if (s->block_last_index[i] >= 0) {
1003  s->idsp.idct_add(dest, line_size, block);
1004  }
1005 }
1006 
1007 #define IS_ENCODER 0
1009 
1011 {
1012  if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
1013  /* print DCT coefficients */
1014  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1015  for (int i = 0; i < 6; i++) {
1016  for (int j = 0; j < 64; j++) {
1017  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1018  block[i][s->idsp.idct_permutation[j]]);
1019  }
1020  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1021  }
1022  }
1023 
1024  if (!s->avctx->lowres) {
1025 #if !CONFIG_SMALL
1026  if (s->out_format == FMT_MPEG1)
1028  else
1030 #else
1032 #endif
1033  } else
1035 }
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:98
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:682
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:51
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1369
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:265
av_clip
#define av_clip
Definition: common.h:95
ff_mpeg_framesize_alloc
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
Definition: mpegpicture.c:87
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1280
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:502
ff_mpv_init_context_frame
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:539
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:521
MAY_BE_MPEG12
#define MAY_BE_MPEG12
Definition: mpv_reconstruct_mb_template.c:24
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:267
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:983
NOT_MPEG12
#define NOT_MPEG12
Definition: mpv_reconstruct_mb_template.c:23
AVFrame::coded_picture_number
int coded_picture_number
picture number in bitstream order
Definition: frame.h:457
mpv_reconstruct_mb_template.c
Picture::alloc_mb_width
int alloc_mb_width
mb_width used to allocate tables
Definition: mpegpicture.h:65
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:173
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:569
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
last_picture
enum AVPictureType last_picture
Definition: movenc.c:69
b
#define b
Definition: input.c:41
MpegEncContext::next_picture
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:161
ff_toupper4
unsigned int ff_toupper4(unsigned int x)
Definition: to_upper4.h:29
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:294
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:88
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Picture
Picture.
Definition: mpegpicture.h:46
mpegutils.h
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:269
AV_VIDEO_ENC_PARAMS_MPEG2
@ AV_VIDEO_ENC_PARAMS_MPEG2
Definition: video_enc_params.h:65
DEFINITELY_MPEG12
#define DEFINITELY_MPEG12
Definition: mpv_reconstruct_mb_template.c:25
gray_frame
static void gray_frame(AVFrame *frame)
Definition: mpegvideo_dec.c:251
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:1010
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:407
fail
#define fail()
Definition: checkasm.h:134
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:127
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2916
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo_dec.c:576
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:586
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:495
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:60
avassert.h
mpegvideodec.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:632
ff_mpv_common_frame_size_change
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:195
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:256
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture; frees the picture tables in case they need to be reallocated anyway.
Definition: mpegpicture.c:312
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:50
MpegEncContext::h_edge_pos
int h_edge_pos
Definition: mpegvideo.h:125
s1
#define s1
Definition: regdef.h:38
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
FMT_H261
@ FMT_H261
Definition: mpegutils.h:118
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
UPDATE_PICTURE
#define UPDATE_PICTURE(pic)
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
limits.h
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:77
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:449
threadframe.h
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:276
NULL
#define NULL
Definition: coverity.c:32
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpv_reconstruct_mb_template.c:56
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:287
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:342
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:99
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1295
lowres
static int lowres
Definition: ffplay.c:335
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:254
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::private_ctx
void * private_ctx
Definition: mpegvideo.h:91
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:266
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo_dec.c:999
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1333
f
f
Definition: af_crystalizer.c:122
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:259
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:369
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:125
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
h264chroma.h
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:532
Picture::alloc_mb_height
int alloc_mb_height
mb_height used to allocate tables
Definition: mpegpicture.h:66
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:271
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:540
height
#define height
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:268
Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:54
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo_dec.c:963
AVCodec::id
enum AVCodecID id
Definition: codec.h:218
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:128
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:42
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo_dec.c:623
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:287
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
AVCodecContext::height
int height
Definition: avcodec.h:571
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo_dec.c:830
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1342
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo_dec.c:762
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
Picture::qscale_table
int8_t * qscale_table
Definition: mpegpicture.h:51
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic)
Definition: mpegvideo_dec.c:243
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:366
ff_mpv_free_context_frame
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:764
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:58
AVCodecContext
main external API structure.
Definition: avcodec.h:398
Picture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:57
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:47
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:487
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:586
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
ff_mpv_init_duplicate_contexts
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:410
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:423
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:425
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:565
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:571
imgutils.h
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Definition: video_enc_params.h:143
h
h
Definition: vp9dsp_template.c:2038
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:70
Picture::alloc_mb_stride
int alloc_mb_stride
mb_stride used to allocate tables
Definition: mpegpicture.h:67
video_enc_params.h
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint8_t *mbskip_table, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:157