FFmpeg
mpegvideo_dec.c
Go to the documentation of this file.
1 /*
2  * Common mpeg video decoding code
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <limits.h>
24 
25 #include "config_components.h"
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/emms.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
32 
33 #include "avcodec.h"
34 #include "h264chroma.h"
35 #include "internal.h"
36 #include "mpegutils.h"
37 #include "mpegvideo.h"
38 #include "mpegvideodec.h"
39 #include "mpeg4videodec.h"
40 #include "thread.h"
41 #include "threadframe.h"
42 #include "wmv2dec.h"
43 
45 {
47 
48  s->avctx = avctx;
49  s->width = avctx->coded_width;
50  s->height = avctx->coded_height;
51  s->codec_id = avctx->codec->id;
52  s->workaround_bugs = avctx->workaround_bugs;
53 
54  /* convert fourcc to upper case */
55  s->codec_tag = ff_toupper4(avctx->codec_tag);
56 
57  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
58 }
59 
61  const AVCodecContext *src)
62 {
63  MpegEncContext *const s1 = src->priv_data;
64  MpegEncContext *const s = dst->priv_data;
65  int ret;
66 
67  if (dst == src)
68  return 0;
69 
70  av_assert0(s != s1);
71 
72  // FIXME can parameters change on I-frames?
73  // in that case dst may need a reinit
74  if (!s->context_initialized) {
75  void *private_ctx = s->private_ctx;
76  int err;
77  memcpy(s, s1, sizeof(*s));
78 
79  s->avctx = dst;
80  s->private_ctx = private_ctx;
81  s->bitstream_buffer = NULL;
82  s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
83 
84  if (s1->context_initialized) {
86  if ((err = ff_mpv_common_init(s)) < 0) {
87  memset(s, 0, sizeof(*s));
88  s->avctx = dst;
89  s->private_ctx = private_ctx;
90  memcpy(&s->h264chroma, &s1->h264chroma, sizeof(s->h264chroma));
91  return err;
92  }
93  }
94  }
95 
96  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
97  s->height = s1->height;
98  s->width = s1->width;
100  return ret;
101  }
102 
103  s->quarter_sample = s1->quarter_sample;
104 
105  s->coded_picture_number = s1->coded_picture_number;
106  s->picture_number = s1->picture_number;
107 
108  av_assert0(!s->picture || s->picture != s1->picture);
109  if (s->picture)
110  for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
111  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
112  if (s1->picture && s1->picture[i].f->buf[0] &&
113  (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
114  return ret;
115  }
116 
117 #define UPDATE_PICTURE(pic)\
118 do {\
119  ff_mpeg_unref_picture(s->avctx, &s->pic);\
120  if (s1->pic.f && s1->pic.f->buf[0])\
121  ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
122  else\
123  ret = ff_update_picture_tables(&s->pic, &s1->pic);\
124  if (ret < 0)\
125  return ret;\
126 } while (0)
127 
131 
132 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
133  ((pic && pic >= old_ctx->picture && \
134  pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
135  &new_ctx->picture[pic - old_ctx->picture] : NULL)
136 
137  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
138  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
139  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
140 
141  // Error/bug resilience
142  s->workaround_bugs = s1->workaround_bugs;
143  s->padding_bug_score = s1->padding_bug_score;
144 
145  // MPEG-4 timing info
146  memcpy(&s->last_time_base, &s1->last_time_base,
147  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
148  (char *) &s1->last_time_base);
149 
150  // B-frame info
151  s->max_b_frames = s1->max_b_frames;
152  s->low_delay = s1->low_delay;
153  s->droppable = s1->droppable;
154 
155  // DivX handling (doesn't work)
156  s->divx_packed = s1->divx_packed;
157 
158  if (s1->bitstream_buffer) {
159  av_fast_padded_malloc(&s->bitstream_buffer,
160  &s->allocated_bitstream_buffer_size,
161  s1->bitstream_buffer_size);
162  if (!s->bitstream_buffer) {
163  s->bitstream_buffer_size = 0;
164  return AVERROR(ENOMEM);
165  }
166  s->bitstream_buffer_size = s1->bitstream_buffer_size;
167  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
168  s1->bitstream_buffer_size);
169  }
170 
171  // linesize-dependent scratch buffer allocation
172  if (!s->sc.edge_emu_buffer)
173  if (s1->linesize) {
174  if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
175  &s->sc, s1->linesize) < 0) {
176  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
177  "scratch buffers.\n");
178  return AVERROR(ENOMEM);
179  }
180  } else {
181  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
182  "be allocated due to unknown size.\n");
183  }
184 
185  // MPEG-2/interlacing info
186  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
187  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
188 
189  return 0;
190 }
191 
193 {
194  int err = 0;
195 
196  if (!s->context_initialized)
197  return AVERROR(EINVAL);
198 
200 
201  if (s->picture)
202  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
203  s->picture[i].needs_realloc = 1;
204 
205  s->last_picture_ptr =
206  s->next_picture_ptr =
207  s->current_picture_ptr = NULL;
208 
209  if ((s->width || s->height) &&
210  (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
211  goto fail;
212 
213  /* set chroma shifts */
214  err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
215  &s->chroma_x_shift,
216  &s->chroma_y_shift);
217  if (err < 0)
218  goto fail;
219 
220  if ((err = ff_mpv_init_context_frame(s)))
221  goto fail;
222 
223  memset(s->thread_context, 0, sizeof(s->thread_context));
224  s->thread_context[0] = s;
225 
226  if (s->width && s->height) {
228  if (err < 0)
229  goto fail;
230  }
231  s->context_reinit = 0;
232 
233  return 0;
234  fail:
236  s->context_reinit = 1;
237  return err;
238 }
239 
241 {
242  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, 0,
243  s->chroma_x_shift, s->chroma_y_shift, s->out_format,
244  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
245  &s->linesize, &s->uvlinesize);
246 }
247 
248 static void gray_frame(AVFrame *frame)
249 {
250  int h_chroma_shift, v_chroma_shift;
251 
252  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
253 
254  for (int i = 0; i < frame->height; i++)
255  memset(frame->data[0] + frame->linesize[0] * i, 0x80, frame->width);
256  for (int i = 0; i < AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
257  memset(frame->data[1] + frame->linesize[1] * i,
258  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
259  memset(frame->data[2] + frame->linesize[2] * i,
260  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
261  }
262 }
263 
264 /**
265  * generic function called after decoding
266  * the header and before a frame is decoded.
267  */
269 {
270  Picture *pic;
271  int idx, ret;
272 
273  s->mb_skipped = 0;
274 
276  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
277  return -1;
278  }
279 
280  /* mark & release old frames */
281  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
282  s->last_picture_ptr != s->next_picture_ptr &&
283  s->last_picture_ptr->f->buf[0]) {
284  ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
285  }
286 
287  /* release non reference/forgotten frames */
288  for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
289  if (!s->picture[i].reference ||
290  (&s->picture[i] != s->last_picture_ptr &&
291  &s->picture[i] != s->next_picture_ptr &&
292  !s->picture[i].needs_realloc)) {
293  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
294  }
295  }
296 
297  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
298  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
299  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
300 
301  if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
302  // we already have an unused image
303  // (maybe it was set before reading the header)
304  pic = s->current_picture_ptr;
305  } else {
306  idx = ff_find_unused_picture(s->avctx, s->picture, 0);
307  if (idx < 0) {
308  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
309  return idx;
310  }
311  pic = &s->picture[idx];
312  }
313 
314  pic->reference = 0;
315  if (!s->droppable) {
316  if (s->pict_type != AV_PICTURE_TYPE_B)
317  pic->reference = 3;
318  }
319 
320 #if FF_API_FRAME_PICTURE_NUMBER
322  pic->f->coded_picture_number = s->coded_picture_number++;
324 #endif
325 
326  if (alloc_picture(s, pic) < 0)
327  return -1;
328 
329  s->current_picture_ptr = pic;
330  // FIXME use only the vars from current_pic
331  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !!s->top_field_first;
332  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
333  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
334  if (s->picture_structure != PICT_FRAME)
335  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST *
336  ((s->picture_structure == PICT_TOP_FIELD) == s->first_field);
337  }
338  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_INTERLACED * (!s->progressive_frame &&
339  !s->progressive_sequence);
340  s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
341 
342  s->current_picture_ptr->f->pict_type = s->pict_type;
343  if (s->pict_type == AV_PICTURE_TYPE_I)
344  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_KEY;
345  else
346  s->current_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
347 
348  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
349  s->current_picture_ptr)) < 0)
350  return ret;
351 
352  if (s->pict_type != AV_PICTURE_TYPE_B) {
353  s->last_picture_ptr = s->next_picture_ptr;
354  if (!s->droppable)
355  s->next_picture_ptr = s->current_picture_ptr;
356  }
357  ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
358  s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
359  s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
360  s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
361  s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
362  s->pict_type, s->droppable);
363 
364  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
365  (s->pict_type != AV_PICTURE_TYPE_I)) {
366  int h_chroma_shift, v_chroma_shift;
367  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
368  &h_chroma_shift, &v_chroma_shift);
369  if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
371  "allocating dummy last picture for B frame\n");
372  else if (s->pict_type != AV_PICTURE_TYPE_I)
374  "warning: first frame is no keyframe\n");
375 
376  /* Allocate a dummy frame */
377  idx = ff_find_unused_picture(s->avctx, s->picture, 0);
378  if (idx < 0) {
379  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
380  return idx;
381  }
382  s->last_picture_ptr = &s->picture[idx];
383 
384  s->last_picture_ptr->reference = 3;
385  s->last_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
386  s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
387 
388  if (alloc_picture(s, s->last_picture_ptr) < 0) {
389  s->last_picture_ptr = NULL;
390  return -1;
391  }
392 
393  if (!avctx->hwaccel) {
394  for (int i = 0; i < avctx->height; i++)
395  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
396  0x80, avctx->width);
397  if (s->last_picture_ptr->f->data[2]) {
398  for (int i = 0; i < AV_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
399  memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
400  0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
401  memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
402  0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
403  }
404  }
405 
406  if (s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263) {
407  for (int i = 0; i < avctx->height; i++)
408  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0] * i,
409  16, avctx->width);
410  }
411  }
412 
413  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
414  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
415  }
416  if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
417  s->pict_type == AV_PICTURE_TYPE_B) {
418  /* Allocate a dummy frame */
419  idx = ff_find_unused_picture(s->avctx, s->picture, 0);
420  if (idx < 0) {
421  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
422  return idx;
423  }
424  s->next_picture_ptr = &s->picture[idx];
425 
426  s->next_picture_ptr->reference = 3;
427  s->next_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
428  s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
429 
430  if (alloc_picture(s, s->next_picture_ptr) < 0) {
431  s->next_picture_ptr = NULL;
432  return -1;
433  }
434  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
435  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
436  }
437 
438 #if 0 // BUFREF-FIXME
439  memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
440  memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
441 #endif
442  if (s->last_picture_ptr) {
443  if (s->last_picture_ptr->f->buf[0] &&
444  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
445  s->last_picture_ptr)) < 0)
446  return ret;
447  }
448  if (s->next_picture_ptr) {
449  if (s->next_picture_ptr->f->buf[0] &&
450  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
451  s->next_picture_ptr)) < 0)
452  return ret;
453  }
454 
455  av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
456  s->last_picture_ptr->f->buf[0]));
457 
458  if (s->picture_structure != PICT_FRAME) {
459  for (int i = 0; i < 4; i++) {
460  if (s->picture_structure == PICT_BOTTOM_FIELD) {
461  s->current_picture.f->data[i] = FF_PTR_ADD(s->current_picture.f->data[i],
462  s->current_picture.f->linesize[i]);
463  }
464  s->current_picture.f->linesize[i] *= 2;
465  s->last_picture.f->linesize[i] *= 2;
466  s->next_picture.f->linesize[i] *= 2;
467  }
468  }
469 
470  /* set dequantizer, we can't do it during init as
471  * it might change for MPEG-4 and we can't do it in the header
472  * decode as init is not called for MPEG-4 there yet */
473  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
474  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
475  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
476  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
477  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
478  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
479  } else {
480  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
481  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
482  }
483 
484  if (s->avctx->debug & FF_DEBUG_NOMC)
485  gray_frame(s->current_picture_ptr->f);
486 
487  return 0;
488 }
489 
490 /* called after a frame has been decoded. */
492 {
493  emms_c();
494 
495  if (s->current_picture.reference)
496  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
497 }
498 
/**
 * Thin wrapper around ff_print_debug_info2(): dump the per-macroblock
 * debug tables of picture @p (MB types, qscale values, motion vectors)
 * for the output frame @p pict, using the geometry stored in @p s.
 */
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
{
    ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
                         p->qscale_table, p->motion_val,
                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
}
505 
506 int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
507 {
508  AVVideoEncParams *par;
509  int mult = (qp_type == FF_MPV_QSCALE_TYPE_MPEG1) ? 2 : 1;
510  unsigned int nb_mb = p->alloc_mb_height * p->alloc_mb_width;
511 
512  if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
513  return 0;
514 
516  if (!par)
517  return AVERROR(ENOMEM);
518 
519  for (unsigned y = 0; y < p->alloc_mb_height; y++)
520  for (unsigned x = 0; x < p->alloc_mb_width; x++) {
521  const unsigned int block_idx = y * p->alloc_mb_width + x;
522  const unsigned int mb_xy = y * p->alloc_mb_stride + x;
523  AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);
524 
525  b->src_x = x * 16;
526  b->src_y = y * 16;
527  b->w = 16;
528  b->h = 16;
529 
530  b->delta_qp = p->qscale_table[mb_xy] * mult;
531  }
532 
533  return 0;
534 }
535 
537 {
538  ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
539  s->last_picture_ptr ? s->last_picture_ptr->f : NULL,
540  y, h, s->picture_structure,
541  s->first_field, s->low_delay);
542 }
543 
545 {
546  MpegEncContext *const s = avctx->priv_data;
547 
548  if (!s->picture)
549  return;
550 
551  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
552  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
553  s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
554 
555  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
556  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
557  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
558 
559  s->mb_x = s->mb_y = 0;
560 
561  s->bitstream_buffer_size = 0;
562  s->pp_time = 0;
563 }
564 
566 {
567  if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
568  ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
569 }
570 
571 
573  uint8_t *dest, const uint8_t *src,
574  int field_based, int field_select,
575  int src_x, int src_y,
576  int width, int height, ptrdiff_t stride,
577  int h_edge_pos, int v_edge_pos,
578  int w, int h, const h264_chroma_mc_func *pix_op,
579  int motion_x, int motion_y)
580 {
581  const int lowres = s->avctx->lowres;
582  const int op_index = FFMIN(lowres, 3);
583  const int s_mask = (2 << lowres) - 1;
584  int emu = 0;
585  int sx, sy;
586 
587  if (s->quarter_sample) {
588  motion_x /= 2;
589  motion_y /= 2;
590  }
591 
592  sx = motion_x & s_mask;
593  sy = motion_y & s_mask;
594  src_x += motion_x >> lowres + 1;
595  src_y += motion_y >> lowres + 1;
596 
597  src += src_y * stride + src_x;
598 
599  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
600  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
601  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
602  s->linesize, s->linesize,
603  w + 1, (h + 1) << field_based,
604  src_x, src_y * (1 << field_based),
606  src = s->sc.edge_emu_buffer;
607  emu = 1;
608  }
609 
610  sx = (sx << 2) >> lowres;
611  sy = (sy << 2) >> lowres;
612  if (field_select)
613  src += s->linesize;
614  pix_op[op_index](dest, src, stride, h, sx, sy);
615  return emu;
616 }
617 
618 /* apply one mpeg motion vector to the three components */
620  uint8_t *dest_y,
621  uint8_t *dest_cb,
622  uint8_t *dest_cr,
623  int field_based,
624  int bottom_field,
625  int field_select,
626  uint8_t *const *ref_picture,
627  const h264_chroma_mc_func *pix_op,
628  int motion_x, int motion_y,
629  int h, int mb_y)
630 {
631  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
632  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
633  ptrdiff_t uvlinesize, linesize;
634  const int lowres = s->avctx->lowres;
635  const int op_index = FFMIN(lowres - 1 + s->chroma_x_shift, 3);
636  const int block_s = 8 >> lowres;
637  const int s_mask = (2 << lowres) - 1;
638  const int h_edge_pos = s->h_edge_pos >> lowres;
639  const int v_edge_pos = s->v_edge_pos >> lowres;
640  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
641  linesize = s->current_picture.f->linesize[0] << field_based;
642  uvlinesize = s->current_picture.f->linesize[1] << field_based;
643 
644  // FIXME obviously not perfect but qpel will not work in lowres anyway
645  if (s->quarter_sample) {
646  motion_x /= 2;
647  motion_y /= 2;
648  }
649 
650  if (field_based) {
651  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
652  }
653 
654  sx = motion_x & s_mask;
655  sy = motion_y & s_mask;
656  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
657  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
658 
659  if (s->out_format == FMT_H263) {
660  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
661  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
662  uvsrc_x = src_x >> 1;
663  uvsrc_y = src_y >> 1;
664  } else if (s->out_format == FMT_H261) {
665  // even chroma mv's are full pel in H261
666  mx = motion_x / 4;
667  my = motion_y / 4;
668  uvsx = (2 * mx) & s_mask;
669  uvsy = (2 * my) & s_mask;
670  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
671  uvsrc_y = mb_y * block_s + (my >> lowres);
672  } else {
673  if (s->chroma_y_shift) {
674  mx = motion_x / 2;
675  my = motion_y / 2;
676  uvsx = mx & s_mask;
677  uvsy = my & s_mask;
678  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
679  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
680  } else {
681  if (s->chroma_x_shift) {
682  //Chroma422
683  mx = motion_x / 2;
684  uvsx = mx & s_mask;
685  uvsy = motion_y & s_mask;
686  uvsrc_y = src_y;
687  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
688  } else {
689  //Chroma444
690  uvsx = motion_x & s_mask;
691  uvsy = motion_y & s_mask;
692  uvsrc_x = src_x;
693  uvsrc_y = src_y;
694  }
695  }
696  }
697 
698  ptr_y = ref_picture[0] + src_y * linesize + src_x;
699  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
700  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
701 
702  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
703  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - FFMAX(h, hc<<s->chroma_y_shift), 0)) {
704  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
705  linesize >> field_based, linesize >> field_based,
706  17, 17 + field_based,
707  src_x, src_y * (1 << field_based), h_edge_pos,
708  v_edge_pos);
709  ptr_y = s->sc.edge_emu_buffer;
710  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
711  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
712  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
713  if (s->workaround_bugs & FF_BUG_IEDGE)
714  vbuf -= s->uvlinesize;
715  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
716  uvlinesize >> field_based, uvlinesize >> field_based,
717  9, 9 + field_based,
718  uvsrc_x, uvsrc_y * (1 << field_based),
719  h_edge_pos >> 1, v_edge_pos >> 1);
720  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
721  uvlinesize >> field_based,uvlinesize >> field_based,
722  9, 9 + field_based,
723  uvsrc_x, uvsrc_y * (1 << field_based),
724  h_edge_pos >> 1, v_edge_pos >> 1);
725  ptr_cb = ubuf;
726  ptr_cr = vbuf;
727  }
728  }
729 
730  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
731  if (bottom_field) {
732  dest_y += s->linesize;
733  dest_cb += s->uvlinesize;
734  dest_cr += s->uvlinesize;
735  }
736 
737  if (field_select) {
738  ptr_y += s->linesize;
739  ptr_cb += s->uvlinesize;
740  ptr_cr += s->uvlinesize;
741  }
742 
743  sx = (sx << 2) >> lowres;
744  sy = (sy << 2) >> lowres;
745  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
746 
747  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
748  uvsx = (uvsx << 2) >> lowres;
749  uvsy = (uvsy << 2) >> lowres;
750  if (hc) {
751  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
752  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
753  }
754  }
755  // FIXME h261 lowres loop filter
756 }
757 
759  uint8_t *dest_cb, uint8_t *dest_cr,
760  uint8_t *const *ref_picture,
761  const h264_chroma_mc_func * pix_op,
762  int mx, int my)
763 {
764  const int lowres = s->avctx->lowres;
765  const int op_index = FFMIN(lowres, 3);
766  const int block_s = 8 >> lowres;
767  const int s_mask = (2 << lowres) - 1;
768  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
769  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
770  int emu = 0, src_x, src_y, sx, sy;
771  ptrdiff_t offset;
772  const uint8_t *ptr;
773 
774  if (s->quarter_sample) {
775  mx /= 2;
776  my /= 2;
777  }
778 
779  /* In case of 8X8, we construct a single chroma motion vector
780  with a special rounding */
781  mx = ff_h263_round_chroma(mx);
782  my = ff_h263_round_chroma(my);
783 
784  sx = mx & s_mask;
785  sy = my & s_mask;
786  src_x = s->mb_x * block_s + (mx >> lowres + 1);
787  src_y = s->mb_y * block_s + (my >> lowres + 1);
788 
789  offset = src_y * s->uvlinesize + src_x;
790  ptr = ref_picture[1] + offset;
791  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
792  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
793  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
794  s->uvlinesize, s->uvlinesize,
795  9, 9,
796  src_x, src_y, h_edge_pos, v_edge_pos);
797  ptr = s->sc.edge_emu_buffer;
798  emu = 1;
799  }
800  sx = (sx << 2) >> lowres;
801  sy = (sy << 2) >> lowres;
802  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
803 
804  ptr = ref_picture[2] + offset;
805  if (emu) {
806  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
807  s->uvlinesize, s->uvlinesize,
808  9, 9,
809  src_x, src_y, h_edge_pos, v_edge_pos);
810  ptr = s->sc.edge_emu_buffer;
811  }
812  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
813 }
814 
815 /**
816  * motion compensation of a single macroblock
817  * @param s context
818  * @param dest_y luma destination pointer
819  * @param dest_cb chroma cb/u destination pointer
820  * @param dest_cr chroma cr/v destination pointer
821  * @param dir direction (0->forward, 1->backward)
822  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
823  * @param pix_op halfpel motion compensation function (average or put normally)
824  * the motion vectors are taken from s->mv and the MV type from s->mv_type
825  */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t *const *ref_picture,
                                     const h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y;
    const int lowres = s->avctx->lowres;
    /* macroblock half-size in lowres samples: 8, 4, 2 or 1 */
    const int block_s = 8 >> lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* a single motion vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four independent luma vectors, one per 8x8 (full-res) block;
         * chroma uses a single vector derived from their sum below */
        mx = 0;
        my = 0;
        for (int i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture with field MVs: predict each field separately */
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture: fall back to the current picture's planes when
             * the selected reference field is unavailable (presumably the
             * same-parity field of the frame being decoded) */
            if (   s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
                || !ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, one for the upper and one for the lower 16x8 half */
        for (int i = 0; i < 2; i++) {
            uint8_t *const *ref2picture;

            if ((s->picture_structure == s->field_select[dir][i] + 1 ||
                 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) &&
                ref_picture[0]) {
                ref2picture = ref_picture;
            } else {
                /* reference field not yet available: use the current picture */
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: average predictions from both field parities */
        if (s->picture_structure == PICT_FRAME) {
            for (int i = 0; i < 2; i++) {
                for (int j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            if (!ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            for (int i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
958 
959 /**
960  * find the lowest MB row referenced in the MVs
961  */
963 {
964  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
965  int off, mvs;
966 
967  if (s->picture_structure != PICT_FRAME || s->mcsel)
968  goto unhandled;
969 
970  switch (s->mv_type) {
971  case MV_TYPE_16X16:
972  mvs = 1;
973  break;
974  case MV_TYPE_16X8:
975  mvs = 2;
976  break;
977  case MV_TYPE_8X8:
978  mvs = 4;
979  break;
980  default:
981  goto unhandled;
982  }
983 
984  for (int i = 0; i < mvs; i++) {
985  int my = s->mv[dir][i][1];
986  my_max = FFMAX(my_max, my);
987  my_min = FFMIN(my_min, my);
988  }
989 
990  off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;
991 
992  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
993 unhandled:
994  return s->mb_height - 1;
995 }
996 
997 /* add block[] to dest[] */
998 static inline void add_dct(MpegEncContext *s,
999  int16_t *block, int i, uint8_t *dest, int line_size)
1000 {
1001  if (s->block_last_index[i] >= 0) {
1002  s->idsp.idct_add(dest, line_size, block);
1003  }
1004 }
1005 
1006 #define IS_ENCODER 0
1008 
1010 {
1011  if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
1012  /* print DCT coefficients */
1013  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1014  for (int i = 0; i < 6; i++) {
1015  for (int j = 0; j < 64; j++) {
1016  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1017  block[i][s->idsp.idct_permutation[j]]);
1018  }
1019  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1020  }
1021  }
1022 
1023  if (!s->avctx->lowres) {
1024 #if !CONFIG_SMALL
1025  if (s->out_format == FMT_MPEG1)
1027  else
1029 #else
1031 #endif
1032  } else
1034 }
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:98
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:682
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:52
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1431
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:262
av_clip
#define av_clip
Definition: common.h:96
ff_mpeg_framesize_alloc
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
Definition: mpegpicture.c:89
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
AVERROR
In libavfilter, the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats (for video that means pixel format; for audio that means channel layout and sample format). These lists are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1337
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:506
ff_mpv_init_context_frame
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:539
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:521
MAY_BE_MPEG12
#define MAY_BE_MPEG12
Definition: mpv_reconstruct_mb_template.c:24
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:264
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:937
NOT_MPEG12
#define NOT_MPEG12
Definition: mpv_reconstruct_mb_template.c:23
mpv_reconstruct_mb_template.c
Picture::alloc_mb_width
int alloc_mb_width
mb_width used to allocate tables
Definition: mpegpicture.h:65
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:170
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:565
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
last_picture
enum AVPictureType last_picture
Definition: movenc.c:69
b
#define b
Definition: input.c:41
MpegEncContext::next_picture
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:158
ff_toupper4
unsigned int ff_toupper4(unsigned int x)
Definition: to_upper4.h:29
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:291
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:85
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Picture
Picture.
Definition: mpegpicture.h:46
mpegutils.h
thread.h
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:266
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:641
AV_VIDEO_ENC_PARAMS_MPEG2
@ AV_VIDEO_ENC_PARAMS_MPEG2
Definition: video_enc_params.h:65
DEFINITELY_MPEG12
#define DEFINITELY_MPEG12
Definition: mpv_reconstruct_mb_template.c:25
gray_frame
static void gray_frame(AVFrame *frame)
Definition: mpegvideo_dec.c:248
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:1009
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:446
fail
#define fail()
Definition: checkasm.h:138
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:124
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2964
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo_dec.c:572
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:632
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:499
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:60
avassert.h
mpegvideodec.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:586
emms_c
#define emms_c()
Definition: emms.h:63
ff_mpv_common_frame_size_change
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:192
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture; frees the picture tables in case they need to be reallocated anyway.
Definition: mpegpicture.c:307
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:51
MpegEncContext::h_edge_pos
int h_edge_pos
Definition: mpegvideo.h:122
s1
#define s1
Definition: regdef.h:38
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
FMT_H261
@ FMT_H261
Definition: mpegutils.h:118
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
UPDATE_PICTURE
#define UPDATE_PICTURE(pic)
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
limits.h
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:77
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:448
threadframe.h
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:273
NULL
#define NULL
Definition: coverity.c:32
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpv_reconstruct_mb_template.c:56
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:284
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:342
AVFrame::coded_picture_number
attribute_deprecated int coded_picture_number
picture number in bitstream order
Definition: frame.h:474
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:90
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1352
lowres
static int lowres
Definition: ffplay.c:332
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:249
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::private_ctx
void * private_ctx
Definition: mpegvideo.h:88
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:263
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo_dec.c:998
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1390
f
f
Definition: af_crystalizer.c:121
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:314
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:366
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:122
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
h264chroma.h
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:536
Picture::alloc_mb_height
int alloc_mb_height
mb_height used to allocate tables
Definition: mpegpicture.h:66
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:268
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:544
height
#define height
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:265
Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:54
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo_dec.c:962
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
emms.h
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:125
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:244
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:44
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo_dec.c:619
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:284
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:49
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
AVCodecContext::height
int height
Definition: avcodec.h:617
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo_dec.c:826
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:636
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1399
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo_dec.c:758
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
frame
These buffered frames must be flushed immediately if a new input produces new ones; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if the filter should request a frame on one of its inputs, it does so repeatedly until at least one frame has been pushed, returning success or at least making progress towards producing a frame.
Definition: filter_design.txt:264
Picture::qscale_table
int8_t * qscale_table
Definition: mpegpicture.h:51
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic)
Definition: mpegvideo_dec.c:240
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:407
ff_mpv_free_context_frame
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:760
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:60
AVCodecContext
main external API structure.
Definition: avcodec.h:437
Picture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:57
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:47
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:491
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:632
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
ff_mpv_init_duplicate_contexts
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:410
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:462
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:464
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:617
imgutils.h
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Definition: video_enc_params.h:143
h
h
Definition: vp9dsp_template.c:2038
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
Picture::alloc_mb_stride
int alloc_mb_stride
mb_stride used to allocate tables
Definition: mpegpicture.h:67
video_enc_params.h
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint8_t *mbskip_table, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:158