FFmpeg
mpegvideo_dec.c
Go to the documentation of this file.
1 /*
2  * Common mpeg video decoding code
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <limits.h>
24 
25 #include "config_components.h"
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/emms.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
32 
33 #include "avcodec.h"
34 #include "decode.h"
35 #include "h263.h"
36 #include "h264chroma.h"
37 #include "internal.h"
38 #include "mpegutils.h"
39 #include "mpegvideo.h"
40 #include "mpegvideodec.h"
41 #include "mpeg4videodec.h"
42 #include "libavutil/refstruct.h"
43 #include "thread.h"
44 #include "threadprogress.h"
45 #include "wmv2dec.h"
46 
/*
 * H264_CHROMA_MC(OPNAME, OP) expands to a 1-pixel-wide bilinear chroma
 * motion-compensation routine (used as the lowres tab[3] entry below).
 * x and y are 1/8-pel fractional offsets, asserted to lie in 0..7;
 * A..D are the standard bilinear weights and always sum to 64, which is
 * why OP divides by 64 (">>6") with +32 rounding.
 * Three cases are specialized to avoid needless multiplies:
 *   D != 0      -> both offsets fractional: full 4-tap bilinear
 *   B + C != 0  -> exactly one axis fractional: 2-tap filter; 'step'
 *                  selects whether the 2nd tap is next row or next column
 *   otherwise   -> integer-pel position: weighted copy
 */
47 #define H264_CHROMA_MC(OPNAME, OP)\
48 static void OPNAME ## h264_chroma_mc1(uint8_t *dst /*align 8*/, const uint8_t *src /*align 1*/, ptrdiff_t stride, int h, int x, int y)\
49 {\
50  const int A = (8-x) * (8-y);\
51  const int B = ( x) * (8-y);\
52  const int C = (8-x) * ( y);\
53  const int D = ( x) * ( y);\
54  \
55  av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);\
56 \
57  if (D) {\
58  for (int i = 0; i < h; ++i) {\
59  OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
60  dst += stride;\
61  src += stride;\
62  }\
63  } else if (B + C) {\
64  const int E = B + C;\
65  const int step = C ? stride : 1;\
66  for (int i = 0; i < h; ++i) {\
67  OP(dst[0], (A*src[0] + E*src[step+0]));\
68  dst += stride;\
69  src += stride;\
70  }\
71  } else {\
72  for (int i = 0; i < h; ++i) {\
73  OP(dst[0], (A*src[0]));\
74  dst += stride;\
75  src += stride;\
76  }\
77  }\
78 }\
79 
/* op_put rounds and stores the filtered value; op_avg additionally
 * averages it with the existing destination pixel (bidir prediction).
 * NOTE(review): the H264_CHROMA_MC(put_/avg_, ...) instantiation lines
 * (original lines 83-84) are missing from this extract. */
80 #define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
81 #define op_put(a, b) a = (((b) + 32)>>6)
82 
85 
/* NOTE(review): the function signature line (original line 86) is missing
 * from this extract (Doxygen drops hyperlinked lines). From the body this
 * is the common mpegvideo decoder init taking an AVCodecContext *avctx
 * and an MpegEncContext *s — confirm against the upstream file. */
87 {
88  enum ThreadingStatus thread_status;
89 
91 
/* Mirror basic stream parameters from the codec context. */
92  s->avctx = avctx;
93  s->width = avctx->coded_width;
94  s->height = avctx->coded_height;
95  s->codec_id = avctx->codec->id;
96  s->workaround_bugs = avctx->workaround_bugs;
97 
98  /* convert fourcc to upper case */
99  s->codec_tag = ff_toupper4(avctx->codec_tag);
100 
102 
103  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
104  // lowres may use the following width 2 functions with a height of 1,
105  // yet the H.264 decoder uses them with at least two rows.
106  // Override them with the C versions so that ASM functions can process
107  // two rows at a time.
108  s->h264chroma.avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_8_c;
109  s->h264chroma.put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_8_c;
110  s->h264chroma.avg_h264_chroma_pixels_tab[3] = avg_h264_chroma_mc1;
111  s->h264chroma.put_h264_chroma_pixels_tab[3] = put_h264_chroma_mc1;
112 
113  if (s->picture_pool) // VC-1 can call this multiple times
114  return 0;
115 
/* Share one picture pool across frame-threaded context copies: only a
 * non-copy thread allocates; ff_thread_sync_ref propagates the pointer. */
116  thread_status = ff_thread_sync_ref(avctx, offsetof(MpegEncContext, picture_pool));
117  if (thread_status != FF_THREAD_IS_COPY) {
118  s->picture_pool = ff_mpv_alloc_pic_pool(thread_status != FF_THREAD_NO_FRAME_THREADING);
119  if (!s->picture_pool)
120  return AVERROR(ENOMEM);
121  }
122  return 0;
123 }
124 
/* NOTE(review): the first line of the signature (original line 125) is
 * missing from this extract; this is the frame-thread context-update
 * callback copying decode state from thread context 'src' into 'dst'. */
126  const AVCodecContext *src)
127 {
128  MpegEncContext *const s1 = src->priv_data;
129  MpegEncContext *const s = dst->priv_data;
130  int ret = 0;
131 
132  if (dst == src)
133  return 0;
134 
135  av_assert0(s != s1);
136 
/* Adopt new geometry on a resolution change or pending reinit.
 * NOTE(review): original line 140 — presumably the call whose negative
 * result triggers the 'return ret' below — is missing from this view. */
137  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
138  s->height = s1->height;
139  s->width = s1->width;
141  return ret;
142  ret = 1;
143  }
144 
145  s->quarter_sample = s1->quarter_sample;
146 
/* Carry the reference pictures over (replace = unref old, ref new). */
147  ff_mpv_replace_picture(&s->cur_pic, &s1->cur_pic);
148  ff_mpv_replace_picture(&s->last_pic, &s1->last_pic);
149  ff_mpv_replace_picture(&s->next_pic, &s1->next_pic);
150 
151  s->linesize = s1->linesize;
152  s->uvlinesize = s1->uvlinesize;
153 
154  // Error/bug resilience
155  s->workaround_bugs = s1->workaround_bugs;
156 
/* Bulk-copy the contiguous run of fields from last_time_base up to and
 * including pb_field_time inside MpegEncContext. */
157  // MPEG-4 timing info
158  memcpy(&s->last_time_base, &s1->last_time_base,
159  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
160  (char *) &s1->last_time_base);
161 
162  // B-frame info
163  s->low_delay = s1->low_delay;
164 
/* Same bulk-copy trick for progressive_sequence .. first_field. */
165  // MPEG-2/interlacing info
166  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
167  (char *) &s1->first_field + sizeof(s1->first_field) - (char *) &s1->progressive_sequence);
168 
/* ret is 1 after a geometry change, otherwise 0. */
169  return ret;
170 }
171 
/* NOTE(review): signature line missing (original line 172); this is the
 * decoder close callback. Original line 177 — presumably the common
 * mpegvideo context teardown call — is also absent from this extract. */
173 {
174  MpegEncContext *s = avctx->priv_data;
175 
/* Drop our reference on the shared picture pool (also NULLs the field). */
176  av_refstruct_pool_uninit(&s->picture_pool);
178  return 0;
179 }
180 
/* NOTE(review): signature line missing (original line 181); from the body
 * this re-initializes the per-frame structures after a mid-stream
 * resolution change. Original lines 188, 212 and 220 are absent from
 * this extract. */
182 {
183  int err = 0;
184 
185  if (!s->context_initialized)
186  return AVERROR(EINVAL);
187 
189 
/* The existing references were sized for the old geometry; drop them. */
190  ff_mpv_unref_picture(&s->last_pic);
191  ff_mpv_unref_picture(&s->next_pic);
192  ff_mpv_unref_picture(&s->cur_pic);
193 
194  if ((s->width || s->height) &&
195  (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
196  goto fail;
197 
198  /* set chroma shifts */
199  err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
200  &s->chroma_x_shift,
201  &s->chroma_y_shift);
202  if (err < 0)
203  goto fail;
204 
205  if ((err = ff_mpv_init_context_frame(s)))
206  goto fail;
207 
/* Single-threaded default: slot 0 of the slice-thread table is 's'. */
208  memset(s->thread_context, 0, sizeof(s->thread_context));
209  s->thread_context[0] = s;
210 
/* NOTE(review): original line 212 — presumably the call producing the
 * 'err' checked below — is missing from this extract. */
211  if (s->width && s->height) {
213  if (err < 0)
214  goto fail;
215  }
216  s->context_reinit = 0;
217 
218  return 0;
219  fail:
/* Keep the context flagged so the next decode attempt retries reinit. */
221  s->context_reinit = 1;
222  return err;
223 }
224 
/**
 * Take a picture from the shared pool and allocate its frame buffer and
 * per-picture accessory buffers. 'reference' marks the picture as a
 * reference for the buffer allocator. Returns 0 or a negative AVERROR.
 */
225 static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
226 {
227  AVCodecContext *avctx = s->avctx;
228  MPVPicture *pic = av_refstruct_pool_get(s->picture_pool);
229  int ret;
230 
231  if (!pic)
232  return AVERROR(ENOMEM);
233 
234  dst->ptr = pic;
235 
236  pic->reference = reference;
237 
238  /* WM Image / Screen codecs allocate internal buffers with different
239  * dimensions / colorspaces; ignore user-defined callbacks for these. */
240  if (avctx->codec_id != AV_CODEC_ID_WMV3IMAGE &&
241  avctx->codec_id != AV_CODEC_ID_VC1IMAGE &&
242  avctx->codec_id != AV_CODEC_ID_MSS2) {
243  ret = ff_thread_get_buffer(avctx, pic->f,
244  reference ? AV_GET_BUFFER_FLAG_REF : 0);
245  } else {
246  pic->f->width = avctx->width;
247  pic->f->height = avctx->height;
248  pic->f->format = avctx->pix_fmt;
249  ret = avcodec_default_get_buffer2(avctx, pic->f, 0);
250  }
251  if (ret < 0)
252  goto fail;
253 
/* Reject buffers whose linesize disagrees with the context's. */
254  ret = ff_mpv_pic_check_linesize(avctx, pic->f, &s->linesize, &s->uvlinesize);
255  if (ret < 0)
256  goto fail;
257 
/* NOTE(review): original line 258 — presumably a hwaccel/private-data
 * allocation whose result is checked below — is missing from this view. */
259  if (ret < 0)
260  goto fail;
261 
/* The pools must already be sized for the current MB geometry. */
262  av_assert1(s->mb_width == s->buffer_pools.alloc_mb_width);
263  av_assert1(s->mb_height == s->buffer_pools.alloc_mb_height ||
264  FFALIGN(s->mb_height, 2) == s->buffer_pools.alloc_mb_height);
265  av_assert1(s->mb_stride == s->buffer_pools.alloc_mb_stride);
266  ret = ff_mpv_alloc_pic_accessories(s->avctx, dst, &s->sc,
267  &s->buffer_pools, s->mb_height);
268  if (ret < 0)
269  goto fail;
270 
271  return 0;
272 fail:
/* NOTE(review): original line 273 — presumably the unref of the partly
 * initialized work picture — is missing from this extract. */
274  return ret;
275 }
276 
/* NOTE(review): signature line missing (original line 277); allocates a
 * placeholder reference picture (e.g. when a stream starts on a B frame),
 * flags it as dummy and immediately reports it fully decoded so
 * frame-threaded consumers never wait on it. */
278 {
279  MPVPicture *pic;
280  int ret = alloc_picture(s, dst, 1);
281  if (ret < 0)
282  return ret;
283 
284  pic = dst->ptr;
285  pic->dummy = 1;
286 
/* "All rows done": other threads must not block on this fake frame. */
287  ff_thread_progress_report(&pic->progress, INT_MAX);
288 
289  return 0;
290 }
291 
292 static void color_frame(AVFrame *frame, int luma)
293 {
294  int h_chroma_shift, v_chroma_shift;
295 
296  for (int i = 0; i < frame->height; i++)
297  memset(frame->data[0] + frame->linesize[0] * i, luma, frame->width);
298 
299  if (!frame->data[1])
300  return;
301  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
302  for (int i = 0; i < AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
303  memset(frame->data[1] + frame->linesize[1] * i,
304  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
305  memset(frame->data[2] + frame->linesize[2] * i,
306  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
307  }
308 }
309 
/* NOTE(review): signature line missing (original line 310); ensures the
 * last/next reference pictures exist before decoding starts, fabricating
 * dummy frames when the stream begins without proper references. */
311 {
312  AVCodecContext *avctx = s->avctx;
313  int ret;
314 
315  av_assert1(!s->last_pic.ptr || s->last_pic.ptr->f->buf[0]);
316  av_assert1(!s->next_pic.ptr || s->next_pic.ptr->f->buf[0]);
317  if (!s->last_pic.ptr && s->pict_type != AV_PICTURE_TYPE_I) {
318  if (s->pict_type == AV_PICTURE_TYPE_B && s->next_pic.ptr)
319  av_log(avctx, AV_LOG_DEBUG,
320  "allocating dummy last picture for B frame\n");
321  else if (s->codec_id != AV_CODEC_ID_H261 /* H.261 has no keyframes */ &&
322  (s->picture_structure == PICT_FRAME || s->first_field))
323  av_log(avctx, AV_LOG_ERROR,
324  "warning: first frame is no keyframe\n");
325 
326  /* Allocate a dummy frame */
327  ret = alloc_dummy_frame(s, &s->last_pic);
328  if (ret < 0)
329  return ret;
330 
/* Software path only (hwaccel surfaces cannot be written directly):
 * fill the fake reference with a flat color; FLV1/H.263 use luma 16,
 * everything else mid-gray 0x80. */
331  if (!avctx->hwaccel) {
332  int luma_val = s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263 ? 16 : 0x80;
333  color_frame(s->last_pic.ptr->f, luma_val);
334  }
335  }
336  if (!s->next_pic.ptr && s->pict_type == AV_PICTURE_TYPE_B) {
337  /* Allocate a dummy frame */
338  ret = alloc_dummy_frame(s, &s->next_pic);
339  if (ret < 0)
340  return ret;
341  }
342 
/* Post-condition: any non-I picture now has a usable last reference. */
343  av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_pic.ptr &&
344  s->last_pic.ptr->f->buf[0]));
345 
346  return 0;
347 }
348 
349 /**
350  * generic function called after decoding
351  * the header and before a frame is decoded.
352  */
/* NOTE(review): the signature line (original line 353) is missing from
 * this extract. */
354 {
355  int ret;
356 
357  s->mb_skipped = 0;
358 
359  if (!ff_thread_can_start_frame(avctx)) {
360  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
361  return AVERROR_BUG;
362  }
363 
/* Release the previous current picture and get a fresh one; B frames
 * and droppable frames are not allocated as references. */
364  ff_mpv_unref_picture(&s->cur_pic);
365  ret = alloc_picture(s, &s->cur_pic,
366  s->pict_type != AV_PICTURE_TYPE_B && !s->droppable);
367  if (ret < 0)
368  return ret;
369 
/* Propagate interlacing metadata onto the output frame. */
370  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !!s->top_field_first;
371  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_INTERLACED *
372  (!s->progressive_frame && !s->progressive_sequence);
373  s->cur_pic.ptr->field_picture = s->picture_structure != PICT_FRAME;
374 
375  s->cur_pic.ptr->f->pict_type = s->pict_type;
376  if (s->pict_type == AV_PICTURE_TYPE_I)
377  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
378  else
379  s->cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
380 
/* Advance the reference chain for non-B pictures: next becomes last,
 * current becomes next (unless droppable). */
381  if (s->pict_type != AV_PICTURE_TYPE_B) {
382  ff_mpv_workpic_from_pic(&s->last_pic, s->next_pic.ptr);
383  if (!s->droppable)
384  ff_mpv_workpic_from_pic(&s->next_pic, s->cur_pic.ptr);
385  }
386  ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
387  (void*)s->last_pic.ptr, (void*)s->next_pic.ptr, (void*)s->cur_pic.ptr,
388  s->last_pic.ptr ? s->last_pic.ptr->f->data[0] : NULL,
389  s->next_pic.ptr ? s->next_pic.ptr->f->data[0] : NULL,
390  s->cur_pic.ptr ? s->cur_pic.ptr->f->data[0] : NULL,
391  s->pict_type, s->droppable);
392 
/* NOTE(review): original line 393 — presumably the dummy-reference
 * allocation call whose result is checked below — is missing here. */
394  if (ret < 0)
395  return ret;
396 
397  if (s->avctx->debug & FF_DEBUG_NOMC)
398  color_frame(s->cur_pic.ptr->f, 0x80);
399 
400  return 0;
401 }
402 
403 /* called after a frame has been decoded. */
/* NOTE(review): signature line missing (original line 404). */
405 {
/* Reset x86 FPU/MMX state after any SIMD used during decoding. */
406  emms_c();
407 
/* Mark reference frames fully decoded so waiting frame threads resume. */
408  if (s->cur_pic.reference)
409  ff_thread_progress_report(&s->cur_pic.ptr->progress, INT_MAX);
410 }
411 
/* NOTE(review): signature line missing (original line 412); thin wrapper
 * forwarding this picture's per-MB debug tables (mb_type, qscale,
 * motion vectors) to ff_print_debug_info2(). */
413 {
414  ff_print_debug_info2(s->avctx, pict, p->mb_type,
415  p->qscale_table, p->motion_val,
416  p->mb_width, p->mb_height, p->mb_stride, s->quarter_sample);
417 }
418 
/* NOTE(review): the first signature line (original line 419) is missing
 * from this extract; exports the per-macroblock quantizer table as
 * AVVideoEncParams side data when the user requested it. */
420  const MPVPicture *p, int qp_type)
421 {
422  AVVideoEncParams *par;
/* MPEG-1-type qscale values are doubled on export. */
423  int mult = (qp_type == FF_MPV_QSCALE_TYPE_MPEG1) ? 2 : 1;
424  unsigned int nb_mb = p->mb_height * p->mb_width;
425 
426  if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
427  return 0;
428 
/* NOTE(review): original line 429 — presumably the side-data allocation
 * assigning 'par' (sized by nb_mb) — is missing from this extract. */
430  if (!par)
431  return AVERROR(ENOMEM);
432 
/* One 16x16 block entry per macroblock; note the table is indexed with
 * mb_stride while the export blocks are packed with mb_width. */
433  for (unsigned y = 0; y < p->mb_height; y++)
434  for (unsigned x = 0; x < p->mb_width; x++) {
435  const unsigned int block_idx = y * p->mb_width + x;
436  const unsigned int mb_xy = y * p->mb_stride + x;
437  AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);
438 
439  b->src_x = x * 16;
440  b->src_y = y * 16;
441  b->w = 16;
442  b->h = 16;
443 
444  b->delta_qp = p->qscale_table[mb_xy] * mult;
445  }
446 
447  return 0;
448 }
449 
/* NOTE(review): signature line missing (original line 450); forwards a
 * newly decoded band of 'h' luma rows starting at row 'y' to the generic
 * draw_horiz_band helper (which invokes the user callback). */
451 {
452  ff_draw_horiz_band(s->avctx, s->cur_pic.ptr->f,
453  s->last_pic.ptr ? s->last_pic.ptr->f : NULL,
454  y, h, s->picture_structure,
455  s->first_field, s->low_delay);
456 }
457 
/* NOTE(review): signature line missing (original line 458); decoder flush
 * callback — drops all reference pictures and resets decode position. */
459 {
460  MpegEncContext *const s = avctx->priv_data;
461 
462  ff_mpv_unref_picture(&s->cur_pic);
463  ff_mpv_unref_picture(&s->last_pic);
464  ff_mpv_unref_picture(&s->next_pic);
465 
466  s->mb_x = s->mb_y = 0;
467 
/* Reset MPEG-4 P-to-P frame distance used for direct-mode scaling. */
468  s->pp_time = 0;
469 }
470 
/* NOTE(review): the first signature line (original line 471) is missing
 * from this extract; from the parameters, this performs half-pel motion
 * compensation of one w x h block in lowres decoding. Returns 1 when the
 * edge-emulation buffer was used, else 0. */
472  uint8_t *dest, const uint8_t *src,
473  int field_based, int field_select,
474  int src_x, int src_y,
475  int width, int height, ptrdiff_t stride,
476  int h_edge_pos, int v_edge_pos,
477  int w, int h, const h264_chroma_mc_func *pix_op,
478  int motion_x, int motion_y)
479 {
480  const int lowres = s->avctx->lowres;
481  const int op_index = lowres;
482  const int s_mask = (2 << lowres) - 1;
483  int emu = 0;
484  int sx, sy;
485 
486  av_assert2(op_index <= 3);
487 
/* Treat quarter-pel vectors as half-pel in lowres. */
488  if (s->quarter_sample) {
489  motion_x /= 2;
490  motion_y /= 2;
491  }
492 
/* Split vector into sub-pel fraction and integer displacement; note the
 * shift is by (lowres + 1): '>>' binds looser than '+' in C. */
493  sx = motion_x & s_mask;
494  sy = motion_y & s_mask;
495  src_x += motion_x >> lowres + 1;
496  src_y += motion_y >> lowres + 1;
497 
498  src += src_y * stride + src_x;
499 
/* If the (w+1) x (h+1) source patch would read past the picture edge,
 * synthesize it into the edge-emulation buffer first. */
500  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
501  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
502  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
503  s->linesize, s->linesize,
504  w + 1, (h + 1) << field_based,
505  src_x, src_y * (1 << field_based),
506  h_edge_pos, v_edge_pos);
507  src = s->sc.edge_emu_buffer;
508  emu = 1;
509  }
510 
/* Rescale the sub-pel fraction into the 1/8-pel domain expected by the
 * h264 chroma MC helpers. */
511  sx = (sx << 2) >> lowres;
512  sy = (sy << 2) >> lowres;
513  if (field_select)
514  src += s->linesize;
515  pix_op[op_index](dest, src, stride, h, sx, sy);
516  return emu;
517 }
518 
519 /* apply one mpeg motion vector to the three components */
/* NOTE(review): the first signature line (original line 520) is missing
 * from this extract. */
521  uint8_t *dest_y,
522  uint8_t *dest_cb,
523  uint8_t *dest_cr,
524  int field_based,
525  int bottom_field,
526  int field_select,
527  uint8_t *const *ref_picture,
528  const h264_chroma_mc_func *pix_op,
529  int motion_x, int motion_y,
530  int h, int mb_y)
531 {
532  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
533  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
534  ptrdiff_t uvlinesize, linesize;
535  const int lowres = s->avctx->lowres;
/* Luma uses tab[lowres - 1]; 4:2:0/4:2:2 chroma is one scale smaller. */
536  const int op_index = lowres - 1 + s->chroma_x_shift;
537  const int block_s = 8 >> lowres;
538  const int s_mask = (2 << lowres) - 1;
539  const int h_edge_pos = s->h_edge_pos >> lowres;
540  const int v_edge_pos = s->v_edge_pos >> lowres;
541  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
542 
543  av_assert2(op_index <= 3);
544 
545  linesize = s->cur_pic.linesize[0] << field_based;
546  uvlinesize = s->cur_pic.linesize[1] << field_based;
547 
548  // FIXME obviously not perfect but qpel will not work in lowres anyway
549  if (s->quarter_sample) {
550  motion_x /= 2;
551  motion_y /= 2;
552  }
553 
554  if (field_based) {
555  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
556  }
557 
/* Note: '>> lowres + 1' parses as '>> (lowres + 1)'. */
558  sx = motion_x & s_mask;
559  sy = motion_y & s_mask;
560  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
561  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
562 
/* Derive the chroma source position per container family. */
563  if (s->out_format == FMT_H263) {
564  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
565  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
566  uvsrc_x = src_x >> 1;
567  uvsrc_y = src_y >> 1;
568  } else if (s->out_format == FMT_H261) {
569  // even chroma mv's are full pel in H261
570  mx = motion_x / 4;
571  my = motion_y / 4;
572  uvsx = (2 * mx) & s_mask;
573  uvsy = (2 * my) & s_mask;
574  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
575  uvsrc_y = mb_y * block_s + (my >> lowres);
576  } else {
577  if (s->chroma_y_shift) {
578  mx = motion_x / 2;
579  my = motion_y / 2;
580  uvsx = mx & s_mask;
581  uvsy = my & s_mask;
582  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
583  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
584  } else {
585  if (s->chroma_x_shift) {
586  //Chroma422
587  mx = motion_x / 2;
588  uvsx = mx & s_mask;
589  uvsy = motion_y & s_mask;
590  uvsrc_y = src_y;
591  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
592  } else {
593  //Chroma444
594  uvsx = motion_x & s_mask;
595  uvsy = motion_y & s_mask;
596  uvsrc_x = src_x;
597  uvsrc_y = src_y;
598  }
599  }
600  }
601 
602  ptr_y = ref_picture[0] + src_y * linesize + src_x;
603  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
604  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
605 
/* Edge emulation for all three planes when the read would go out of
 * bounds. Note '+' binds tighter than '<<' in the hc expression. */
606  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
607  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - FFMAX(h, field_select + hc<<s->chroma_y_shift), 0)) {
608  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
609  linesize >> field_based, linesize >> field_based,
610  17, 17 + field_based,
611  src_x, src_y * (1 << field_based), h_edge_pos,
612  v_edge_pos);
613  ptr_y = s->sc.edge_emu_buffer;
614  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
615  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
616  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
617  if (s->workaround_bugs & FF_BUG_IEDGE)
618  vbuf -= s->uvlinesize;
619  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
620  uvlinesize >> field_based, uvlinesize >> field_based,
621  9, 9 + field_based,
622  uvsrc_x, uvsrc_y * (1 << field_based),
623  h_edge_pos >> 1, v_edge_pos >> 1);
624  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
625  uvlinesize >> field_based,uvlinesize >> field_based,
626  9, 9 + field_based,
627  uvsrc_x, uvsrc_y * (1 << field_based),
628  h_edge_pos >> 1, v_edge_pos >> 1);
629  ptr_cb = ubuf;
630  ptr_cr = vbuf;
631  }
632  }
633 
634  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
635  if (bottom_field) {
636  dest_y += s->linesize;
637  dest_cb += s->uvlinesize;
638  dest_cr += s->uvlinesize;
639  }
640 
641  if (field_select) {
642  ptr_y += s->linesize;
643  ptr_cb += s->uvlinesize;
644  ptr_cr += s->uvlinesize;
645  }
646 
/* Rescale sub-pel fractions to 1/8-pel and run the MC ops. */
647  sx = (sx << 2) >> lowres;
648  sy = (sy << 2) >> lowres;
649  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
650 
651  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
652  uvsx = (uvsx << 2) >> lowres;
653  uvsy = (uvsy << 2) >> lowres;
654  if (hc) {
655  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
656  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
657  }
658  }
659  // FIXME h261 lowres loop filter
660 }
661 
/* NOTE(review): the first signature line (original line 661) is missing
 * from this extract; performs chroma motion compensation for the 4MV
 * (four 8x8 luma vectors) case, using a single averaged chroma vector. */
662  uint8_t *dest_cb, uint8_t *dest_cr,
663  uint8_t *const *ref_picture,
664  const h264_chroma_mc_func * pix_op,
665  int mx, int my)
666 {
667  const int lowres = s->avctx->lowres;
668  const int op_index = lowres;
669  const int block_s = 8 >> lowres;
670  const int s_mask = (2 << lowres) - 1;
/* Chroma plane is half size, hence the extra +1 in the edge shift. */
671  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
672  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
673  int emu = 0, src_x, src_y, sx, sy;
674  ptrdiff_t offset;
675  const uint8_t *ptr;
676 
677  av_assert2(op_index <= 3);
678 
679  if (s->quarter_sample) {
680  mx /= 2;
681  my /= 2;
682  }
683 
684  /* In case of 8X8, we construct a single chroma motion vector
685  with a special rounding */
/* NOTE(review): original lines 687-688 — presumably the chroma rounding
 * calls producing the final mx/my — are missing from this extract. */
689 
690  sx = mx & s_mask;
691  sy = my & s_mask;
692  src_x = s->mb_x * block_s + (mx >> lowres + 1);
693  src_y = s->mb_y * block_s + (my >> lowres + 1);
694 
695  offset = src_y * s->uvlinesize + src_x;
696  ptr = ref_picture[1] + offset;
697  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
698  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
699  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
700  s->uvlinesize, s->uvlinesize,
701  9, 9,
702  src_x, src_y, h_edge_pos, v_edge_pos);
703  ptr = s->sc.edge_emu_buffer;
704  emu = 1;
705  }
706  sx = (sx << 2) >> lowres;
707  sy = (sy << 2) >> lowres;
708  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
709 
/* Cr reuses the same offset/edge decision computed for Cb. */
710  ptr = ref_picture[2] + offset;
711  if (emu) {
712  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
713  s->uvlinesize, s->uvlinesize,
714  9, 9,
715  src_x, src_y, h_edge_pos, v_edge_pos);
716  ptr = s->sc.edge_emu_buffer;
717  }
718  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
719 }
720 
721 /**
722  * motion compensation of a single macroblock
723  * @param s context
724  * @param dest_y luma destination pointer
725  * @param dest_cb chroma cb/u destination pointer
726  * @param dest_cr chroma cr/v destination pointer
727  * @param dir direction (0->forward, 1->backward)
728  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
729  * @param pix_op halfpel motion compensation function (average or put normally)
730  * the motion vectors are taken from s->mv and the MV type from s->mv_type
731  */
732 static inline void MPV_motion_lowres(MpegEncContext *s,
733  uint8_t *dest_y, uint8_t *dest_cb,
734  uint8_t *dest_cr,
735  int dir, uint8_t *const *ref_picture,
736  const h264_chroma_mc_func *pix_op)
737 {
738  int mx, my;
739  int mb_x, mb_y;
740  const int lowres = s->avctx->lowres;
741  const int block_s = 8 >>lowres;
742 
743  mb_x = s->mb_x;
744  mb_y = s->mb_y;
745 
746  switch (s->mv_type) {
/* One vector for the whole 16x16 macroblock. */
747  case MV_TYPE_16X16:
748  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
749  0, 0, 0,
750  ref_picture, pix_op,
751  s->mv[dir][0][0], s->mv[dir][0][1],
752  2 * block_s, mb_y);
753  break;
/* Four 8x8 luma vectors; chroma is compensated once from their sum. */
754  case MV_TYPE_8X8:
755  mx = 0;
756  my = 0;
757  for (int i = 0; i < 4; i++) {
758  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
759  s->linesize) * block_s,
760  ref_picture[0], 0, 0,
761  (2 * mb_x + (i & 1)) * block_s,
762  (2 * mb_y + (i >> 1)) * block_s,
763  s->width, s->height, s->linesize,
764  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
765  block_s, block_s, pix_op,
766  s->mv[dir][i][0], s->mv[dir][i][1]);
767 
768  mx += s->mv[dir][i][0];
769  my += s->mv[dir][i][1];
770  }
771 
772  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
773  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
774  pix_op, mx, my);
775  break;
/* One vector per field (frame pictures), or a single field vector. */
776  case MV_TYPE_FIELD:
777  if (s->picture_structure == PICT_FRAME) {
778  /* top field */
779  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
780  1, 0, s->field_select[dir][0],
781  ref_picture, pix_op,
782  s->mv[dir][0][0], s->mv[dir][0][1],
783  block_s, mb_y);
784  /* bottom field */
785  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
786  1, 1, s->field_select[dir][1],
787  ref_picture, pix_op,
788  s->mv[dir][1][0], s->mv[dir][1][1],
789  block_s, mb_y);
790  } else {
/* Second-field P pictures referencing the opposite parity must read
 * from the frame currently being decoded. */
791  if (s->picture_structure != s->field_select[dir][0] + 1 &&
792  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
793  ref_picture = s->cur_pic.ptr->f->data;
794  }
795  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
796  0, 0, s->field_select[dir][0],
797  ref_picture, pix_op,
798  s->mv[dir][0][0],
799  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
800  }
801  break;
/* Two vectors, one per 16x8 half of the macroblock. */
802  case MV_TYPE_16X8:
803  for (int i = 0; i < 2; i++) {
804  uint8_t *const *ref2picture;
805 
806  if (s->picture_structure == s->field_select[dir][i] + 1 ||
807  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
808  ref2picture = ref_picture;
809  } else {
810  ref2picture = s->cur_pic.ptr->f->data;
811  }
812 
813  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
814  0, 0, s->field_select[dir][i],
815  ref2picture, pix_op,
816  s->mv[dir][i][0], s->mv[dir][i][1] +
817  2 * block_s * i, block_s, mb_y >> 1);
818 
819  dest_y += 2 * block_s * s->linesize;
820  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
821  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
822  }
823  break;
/* Dual prime: put the first prediction, then average in the
 * opposite-parity one (pix_op is switched to the avg table mid-loop). */
824  case MV_TYPE_DMV:
825  if (s->picture_structure == PICT_FRAME) {
826  for (int i = 0; i < 2; i++) {
827  for (int j = 0; j < 2; j++) {
828  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
829  1, j, j ^ i,
830  ref_picture, pix_op,
831  s->mv[dir][2 * i + j][0],
832  s->mv[dir][2 * i + j][1],
833  block_s, mb_y);
834  }
835  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
836  }
837  } else {
838  for (int i = 0; i < 2; i++) {
839  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
840  0, 0, s->picture_structure != i + 1,
841  ref_picture, pix_op,
842  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
843  2 * block_s, mb_y >> 1);
844 
845  // after put we make avg of the same block
846  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
847 
848  // opposite parity is always in the same
849  // frame if this is second field
850  if (!s->first_field) {
851  ref_picture = s->cur_pic.ptr->f->data;
852  }
853  }
854  }
855  break;
856  default:
857  av_unreachable("No other mpegvideo MV types exist");
858  }
859 }
860 
861 /**
862  * find the lowest MB row referenced in the MVs
863  */
/* NOTE(review): signature line missing (original line 864); takes the
 * prediction direction 'dir' and returns the lowest macroblock row of
 * the reference frame the current MB's vectors can touch (used to bound
 * frame-thread progress waits). */
865 {
866  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
867  int off, mvs;
868 
/* Field pictures and global MC are not analyzed — be conservative. */
869  if (s->picture_structure != PICT_FRAME || s->mcsel)
870  goto unhandled;
871 
872  switch (s->mv_type) {
873  case MV_TYPE_16X16:
874  mvs = 1;
875  break;
876  case MV_TYPE_16X8:
877  mvs = 2;
878  break;
879  case MV_TYPE_8X8:
880  mvs = 4;
881  break;
882  default:
883  goto unhandled;
884  }
885 
886  for (int i = 0; i < mvs; i++) {
887  int my = s->mv[dir][i][1];
888  my_max = FFMAX(my_max, my);
889  my_min = FFMIN(my_min, my);
890  }
891 
/* Convert the largest |vertical MV| to MB rows, rounding up:
 * half-pel vectors are first scaled to quarter-pel (<< qpel_shift),
 * then (+63) >> 6 gives ceil(x / 64) = ceil(pels / 16). */
892  off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;
893 
894  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
/* Fallback: assume the entire frame may be referenced. */
895 unhandled:
896  return s->mb_height - 1;
897 }
898 
899 /* add block[] to dest[] */
900 static inline void add_dct(MpegEncContext *s,
901  int16_t block[][64], int i, uint8_t *dest, int line_size)
902 {
903  if (s->block_last_index[i] >= 0) {
904  s->idsp.idct_add(dest, line_size, block[i]);
905  }
906 }
907 
908 /* put block[] to dest[] */
/* Dequantizes an intra block in place, then writes (not adds) its IDCT
 * to dest. Runs unconditionally — no coded-coefficient skip check here,
 * unlike add_dct()/add_dequant_dct(). */
909 static inline void put_dct(MpegEncContext *s,
910  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
911 {
912  s->dct_unquantize_intra(s, block, i, qscale);
913  s->idsp.idct_put(dest, line_size, block);
914 }
915 
916 static inline void add_dequant_dct(MpegEncContext *s,
917  int16_t block[][64], int i, uint8_t *dest, int line_size, int qscale)
918 {
919  if (s->block_last_index[i] >= 0) {
920  s->dct_unquantize_inter(s, block[i], i, qscale);
921 
922  s->idsp.idct_add(dest, line_size, block[i]);
923  }
924 }
925 
/* Compile-time specialization selector passed as 'is_mpeg12' to
 * mpv_reconstruct_mb_internal(): when the codec family is known at
 * compile time, the IS_MPEG12_H261() check constant-folds away. */
926 #define NOT_MPEG12_H261 0
927 #define MAY_BE_MPEG12_H261 1
928 #define DEFINITELY_MPEG12_H261 2
929 
930 /* generic function called after a macroblock has been parsed by the decoder.
931 
932  Important variables used:
933  s->mb_intra : true if intra macroblock
934  s->mv_dir : motion vector direction
935  s->mv_type : motion vector type
936  s->mv : motion vector
937  s->interlaced_dct : true if interlaced dct used (mpeg2)
938  */
939 static av_always_inline
941  int lowres_flag, int is_mpeg12)
942 {
943 #define IS_MPEG12_H261(s) (is_mpeg12 == MAY_BE_MPEG12_H261 ? ((s)->out_format <= FMT_H261) : is_mpeg12)
944  uint8_t *dest_y = s->dest[0], *dest_cb = s->dest[1], *dest_cr = s->dest[2];
945  int dct_linesize, dct_offset;
946  const int linesize = s->cur_pic.linesize[0]; //not s->linesize as this would be wrong for field pics
947  const int uvlinesize = s->cur_pic.linesize[1];
948  const int block_size = lowres_flag ? 8 >> s->avctx->lowres : 8;
949 
950  dct_linesize = linesize << s->interlaced_dct;
951  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
952 
953  if (!s->mb_intra) {
954  /* motion handling */
955  if (HAVE_THREADS && is_mpeg12 != DEFINITELY_MPEG12_H261 &&
956  s->avctx->active_thread_type & FF_THREAD_FRAME) {
957  if (s->mv_dir & MV_DIR_FORWARD) {
958  ff_thread_progress_await(&s->last_pic.ptr->progress,
960  }
961  if (s->mv_dir & MV_DIR_BACKWARD) {
962  ff_thread_progress_await(&s->next_pic.ptr->progress,
964  }
965  }
966 
967  if (lowres_flag) {
968  const h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
969 
970  if (s->mv_dir & MV_DIR_FORWARD) {
971  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_pic.data, op_pix);
972  op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
973  }
974  if (s->mv_dir & MV_DIR_BACKWARD) {
975  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_pic.data, op_pix);
976  }
977  } else {
978  const op_pixels_func (*op_pix)[4];
979  const qpel_mc_func (*op_qpix)[16];
980 
981  if ((is_mpeg12 == DEFINITELY_MPEG12_H261 || !s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
982  op_pix = s->hdsp.put_pixels_tab;
983  op_qpix = s->qdsp.put_qpel_pixels_tab;
984  } else {
985  op_pix = s->hdsp.put_no_rnd_pixels_tab;
986  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
987  }
988  if (s->mv_dir & MV_DIR_FORWARD) {
989  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_pic.data, op_pix, op_qpix);
990  op_pix = s->hdsp.avg_pixels_tab;
991  op_qpix = s->qdsp.avg_qpel_pixels_tab;
992  }
993  if (s->mv_dir & MV_DIR_BACKWARD) {
994  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_pic.data, op_pix, op_qpix);
995  }
996  }
997 
998  /* skip dequant / idct if we are really late ;) */
999  if (s->avctx->skip_idct) {
1000  if ( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
1001  ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
1002  || s->avctx->skip_idct >= AVDISCARD_ALL)
1003  return;
1004  }
1005 
1006  /* add dct residue */
1007  if (is_mpeg12 != DEFINITELY_MPEG12_H261 && s->dct_unquantize_inter) {
1008  // H.263, H.263+, H.263I, FLV, RV10, RV20 and MPEG-4 with MPEG-2 quantization
1009  add_dequant_dct(s, block, 0, dest_y , dct_linesize, s->qscale);
1010  add_dequant_dct(s, block, 1, dest_y + block_size, dct_linesize, s->qscale);
1011  add_dequant_dct(s, block, 2, dest_y + dct_offset , dct_linesize, s->qscale);
1012  add_dequant_dct(s, block, 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1013 
1014  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1015  av_assert2(s->chroma_y_shift);
1016  add_dequant_dct(s, block, 4, dest_cb, uvlinesize, s->chroma_qscale);
1017  add_dequant_dct(s, block, 5, dest_cr, uvlinesize, s->chroma_qscale);
1018  }
1019  } else if (is_mpeg12 == DEFINITELY_MPEG12_H261 || lowres_flag || (s->codec_id != AV_CODEC_ID_WMV2)) {
1020  // H.261, MPEG-1, MPEG-2, MPEG-4 with H.263 quantization,
1021  // MSMP4V1-3 and WMV1.
1022  // Also RV30, RV40 and the VC-1 family when performing error resilience,
1023  // but all blocks are skipped in this case.
1024  add_dct(s, block, 0, dest_y , dct_linesize);
1025  add_dct(s, block, 1, dest_y + block_size, dct_linesize);
1026  add_dct(s, block, 2, dest_y + dct_offset , dct_linesize);
1027  add_dct(s, block, 3, dest_y + dct_offset + block_size, dct_linesize);
1028 
1029  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1030  if (s->chroma_y_shift) {//Chroma420
1031  add_dct(s, block, 4, dest_cb, uvlinesize);
1032  add_dct(s, block, 5, dest_cr, uvlinesize);
1033  } else {
1034  //chroma422
1035  dct_linesize = uvlinesize << s->interlaced_dct;
1036  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
1037 
1038  add_dct(s, block, 4, dest_cb, dct_linesize);
1039  add_dct(s, block, 5, dest_cr, dct_linesize);
1040  add_dct(s, block, 6, dest_cb + dct_offset, dct_linesize);
1041  add_dct(s, block, 7, dest_cr + dct_offset, dct_linesize);
1042  if (!s->chroma_x_shift) {//Chroma444
1043  add_dct(s, block, 8, dest_cb + block_size, dct_linesize);
1044  add_dct(s, block, 9, dest_cr + block_size, dct_linesize);
1045  add_dct(s, block, 10, dest_cb + block_size + dct_offset, dct_linesize);
1046  add_dct(s, block, 11, dest_cr + block_size + dct_offset, dct_linesize);
1047  }
1048  }
1049  } //fi gray
1050  } else if (CONFIG_WMV2_DECODER) {
1051  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
1052  }
1053  } else {
1054  /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
1055  TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
1056  if (is_mpeg12 != DEFINITELY_MPEG12_H261 && CONFIG_MPEG4_DECODER &&
1057  /* s->codec_id == AV_CODEC_ID_MPEG4 && */
1058  s->avctx->bits_per_raw_sample > 8) {
1059  ff_mpeg4_decode_studio(s, dest_y, dest_cb, dest_cr, block_size,
1060  uvlinesize, dct_linesize, dct_offset);
1061  } else if (!IS_MPEG12_H261(s)) {
1062  /* dct only in intra block */
1063  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1064  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1065  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1066  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1067 
1068  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1069  if (s->chroma_y_shift) {
1070  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1071  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1072  } else {
1073  dct_offset >>= 1;
1074  dct_linesize >>= 1;
1075  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1076  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1077  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1078  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1079  }
1080  }
1081  } else {
1082  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
1083  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
1084  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
1085  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
1086 
1087  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1088  if (s->chroma_y_shift) {
1089  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
1090  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
1091  } else {
1092  dct_linesize = uvlinesize << s->interlaced_dct;
1093  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
1094 
1095  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
1096  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
1097  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
1098  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
1099  if (!s->chroma_x_shift) { //Chroma444
1100  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
1101  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
1102  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
1103  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
1104  }
1105  }
1106  } //gray
1107  }
1108  }
1109 }
1110 
1111 static av_cold void debug_dct_coeffs(MPVContext *s, const int16_t block[][64])
1112 {
1113  if (!block) // happens when called via error resilience
1114  return;
1115 
1116  void *const logctx = s->avctx;
1117  const uint8_t *const idct_permutation = s->idsp.idct_permutation;
1118 
1119  /* print DCT coefficients */
1120  av_log(logctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1121  for (int i = 0; i < 6; i++) {
1122  for (int j = 0; j < 64; j++) {
1123  av_log(logctx, AV_LOG_DEBUG, "%5d",
1124  block[i][idct_permutation[j]]);
1125  }
1126  av_log(logctx, AV_LOG_DEBUG, "\n");
1127  }
1128 }
1129 
1130 void ff_mpv_reconstruct_mb(MPVContext *s, int16_t block[][64])
1131 {
1132  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1133  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1134 
1135  s->cur_pic.qscale_table[mb_xy] = s->qscale;
1136 
1137  /* avoid copy if macroblock skipped in last frame too */
1138  if (s->mb_skipped) {
1139  s->mb_skipped = 0;
1140  av_assert2(s->pict_type != AV_PICTURE_TYPE_I);
1141  *mbskip_ptr = 1;
1142  } else if (!s->cur_pic.reference) {
1143  *mbskip_ptr = 1;
1144  } else{
1145  *mbskip_ptr = 0; /* not skipped */
1146  }
1147 
1148  if (s->avctx->debug & FF_DEBUG_DCT_COEFF)
1150 
1151  av_assert2((s->out_format <= FMT_H261) == (s->out_format == FMT_H261 || s->out_format == FMT_MPEG1));
1152  if (!s->avctx->lowres) {
1153 #if !CONFIG_SMALL
1154  if (s->out_format <= FMT_H261)
1156  else
1158 #else
1160 #endif
1161  } else
1163 }
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:54
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1417
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:172
av_clip
#define av_clip
Definition: common.h:100
ff_thread_progress_report
void ff_thread_progress_report(ThreadProgress *pro, int n)
This function is a no-op in no-op mode; otherwise it notifies other threads that a certain level of p...
Definition: threadprogress.c:53
MpegEncContext::first_field
int first_field
is 1 for the first field of a field picture 0 otherwise
Definition: mpegvideo.h:273
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
ff_mpv_decode_init
av_cold int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:86
threadprogress.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:823
ff_mpv_init_duplicate_contexts
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:99
MpegEncContext::next_pic
MPVWorkPicture next_pic
copy of the next picture structure.
Definition: mpegvideo.h:126
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:174
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:1008
put_dct
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_dec.c:909
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:434
AVFrame::width
int width
Definition: frame.h:506
internal.h
op_avg
#define op_avg(a, b)
Definition: mpegvideo_dec.c:80
MpegEncContext::pb_field_time
uint16_t pb_field_time
like above, just for interlaced
Definition: mpegvideo.h:228
b
#define b
Definition: input.c:42
ff_toupper4
unsigned int ff_toupper4(unsigned int x)
Definition: to_upper4.h:29
mpegvideo.h
ff_wmv2_add_mb
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2dec.c:298
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_put_h264_chroma_mc2_8_c
void ff_put_h264_chroma_mc2_8_c(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
MAY_BE_MPEG12_H261
#define MAY_BE_MPEG12_H261
Definition: mpegvideo_dec.c:927
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:56
mpegutils.h
thread.h
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:169
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:176
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:662
AV_VIDEO_ENC_PARAMS_MPEG2
@ AV_VIDEO_ENC_PARAMS_MPEG2
Definition: video_enc_params.h:65
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:55
fail
#define fail()
Definition: checkasm.h:224
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:102
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo_dec.c:471
refstruct.h
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
ff_mpv_init_context_frame
av_cold int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:191
MPVPicture::dummy
int dummy
Picture is a dummy and should not be output.
Definition: mpegpicture.h:81
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:60
avassert.h
mpegvideodec.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:111
MpegEncContext::cur_pic
MPVWorkPicture cur_pic
copy of the current picture structure.
Definition: mpegvideo.h:132
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:649
emms_c
#define emms_c()
Definition: emms.h:89
ff_mpeg_flush
av_cold void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:458
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:2323
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
ff_avg_h264_chroma_mc2_8_c
void ff_avg_h264_chroma_mc2_8_c(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1044
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
decode.h
limits.h
AV_CODEC_ID_VC1IMAGE
@ AV_CODEC_ID_VC1IMAGE
Definition: codec_id.h:204
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:57
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:428
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
ff_thread_progress_await
void ff_thread_progress_await(const ThreadProgress *pro_c, int n)
This function is a no-op in no-op mode; otherwise it waits until other threads have reached a certain...
Definition: threadprogress.c:64
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
Definition: mpegvideo_dec.c:419
NULL
#define NULL
Definition: coverity.c:32
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:81
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:171
ff_mpv_decode_close
av_cold int ff_mpv_decode_close(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:172
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:116
DEFINITELY_MPEG12_H261
#define DEFINITELY_MPEG12_H261
Definition: mpegvideo_dec.c:928
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1354
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
IS_MPEG12_H261
#define IS_MPEG12_H261(s)
lowres
static int lowres
Definition: ffplay.c:332
FF_THREAD_IS_COPY
@ FF_THREAD_IS_COPY
Definition: thread.h:61
alloc_dummy_frame
static int av_cold alloc_dummy_frame(MpegEncContext *s, MPVWorkPicture *dst)
Definition: mpegvideo_dec.c:277
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Average and put pixel Widths can be 16, 8, 4 or 2.
Definition: hpeldsp.h:39
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:173
ff_mpv_alloc_dummy_frames
int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
Ensure that the dummy frames are allocated according to pict_type if necessary.
Definition: mpegvideo_dec.c:310
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1392
MpegEncContext::quarter_sample
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:230
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:231
f
f
Definition: af_crystalizer.c:122
MpegEncContext::last_pic
MPVWorkPicture last_pic
copy of the previous picture structure.
Definition: mpegvideo.h:120
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:155
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
height
#define height
Definition: dsp.h:89
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: h263.h:30
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
h264chroma.h
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:450
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:521
op_put
#define op_put(a, b)
Definition: mpegvideo_dec.c:81
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:353
MpegEncContext::workaround_bugs
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
Definition: mpegvideo.h:90
add_dequant_dct
static void add_dequant_dct(MpegEncContext *s, int16_t block[][64], int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_dec.c:916
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:175
MpegEncContext::last_time_base
int last_time_base
Definition: mpegvideo.h:221
FF_THREAD_NO_FRAME_THREADING
@ FF_THREAD_NO_FRAME_THREADING
Definition: thread.h:63
color_frame
static void color_frame(AVFrame *frame, int luma)
Definition: mpegvideo_dec.c:292
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo_dec.c:864
AV_CODEC_ID_MSS2
@ AV_CODEC_ID_MSS2
Definition: codec_id.h:221
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1584
emms.h
MPVPicture::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
Definition: mpegpicture.h:75
avcodec_default_get_buffer2
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: get_buffer.c:253
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:412
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpegvideo_dec.c:940
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
internal.h
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo_dec.c:520
MPVEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideoenc.h:128
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:68
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
add_dct
static void add_dct(MpegEncContext *s, int16_t block[][64], int i, uint8_t *dest, int line_size)
Definition: mpegvideo_dec.c:900
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo_dec.c:732
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:657
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1401
avcodec.h
ff_mpv_workpic_from_pic
void ff_mpv_workpic_from_pic(MPVWorkPicture *wpic, MPVPicture *pic)
Definition: mpegpicture.c:128
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo_dec.c:662
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ff_thread_sync_ref
enum ThreadingStatus ff_thread_sync_ref(AVCodecContext *avctx, size_t offset)
Allows to synchronize objects whose lifetime is the whole decoding process among all frame threads.
Definition: decode.c:1977
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:395
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
update_thread_context for mpegvideo-based decoders.
Definition: mpegvideo_dec.c:125
ff_mpeg4_decode_studio
void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int block_size, int uvlinesize, int dct_linesize, int dct_offset)
Definition: mpeg4videodec.c:260
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AVFrame::height
int height
Definition: frame.h:506
alloc_picture
static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
Definition: mpegvideo_dec.c:225
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:404
MpegEncContext::height
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:84
NOT_MPEG12_H261
#define NOT_MPEG12_H261
Definition: mpegvideo_dec.c:926
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:43
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MPVContext *s, int16_t block[][64])
Definition: mpegvideo_dec.c:1130
MPVWorkPicture
Definition: mpegpicture.h:95
ThreadingStatus
ThreadingStatus
Definition: thread.h:60
MPVPicture::progress
ThreadProgress progress
Definition: mpegpicture.h:92
w
uint8_t w
Definition: llvidencdsp.c:39
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:168
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
MpegEncContext::progressive_sequence
int progressive_sequence
Definition: mpegvideo.h:248
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
imgutils.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mpv_free_context_frame
av_cold void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:402
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
h
h
Definition: vp9dsp_template.c:2070
stride
#define stride
Definition: h264pred_template.c:536
AV_CODEC_ID_WMV3IMAGE
@ AV_CODEC_ID_WMV3IMAGE
Definition: codec_id.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
MpegEncContext::low_delay
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:231
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:228
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
MpegEncContext::width
int width
Definition: mpegvideo.h:84
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:101
H264_CHROMA_MC
#define H264_CHROMA_MC(OPNAME, OP)
Definition: mpegvideo_dec.c:47
video_enc_params.h
debug_dct_coeffs
static av_cold void debug_dct_coeffs(MPVContext *s, const int16_t block[][64])
Definition: mpegvideo_dec.c:1111
ff_mpv_common_frame_size_change
av_cold int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:181
h263.h