mpegvideo_motion.c
/*
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/internal.h"
#include "avcodec.h"
#include "h261.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "qpeldsp.h"
#include "wmv2.h"
#include <limits.h>

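/* Translational global motion compensation (one warp point): the sprite
 * offset acts as a single motion vector with 1/16-pel precision. When the
 * fractional part is a multiple of 8 the prediction reduces to plain
 * half-pel copying, otherwise mdsp.gmc1() interpolates. */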
static void gmc1_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        uint8_t **ref_picture)
{
    uint8_t *ptr;
    int src_x, src_y, motion_x, motion_y;
    ptrdiff_t offset, linesize, uvlinesize;
    int emu = 0;

    motion_x   = s->sprite_offset[0][0];
    motion_y   = s->sprite_offset[0][1];
    src_x      = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y      = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x <<= (3 - s->sprite_warping_accuracy);
    motion_y <<= (3 - s->sprite_warping_accuracy);
    src_x      = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x = 0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y = 0;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + src_y * linesize + src_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 linesize, linesize,
                                 17, 17,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }

    if ((motion_x | motion_y) & 7) {
        s->mdsp.gmc1(dest_y, ptr, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
        s->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
    } else {
        int dxy;

        dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
        if (s->no_rounding) {
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        } else {
            s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
        return;

    motion_x   = s->sprite_offset[1][0];
    motion_y   = s->sprite_offset[1][1];
    src_x      = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y      = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x <<= (3 - s->sprite_warping_accuracy);
    motion_y <<= (3 - s->sprite_warping_accuracy);
    src_x      = av_clip(src_x, -8, s->width >> 1);
    if (src_x == s->width >> 1)
        motion_x = 0;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == s->height >> 1)
        motion_y = 0;

    offset = (src_y * uvlinesize) + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
    }
    s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);
}

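/* Full (affine) global motion compensation for two or three warp points:
 * the per-block start offset (ox, oy) is derived from sprite_offset plus
 * sprite_delta scaled by the macroblock position, and mdsp.gmc() applies
 * the per-pixel deltas for each 8-pixel-wide luma half and chroma plane. */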
static void gmc_motion(MpegEncContext *s,
                       uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                       uint8_t **ref_picture)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a = s->sprite_warping_accuracy;
    int ox, oy;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox = s->sprite_offset[0][0] + s->sprite_delta[0][0] * s->mb_x * 16 +
         s->sprite_delta[0][1] * s->mb_y * 16;
    oy = s->sprite_offset[0][1] + s->sprite_delta[1][0] * s->mb_x * 16 +
         s->sprite_delta[1][1] * s->mb_y * 16;

    s->mdsp.gmc(dest_y, ptr, linesize, 16,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos, s->v_edge_pos);
    s->mdsp.gmc(dest_y + 8, ptr, linesize, 16,
                ox + s->sprite_delta[0][0] * 8,
                oy + s->sprite_delta[1][0] * 8,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos, s->v_edge_pos);

    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
        return;

    ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 +
         s->sprite_delta[0][1] * s->mb_y * 8;
    oy = s->sprite_offset[1][1] + s->sprite_delta[1][0] * s->mb_x * 8 +
         s->sprite_delta[1][1] * s->mb_y * 8;

    ptr = ref_picture[1];
    s->mdsp.gmc(dest_cb, ptr, uvlinesize, 8,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);

    ptr = ref_picture[2];
    s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
}

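/* Half-pel motion compensation for a single 8x8 luma block; falls back to
 * the edge emulation buffer when the vector points outside the padded
 * reference area and returns 1 in that case. */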
static inline int hpel_motion(MpegEncContext *s,
                              uint8_t *dest, uint8_t *src,
                              int src_x, int src_y,
                              op_pixels_func *pix_op,
                              int motion_x, int motion_y)
{
    int dxy = 0;
    int emu = 0;

    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do not forget half pels */
    src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
    if (src_x != s->width)
        dxy |= motion_x & 1;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y != s->height)
        dxy |= (motion_y & 1) << 1;
    src += src_y * s->linesize + src_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 7, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 1) - 7, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        src = s->sc.edge_emu_buffer;
        emu = 1;
    }
    pix_op[dxy](dest, src, s->linesize, 8);
    return emu;
}

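/* Core half-pel MC for one 16xh luma block plus its chroma, frame- or
 * field-based. The chroma vector and subpel flags are derived per output
 * format: H.263-style derivation, full-pel chroma for H.261, and plain
 * halving for the 4:2:0 / 4:2:2 / 4:4:4 cases. */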
static av_always_inline
void mpeg_motion_internal(MpegEncContext *s,
                          uint8_t *dest_y,
                          uint8_t *dest_cb,
                          uint8_t *dest_cr,
                          int field_based,
                          int bottom_field,
                          int field_select,
                          uint8_t **ref_picture,
                          op_pixels_func (*pix_op)[4],
                          int motion_x,
                          int motion_y,
                          int h,
                          int is_mpeg12,
                          int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y,
        uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t uvlinesize, linesize;

#if 0
    if (s->quarter_sample) {
        motion_x >>= 1;
        motion_y >>= 1;
    }
#endif

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    dxy   = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = (mb_y << (4 - field_based)) + (motion_y >> 1);

    if (!is_mpeg12 && s->out_format == FMT_H263) {
        if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
            mx      = (motion_x >> 1) | (motion_x & 1);
            my      = motion_y >> 1;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            uvdxy   = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x >> 1;
            uvsrc_y = src_y >> 1;
        }
        // Even chroma mv's are full pel in H261
    } else if (!is_mpeg12 && s->out_format == FMT_H261) {
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvdxy   = 0;
        uvsrc_x = s->mb_x * 8 + mx;
        uvsrc_y = mb_y * 8 + my;
    } else {
        if (s->chroma_y_shift) {
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            if (s->chroma_x_shift) {
                // Chroma422
                mx      = motion_x / 2;
                uvdxy   = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x * 8 + (mx >> 1);
                uvsrc_y = src_y;
            } else {
                // Chroma444
                uvdxy   = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 15, 0) ||
        (unsigned)src_y >= FFMAX(   v_edge_pos - (motion_y & 1) - h + 1, 0)) {
        if (is_mpeg12 ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            av_log(s->avctx, AV_LOG_DEBUG,
                   "MPEG motion vector out of boundary (%d %d)\n", src_x,
                   src_y);
            return;
        }
        src_y = (unsigned)src_y << field_based;
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            uvsrc_y = (unsigned)uvsrc_y << field_based;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    /* FIXME use this for field pix too instead of the obnoxious hack which
     * changes picture.data */
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
        s->out_format == FMT_H261) {
        ff_h261_loop_filter(s);
    }
}

/* apply one mpeg motion vector to the three components */
static void mpeg_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        int field_select, uint8_t **ref_picture,
                        op_pixels_func (*pix_op)[4],
                        int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              int bottom_field, int field_select,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4],
                              int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

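/* Overlapped block motion compensation blend: each output pixel is a
 * weighted sum of the prediction using the block's own vector (mid) and
 * the predictions using the four neighbouring vectors; the five weights
 * of every OBMC_FILTER call sum to 8, hence the +4 rounding and >>3. */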
// FIXME: SIMDify, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
{
    int x;
    uint8_t *const top    = src[1];
    uint8_t *const left   = src[2];
    uint8_t *const mid    = src[0];
    uint8_t *const right  = src[3];
    uint8_t *const bottom = src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x] = (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4) >> 3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x             , t, l, m, r, b);\
    OBMC_FILTER(x + 1         , t, l, m, r, b);\
    OBMC_FILTER(x     + stride, t, l, m, r, b);\
    OBMC_FILTER(x + 1 + stride, t, l, m, r, b);

    x = 0;
    OBMC_FILTER (x    , 2, 2, 4, 0, 0);
    OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
    x += stride;
    OBMC_FILTER (x    , 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
    x += stride;
    OBMC_FILTER4(x    , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
    x += 2 * stride;
    OBMC_FILTER4(x    , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
    x += 2 * stride;
    OBMC_FILTER (x    , 0, 2, 5, 0, 1);
    OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
    x += stride;
    OBMC_FILTER (x    , 0, 2, 4, 0, 2);
    OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
}

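/* Builds the five 8x8 predictions needed by put_obmc(): one with the
 * block's own vector and one per neighbouring vector, reusing the centre
 * prediction whenever a neighbour's vector is identical. */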
/* obmc for 1 8x8 luma block */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
                               int16_t mv[5][2] /* mid top left right bottom */)
#define MID 0
{
    int i;
    uint8_t *ptr[5];

    av_assert2(s->quarter_sample == 0);

    for (i = 0; i < 5; i++) {
        if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
            ptr[i] = ptr[MID];
        } else {
            ptr[i] = s->sc.obmc_scratchpad + 8 * (i & 1) +
                     s->linesize * 8 * (i >> 1);
            hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}

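/* Quarter-pel luma motion compensation. The chroma vector is derived by
 * halving the luma vector twice, with the FF_BUG_QPEL_CHROMA/CHROMA2
 * workarounds reproducing the rounding of known buggy encoders. */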
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based, int bottom_field,
                               int field_select, uint8_t **ref_picture,
                               op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t linesize, uvlinesize;

    dxy = ((motion_y & 3) << 2) | (motion_x & 3);

    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->linesize   << field_based;
    uvlinesize = s->uvlinesize << field_based;

    if (field_based) {
        mx = motion_x / 2;
        my = motion_y >> 1;
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
        static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
        mx = (motion_x >> 1) + rtab[motion_x & 7];
        my = (motion_y >> 1) + rtab[motion_y & 7];
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
        mx = (motion_x >> 1) | (motion_x & 1);
        my = (motion_y >> 1) | (motion_y & 1);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
    }
    mx = (mx >> 1) | (mx & 1);
    my = (my >> 1) | (my & 1);

    uvdxy = (mx & 1) | ((my & 1) << 1);
    mx  >>= 1;
    my  >>= 1;

    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 15, 0) ||
        (unsigned)src_y >= FFMAX(   v_edge_pos - (motion_y & 3) - h + 1, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y * (1 << field_based),
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    if (!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else {
        if (bottom_field) {
            dest_y  += s->linesize;
            dest_cb += s->uvlinesize;
            dest_cr += s->uvlinesize;
        }

        if (field_select) {
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        // damn interlaced mode
        // FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y, ptr_y, linesize);
        qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}

/**
 * H.263 chroma 4mv motion compensation.
 */
static void chroma_4mv_motion(MpegEncContext *s,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func *pix_op,
                              int mx, int my)
{
    uint8_t *ptr;
    int src_x, src_y, dxy, emu = 0;
    ptrdiff_t offset;

    /* In case of 8X8, we construct a single chroma motion vector
     * with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    dxy  = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, (s->width >> 1));
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = av_clip(src_y, -8, (s->height >> 1));
    if (src_y == (s->height >> 1))
        dxy &= ~2;

    offset = src_y * s->uvlinesize + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 7, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 7, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}

static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
{
    /* fetch pixels for estimated mv 4 macroblocks ahead
     * optimized for 64byte cache lines */
    const int shift = s->quarter_sample ? 2 : 1;
    const int mx    = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
    const int my    = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
    int off         = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;

    s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
    off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
    s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
}

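/* OBMC for a whole macroblock: gather the current and neighbouring 8x8
 * motion vectors into mv_cache (replicating at picture borders and at
 * intra neighbours), run obmc_motion() on each of the four luma blocks,
 * and sum the four vectors for chroma_4mv_motion(), which applies the
 * H.263 chroma rounding. */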
static inline void apply_obmc(MpegEncContext *s,
                              uint8_t *dest_y,
                              uint8_t *dest_cb,
                              uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4])
{
    LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
    Picture *cur_frame   = &s->current_picture;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    const int xy         = mb_x + mb_y * s->mb_stride;
    const int mot_stride = s->b8_stride;
    const int mot_xy     = mb_x * 2 + mb_y * 2 * mot_stride;
    int mx, my, i;

    av_assert2(!s->mb_skipped);

    AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
    AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);

    AV_COPY32(mv_cache[2][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[2][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    AV_COPY32(mv_cache[3][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[3][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
        AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
        AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
    } else {
        AV_COPY32(mv_cache[0][1],
                  cur_frame->motion_val[0][mot_xy - mot_stride]);
        AV_COPY32(mv_cache[0][2],
                  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
    }

    if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
        AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
        AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
    } else {
        AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
        AV_COPY32(mv_cache[2][0],
                  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
    }

    if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
        AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
        AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
    } else {
        AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
        AV_COPY32(mv_cache[2][3],
                  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
    }

    mx = 0;
    my = 0;
    for (i = 0; i < 4; i++) {
        const int x      = (i & 1) + 1;
        const int y      = (i >> 1) + 1;
        int16_t mv[5][2] = {
            { mv_cache[y][x][0],     mv_cache[y][x][1]     },
            { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
            { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
            { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
            { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
        };
        // FIXME cleanup
        obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                    ref_picture[0],
                    mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
                    pix_op[1],
                    mv);

        mx += mv[0][0];
        my += mv[0][1];
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1],
                          mx, my);
}

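/* 4MV mode: one vector per 8x8 luma block, quarter- or half-pel depending
 * on s->quarter_sample; the four vectors are summed (halved first in the
 * quarter-pel case) and handed to chroma_4mv_motion() to produce the
 * single chroma prediction. */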
static inline void apply_8x8(MpegEncContext *s,
                             uint8_t *dest_y,
                             uint8_t *dest_cb,
                             uint8_t *dest_cr,
                             int dir,
                             uint8_t **ref_picture,
                             qpel_mc_func (*qpix_op)[16],
                             op_pixels_func (*pix_op)[4])
{
    int dxy, mx, my, src_x, src_y;
    int i;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    uint8_t *ptr, *dest;

    mx = 0;
    my = 0;
    if (s->quarter_sample) {
        for (i = 0; i < 4; i++) {
            int motion_x = s->mv[dir][i][0];
            int motion_y = s->mv[dir][i][1];

            dxy   = ((motion_y & 3) << 2) | (motion_x & 3);
            src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
            src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;

            /* WARNING: do not forget half pels */
            src_x = av_clip(src_x, -16, s->width);
            if (src_x == s->width)
                dxy &= ~3;
            src_y = av_clip(src_y, -16, s->height);
            if (src_y == s->height)
                dxy &= ~12;

            ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
            if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 7, 0) ||
                (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 3) - 7, 0)) {
                s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                         s->linesize, s->linesize,
                                         9, 9,
                                         src_x, src_y,
                                         s->h_edge_pos,
                                         s->v_edge_pos);
                ptr = s->sc.edge_emu_buffer;
            }
            dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
            qpix_op[1][dxy](dest, ptr, s->linesize);

            mx += s->mv[dir][i][0] / 2;
            my += s->mv[dir][i][1] / 2;
        }
    } else {
        for (i = 0; i < 4; i++) {
            hpel_motion(s,
                        dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8,
                        mb_y * 16 + (i >> 1) * 8,
                        pix_op[1],
                        s->mv[dir][i][0],
                        s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }
    }

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1], mx, my);
}

/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static av_always_inline void mpv_motion_internal(MpegEncContext *s,
                                                 uint8_t *dest_y,
                                                 uint8_t *dest_cb,
                                                 uint8_t *dest_cr,
                                                 int dir,
                                                 uint8_t **ref_picture,
                                                 op_pixels_func (*pix_op)[4],
                                                 qpel_mc_func (*qpix_op)[16],
                                                 int is_mpeg12)
{
    int i;
    int mb_y = s->mb_y;

    prefetch_motion(s, ref_picture, dir);

    if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
        apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
        return;
    }

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        if (s->mcsel) {
            if (s->real_sprite_warping_points == 1) {
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            } else {
                gmc_motion(s, dest_y, dest_cb, dest_cr,
                           ref_picture);
            }
        } else if (!is_mpeg12 && s->quarter_sample) {
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
                   s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else {
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y);
        }
        break;
    case MV_TYPE_8X8:
        if (!is_mpeg12)
            apply_8x8(s, dest_y, dest_cb, dest_cr,
                      dir, ref_picture, qpix_op, pix_op);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            if (!is_mpeg12 && s->quarter_sample) {
                for (i = 0; i < 2; i++)
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
            } else {
                /* top field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  0, s->field_select[dir][0],
                                  ref_picture, pix_op,
                                  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
                /* bottom field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  1, s->field_select[dir][1],
                                  ref_picture, pix_op,
                                  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
            }
        } else {
            if (   s->picture_structure != s->field_select[dir][0] + 1 &&
                   s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
                || !ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if ((s->picture_structure == s->field_select[dir][i] + 1 ||
                 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) &&
                ref_picture[0]) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 16 * i,
                        8, mb_y >> 1);

            dest_y  += 16 * s->linesize;
            dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++)
                    mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                      j, j ^ i, ref_picture, pix_op,
                                      s->mv[dir][2 * i + j][0],
                                      s->mv[dir][2 * i + j][1], 8, mb_y);
                pix_op = s->hdsp.avg_pixels_tab;
            }
        } else {
            if (!ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            for (i = 0; i < 2; i++) {
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            s->picture_structure != i + 1,
                            ref_picture, pix_op,
                            s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                            16, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->hdsp.avg_pixels_tab;

                /* opposite parity is always in the same frame if this is
                 * second field */
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default: av_assert2(0);
    }
}

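/* Public entry point: unless CONFIG_SMALL is set, dispatch to a variant of
 * mpv_motion_internal() specialized for MPEG-1/2 so that the is_mpeg12
 * branches can be resolved at compile time. */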
void ff_mpv_motion(MpegEncContext *s,
                   uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int dir,
                   uint8_t **ref_picture,
                   op_pixels_func (*pix_op)[4],
                   qpel_mc_func (*qpix_op)[16])
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 1);
    else
#endif
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 0);
}