/*
 * FFmpeg — mpegvideo_motion.c
 * (banner retained from the documentation export this file was recovered from)
 */
1 /*
2  * Copyright (c) 2000,2001 Fabrice Bellard
3  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <string.h>
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/internal.h"
28 #include "libavutil/mem_internal.h"
29 
30 #include "avcodec.h"
31 #include "h261.h"
32 #include "mpegutils.h"
33 #include "mpegvideo.h"
34 #include "mjpegenc.h"
35 #include "msmpeg4.h"
36 #include "qpeldsp.h"
37 #include "wmv2.h"
38 #include <limits.h>
39 
41  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
42  uint8_t **ref_picture)
43 {
44  uint8_t *ptr;
45  int src_x, src_y, motion_x, motion_y;
46  ptrdiff_t offset, linesize, uvlinesize;
47  int emu = 0;
48 
49  motion_x = s->sprite_offset[0][0];
50  motion_y = s->sprite_offset[0][1];
51  src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy + 1));
52  src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy + 1));
53  motion_x *= 1 << (3 - s->sprite_warping_accuracy);
54  motion_y *= 1 << (3 - s->sprite_warping_accuracy);
55  src_x = av_clip(src_x, -16, s->width);
56  if (src_x == s->width)
57  motion_x = 0;
58  src_y = av_clip(src_y, -16, s->height);
59  if (src_y == s->height)
60  motion_y = 0;
61 
62  linesize = s->linesize;
63  uvlinesize = s->uvlinesize;
64 
65  ptr = ref_picture[0] + src_y * linesize + src_x;
66 
67  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
68  (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
70  linesize, linesize,
71  17, 17,
72  src_x, src_y,
73  s->h_edge_pos, s->v_edge_pos);
74  ptr = s->sc.edge_emu_buffer;
75  }
76 
77  if ((motion_x | motion_y) & 7) {
78  s->mdsp.gmc1(dest_y, ptr, linesize, 16,
79  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
80  s->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
81  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
82  } else {
83  int dxy;
84 
85  dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
86  if (s->no_rounding) {
87  s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
88  } else {
89  s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
90  }
91  }
92 
93  if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
94  return;
95 
96  motion_x = s->sprite_offset[1][0];
97  motion_y = s->sprite_offset[1][1];
98  src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy + 1));
99  src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy + 1));
100  motion_x *= 1 << (3 - s->sprite_warping_accuracy);
101  motion_y *= 1 << (3 - s->sprite_warping_accuracy);
102  src_x = av_clip(src_x, -8, s->width >> 1);
103  if (src_x == s->width >> 1)
104  motion_x = 0;
105  src_y = av_clip(src_y, -8, s->height >> 1);
106  if (src_y == s->height >> 1)
107  motion_y = 0;
108 
109  offset = (src_y * uvlinesize) + src_x;
110  ptr = ref_picture[1] + offset;
111  if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
112  (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
114  uvlinesize, uvlinesize,
115  9, 9,
116  src_x, src_y,
117  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
118  ptr = s->sc.edge_emu_buffer;
119  emu = 1;
120  }
121  s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
122  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
123 
124  ptr = ref_picture[2] + offset;
125  if (emu) {
127  uvlinesize, uvlinesize,
128  9, 9,
129  src_x, src_y,
130  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
131  ptr = s->sc.edge_emu_buffer;
132  }
133  s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
134  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
135 }
136 
138  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
139  uint8_t **ref_picture)
140 {
141  uint8_t *ptr;
142  int linesize, uvlinesize;
143  const int a = s->sprite_warping_accuracy;
144  int ox, oy;
145 
146  linesize = s->linesize;
147  uvlinesize = s->uvlinesize;
148 
149  ptr = ref_picture[0];
150 
151  ox = s->sprite_offset[0][0] + s->sprite_delta[0][0] * s->mb_x * 16 +
152  s->sprite_delta[0][1] * s->mb_y * 16;
153  oy = s->sprite_offset[0][1] + s->sprite_delta[1][0] * s->mb_x * 16 +
154  s->sprite_delta[1][1] * s->mb_y * 16;
155 
156  s->mdsp.gmc(dest_y, ptr, linesize, 16,
157  ox, oy,
158  s->sprite_delta[0][0], s->sprite_delta[0][1],
159  s->sprite_delta[1][0], s->sprite_delta[1][1],
160  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
161  s->h_edge_pos, s->v_edge_pos);
162  s->mdsp.gmc(dest_y + 8, ptr, linesize, 16,
163  ox + s->sprite_delta[0][0] * 8,
164  oy + s->sprite_delta[1][0] * 8,
165  s->sprite_delta[0][0], s->sprite_delta[0][1],
166  s->sprite_delta[1][0], s->sprite_delta[1][1],
167  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
168  s->h_edge_pos, s->v_edge_pos);
169 
170  if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
171  return;
172 
173  ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 +
174  s->sprite_delta[0][1] * s->mb_y * 8;
175  oy = s->sprite_offset[1][1] + s->sprite_delta[1][0] * s->mb_x * 8 +
176  s->sprite_delta[1][1] * s->mb_y * 8;
177 
178  ptr = ref_picture[1];
179  s->mdsp.gmc(dest_cb, ptr, uvlinesize, 8,
180  ox, oy,
181  s->sprite_delta[0][0], s->sprite_delta[0][1],
182  s->sprite_delta[1][0], s->sprite_delta[1][1],
183  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
184  (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
185 
186  ptr = ref_picture[2];
187  s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
188  ox, oy,
189  s->sprite_delta[0][0], s->sprite_delta[0][1],
190  s->sprite_delta[1][0], s->sprite_delta[1][1],
191  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
192  (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
193 }
194 
195 static inline int hpel_motion(MpegEncContext *s,
196  uint8_t *dest, uint8_t *src,
197  int src_x, int src_y,
198  op_pixels_func *pix_op,
199  int motion_x, int motion_y)
200 {
201  int dxy = 0;
202  int emu = 0;
203 
204  src_x += motion_x >> 1;
205  src_y += motion_y >> 1;
206 
207  /* WARNING: do no forget half pels */
208  src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
209  if (src_x != s->width)
210  dxy |= motion_x & 1;
211  src_y = av_clip(src_y, -16, s->height);
212  if (src_y != s->height)
213  dxy |= (motion_y & 1) << 1;
214  src += src_y * s->linesize + src_x;
215 
216  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 7, 0) ||
217  (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 1) - 7, 0)) {
219  s->linesize, s->linesize,
220  9, 9,
221  src_x, src_y,
222  s->h_edge_pos, s->v_edge_pos);
223  src = s->sc.edge_emu_buffer;
224  emu = 1;
225  }
226  pix_op[dxy](dest, src, s->linesize, 8);
227  return emu;
228 }
229 
230 static av_always_inline
232  uint8_t *dest_y,
233  uint8_t *dest_cb,
234  uint8_t *dest_cr,
235  int field_based,
236  int bottom_field,
237  int field_select,
238  uint8_t **ref_picture,
239  op_pixels_func (*pix_op)[4],
240  int motion_x,
241  int motion_y,
242  int h,
243  int is_mpeg12,
244  int is_16x8,
245  int mb_y)
246 {
247  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
248  int dxy, uvdxy, mx, my, src_x, src_y,
249  uvsrc_x, uvsrc_y, v_edge_pos, block_y_half;
250  ptrdiff_t uvlinesize, linesize;
251 
252  v_edge_pos = s->v_edge_pos >> field_based;
253  linesize = s->current_picture.f->linesize[0] << field_based;
254  uvlinesize = s->current_picture.f->linesize[1] << field_based;
255  block_y_half = (field_based | is_16x8);
256 
257  dxy = ((motion_y & 1) << 1) | (motion_x & 1);
258  src_x = s->mb_x * 16 + (motion_x >> 1);
259  src_y = (mb_y << (4 - block_y_half)) + (motion_y >> 1);
260 
261  if (!is_mpeg12 && s->out_format == FMT_H263) {
262  if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
263  mx = (motion_x >> 1) | (motion_x & 1);
264  my = motion_y >> 1;
265  uvdxy = ((my & 1) << 1) | (mx & 1);
266  uvsrc_x = s->mb_x * 8 + (mx >> 1);
267  uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
268  } else {
269  uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
270  uvsrc_x = src_x >> 1;
271  uvsrc_y = src_y >> 1;
272  }
273  // Even chroma mv's are full pel in H261
274  } else if (!is_mpeg12 && s->out_format == FMT_H261) {
275  mx = motion_x / 4;
276  my = motion_y / 4;
277  uvdxy = 0;
278  uvsrc_x = s->mb_x * 8 + mx;
279  uvsrc_y = mb_y * 8 + my;
280  } else {
281  if (s->chroma_y_shift) {
282  mx = motion_x / 2;
283  my = motion_y / 2;
284  uvdxy = ((my & 1) << 1) | (mx & 1);
285  uvsrc_x = s->mb_x * 8 + (mx >> 1);
286  uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
287  } else {
288  if (s->chroma_x_shift) {
289  // Chroma422
290  mx = motion_x / 2;
291  uvdxy = ((motion_y & 1) << 1) | (mx & 1);
292  uvsrc_x = s->mb_x * 8 + (mx >> 1);
293  uvsrc_y = src_y;
294  } else {
295  // Chroma444
296  uvdxy = dxy;
297  uvsrc_x = src_x;
298  uvsrc_y = src_y;
299  }
300  }
301  }
302 
303  ptr_y = ref_picture[0] + src_y * linesize + src_x;
304  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
305  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
306 
307  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 15 , 0) ||
308  (unsigned)src_y >= FFMAX( v_edge_pos - (motion_y & 1) - h + 1, 0)) {
309  if (is_mpeg12 ||
313  "MPEG motion vector out of boundary (%d %d)\n", src_x,
314  src_y);
315  return;
316  }
317  src_y = (unsigned)src_y << field_based;
319  s->linesize, s->linesize,
320  17, 17 + field_based,
321  src_x, src_y,
322  s->h_edge_pos, s->v_edge_pos);
323  ptr_y = s->sc.edge_emu_buffer;
324  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
325  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
326  uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
327  if (s->workaround_bugs & FF_BUG_IEDGE)
328  vbuf -= s->uvlinesize;
329  uvsrc_y = (unsigned)uvsrc_y << field_based;
330  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
331  s->uvlinesize, s->uvlinesize,
332  9, 9 + field_based,
333  uvsrc_x, uvsrc_y,
334  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
335  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
336  s->uvlinesize, s->uvlinesize,
337  9, 9 + field_based,
338  uvsrc_x, uvsrc_y,
339  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
340  ptr_cb = ubuf;
341  ptr_cr = vbuf;
342  }
343  }
344 
345  /* FIXME use this for field pix too instead of the obnoxious hack which
346  * changes picture.data */
347  if (bottom_field) {
348  dest_y += s->linesize;
349  dest_cb += s->uvlinesize;
350  dest_cr += s->uvlinesize;
351  }
352 
353  if (field_select) {
354  ptr_y += s->linesize;
355  ptr_cb += s->uvlinesize;
356  ptr_cr += s->uvlinesize;
357  }
358 
359  pix_op[0][dxy](dest_y, ptr_y, linesize, h);
360 
361  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
362  pix_op[s->chroma_x_shift][uvdxy]
363  (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
364  pix_op[s->chroma_x_shift][uvdxy]
365  (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
366  }
367  if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
368  s->out_format == FMT_H261) {
370  }
371 }
372 /* apply one mpeg motion vector to the three components */
374  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
375  int field_select, uint8_t **ref_picture,
376  op_pixels_func (*pix_op)[4],
377  int motion_x, int motion_y, int h, int is_16x8, int mb_y)
378 {
379 #if !CONFIG_SMALL
380  if (s->out_format == FMT_MPEG1)
381  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
382  field_select, ref_picture, pix_op,
383  motion_x, motion_y, h, 1, is_16x8, mb_y);
384  else
385 #endif
386  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
387  field_select, ref_picture, pix_op,
388  motion_x, motion_y, h, 0, is_16x8, mb_y);
389 }
390 
391 static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
392  uint8_t *dest_cb, uint8_t *dest_cr,
393  int bottom_field, int field_select,
394  uint8_t **ref_picture,
395  op_pixels_func (*pix_op)[4],
396  int motion_x, int motion_y, int h, int mb_y)
397 {
398 #if !CONFIG_SMALL
399  if (s->out_format == FMT_MPEG1)
400  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
401  bottom_field, field_select, ref_picture, pix_op,
402  motion_x, motion_y, h, 1, 0, mb_y);
403  else
404 #endif
405  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
406  bottom_field, field_select, ref_picture, pix_op,
407  motion_x, motion_y, h, 0, 0, mb_y);
408 }
409 
// FIXME: SIMDify, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
{
    /* Blend the centre 8x8 prediction with its four neighbour predictions:
     *     dst = (t*top + l*left + m*mid + r*right + b*bottom + 4) >> 3
     * The per-pixel weights are spelled out in the tables below; every
     * position's five weights sum to 8.  The right and bottom weight
     * tables are horizontal/vertical mirrors of the left and top ones. */
    static const uint8_t w_top[8][8] = {
        { 2, 2, 2, 2, 2, 2, 2, 2 },
        { 1, 1, 2, 2, 2, 2, 1, 1 },
        { 1, 1, 1, 1, 1, 1, 1, 1 },
        { 1, 1, 1, 1, 1, 1, 1, 1 },
        { 0, 0, 0, 0, 0, 0, 0, 0 },
        { 0, 0, 0, 0, 0, 0, 0, 0 },
        { 0, 0, 0, 0, 0, 0, 0, 0 },
        { 0, 0, 0, 0, 0, 0, 0, 0 },
    };
    static const uint8_t w_left[8][8] = {
        { 2, 1, 1, 1, 0, 0, 0, 0 },
        { 2, 2, 1, 1, 0, 0, 0, 0 },
        { 2, 2, 1, 1, 0, 0, 0, 0 },
        { 2, 2, 1, 1, 0, 0, 0, 0 },
        { 2, 2, 1, 1, 0, 0, 0, 0 },
        { 2, 2, 1, 1, 0, 0, 0, 0 },
        { 2, 2, 1, 1, 0, 0, 0, 0 },
        { 2, 1, 1, 1, 0, 0, 0, 0 },
    };
    static const uint8_t w_mid[8][8] = {
        { 4, 5, 5, 5, 5, 5, 5, 4 },
        { 5, 5, 5, 5, 5, 5, 5, 5 },
        { 5, 5, 6, 6, 6, 6, 5, 5 },
        { 5, 5, 6, 6, 6, 6, 5, 5 },
        { 5, 5, 6, 6, 6, 6, 5, 5 },
        { 5, 5, 6, 6, 6, 6, 5, 5 },
        { 5, 5, 5, 5, 5, 5, 5, 5 },
        { 4, 5, 5, 5, 5, 5, 5, 4 },
    };
    const uint8_t *mid    = src[0];
    const uint8_t *top    = src[1];
    const uint8_t *left   = src[2];
    const uint8_t *right  = src[3];
    const uint8_t *bottom = src[4];
    int x, y;

    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            const int i  = y * stride + x;
            const int wt = w_top[y][x];
            const int wl = w_left[y][x];
            const int wm = w_mid[y][x];
            const int wr = w_left[y][7 - x];   /* right mirrors left */
            const int wb = w_top[7 - y][x];    /* bottom mirrors top */

            dst[i] = (wt * top[i] + wl * left[i] + wm * mid[i] +
                      wr * right[i] + wb * bottom[i] + 4) >> 3;
        }
    }
}
462 
463 /* obmc for 1 8x8 luma block */
464 static inline void obmc_motion(MpegEncContext *s,
465  uint8_t *dest, uint8_t *src,
466  int src_x, int src_y,
467  op_pixels_func *pix_op,
468  int16_t mv[5][2] /* mid top left right bottom */)
469 #define MID 0
470 {
471  int i;
472  uint8_t *ptr[5];
473 
474  av_assert2(s->quarter_sample == 0);
475 
476  for (i = 0; i < 5; i++) {
477  if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
478  ptr[i] = ptr[MID];
479  } else {
480  ptr[i] = s->sc.obmc_scratchpad + 8 * (i & 1) +
481  s->linesize * 8 * (i >> 1);
482  hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
483  mv[i][0], mv[i][1]);
484  }
485  }
486 
487  put_obmc(dest, ptr, s->linesize);
488 }
489 
490 static inline void qpel_motion(MpegEncContext *s,
491  uint8_t *dest_y,
492  uint8_t *dest_cb,
493  uint8_t *dest_cr,
494  int field_based, int bottom_field,
495  int field_select, uint8_t **ref_picture,
496  op_pixels_func (*pix_op)[4],
497  qpel_mc_func (*qpix_op)[16],
498  int motion_x, int motion_y, int h)
499 {
500  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
501  int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
502  ptrdiff_t linesize, uvlinesize;
503 
504  dxy = ((motion_y & 3) << 2) | (motion_x & 3);
505 
506  src_x = s->mb_x * 16 + (motion_x >> 2);
507  src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
508 
509  v_edge_pos = s->v_edge_pos >> field_based;
510  linesize = s->linesize << field_based;
511  uvlinesize = s->uvlinesize << field_based;
512 
513  if (field_based) {
514  mx = motion_x / 2;
515  my = motion_y >> 1;
516  } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
517  static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
518  mx = (motion_x >> 1) + rtab[motion_x & 7];
519  my = (motion_y >> 1) + rtab[motion_y & 7];
520  } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
521  mx = (motion_x >> 1) | (motion_x & 1);
522  my = (motion_y >> 1) | (motion_y & 1);
523  } else {
524  mx = motion_x / 2;
525  my = motion_y / 2;
526  }
527  mx = (mx >> 1) | (mx & 1);
528  my = (my >> 1) | (my & 1);
529 
530  uvdxy = (mx & 1) | ((my & 1) << 1);
531  mx >>= 1;
532  my >>= 1;
533 
534  uvsrc_x = s->mb_x * 8 + mx;
535  uvsrc_y = s->mb_y * (8 >> field_based) + my;
536 
537  ptr_y = ref_picture[0] + src_y * linesize + src_x;
538  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
539  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
540 
541  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 15 , 0) ||
542  (unsigned)src_y >= FFMAX( v_edge_pos - (motion_y & 3) - h + 1, 0)) {
544  s->linesize, s->linesize,
545  17, 17 + field_based,
546  src_x, src_y * (1 << field_based),
547  s->h_edge_pos, s->v_edge_pos);
548  ptr_y = s->sc.edge_emu_buffer;
549  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
550  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
551  uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
552  if (s->workaround_bugs & FF_BUG_IEDGE)
553  vbuf -= s->uvlinesize;
554  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
555  s->uvlinesize, s->uvlinesize,
556  9, 9 + field_based,
557  uvsrc_x, uvsrc_y * (1 << field_based),
558  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
559  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
560  s->uvlinesize, s->uvlinesize,
561  9, 9 + field_based,
562  uvsrc_x, uvsrc_y * (1 << field_based),
563  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
564  ptr_cb = ubuf;
565  ptr_cr = vbuf;
566  }
567  }
568 
569  if (!field_based)
570  qpix_op[0][dxy](dest_y, ptr_y, linesize);
571  else {
572  if (bottom_field) {
573  dest_y += s->linesize;
574  dest_cb += s->uvlinesize;
575  dest_cr += s->uvlinesize;
576  }
577 
578  if (field_select) {
579  ptr_y += s->linesize;
580  ptr_cb += s->uvlinesize;
581  ptr_cr += s->uvlinesize;
582  }
583  // damn interlaced mode
584  // FIXME boundary mirroring is not exactly correct here
585  qpix_op[1][dxy](dest_y, ptr_y, linesize);
586  qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
587  }
588  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
589  pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
590  pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
591  }
592 }
593 
594 /**
595  * H.263 chroma 4mv motion compensation.
596  */
598  uint8_t *dest_cb, uint8_t *dest_cr,
599  uint8_t **ref_picture,
600  op_pixels_func *pix_op,
601  int mx, int my)
602 {
603  uint8_t *ptr;
604  int src_x, src_y, dxy, emu = 0;
605  ptrdiff_t offset;
606 
607  /* In case of 8X8, we construct a single chroma motion vector
608  * with a special rounding */
609  mx = ff_h263_round_chroma(mx);
610  my = ff_h263_round_chroma(my);
611 
612  dxy = ((my & 1) << 1) | (mx & 1);
613  mx >>= 1;
614  my >>= 1;
615 
616  src_x = s->mb_x * 8 + mx;
617  src_y = s->mb_y * 8 + my;
618  src_x = av_clip(src_x, -8, (s->width >> 1));
619  if (src_x == (s->width >> 1))
620  dxy &= ~1;
621  src_y = av_clip(src_y, -8, (s->height >> 1));
622  if (src_y == (s->height >> 1))
623  dxy &= ~2;
624 
625  offset = src_y * s->uvlinesize + src_x;
626  ptr = ref_picture[1] + offset;
627  if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 7, 0) ||
628  (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 7, 0)) {
630  s->uvlinesize, s->uvlinesize,
631  9, 9, src_x, src_y,
632  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
633  ptr = s->sc.edge_emu_buffer;
634  emu = 1;
635  }
636  pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
637 
638  ptr = ref_picture[2] + offset;
639  if (emu) {
641  s->uvlinesize, s->uvlinesize,
642  9, 9, src_x, src_y,
643  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
644  ptr = s->sc.edge_emu_buffer;
645  }
646  pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
647 }
648 
649 static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
650 {
651  /* fetch pixels for estimated mv 4 macroblocks ahead
652  * optimized for 64byte cache lines */
653  const int shift = s->quarter_sample ? 2 : 1;
654  const int mx = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
655  const int my = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
656  int off = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;
657 
658  s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
659  off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
660  s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
661 }
662 
663 static inline void apply_obmc(MpegEncContext *s,
664  uint8_t *dest_y,
665  uint8_t *dest_cb,
666  uint8_t *dest_cr,
667  uint8_t **ref_picture,
668  op_pixels_func (*pix_op)[4])
669 {
670  LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
671  Picture *cur_frame = &s->current_picture;
672  int mb_x = s->mb_x;
673  int mb_y = s->mb_y;
674  const int xy = mb_x + mb_y * s->mb_stride;
675  const int mot_stride = s->b8_stride;
676  const int mot_xy = mb_x * 2 + mb_y * 2 * mot_stride;
677  int mx, my, i;
678 
679  av_assert2(!s->mb_skipped);
680 
681  AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
682  AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);
683 
684  AV_COPY32(mv_cache[2][1],
685  cur_frame->motion_val[0][mot_xy + mot_stride]);
686  AV_COPY32(mv_cache[2][2],
687  cur_frame->motion_val[0][mot_xy + mot_stride + 1]);
688 
689  AV_COPY32(mv_cache[3][1],
690  cur_frame->motion_val[0][mot_xy + mot_stride]);
691  AV_COPY32(mv_cache[3][2],
692  cur_frame->motion_val[0][mot_xy + mot_stride + 1]);
693 
694  if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
695  AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
696  AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
697  } else {
698  AV_COPY32(mv_cache[0][1],
699  cur_frame->motion_val[0][mot_xy - mot_stride]);
700  AV_COPY32(mv_cache[0][2],
701  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
702  }
703 
704  if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
705  AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
706  AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
707  } else {
708  AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
709  AV_COPY32(mv_cache[2][0],
710  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
711  }
712 
713  if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
714  AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
715  AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
716  } else {
717  AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
718  AV_COPY32(mv_cache[2][3],
719  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
720  }
721 
722  mx = 0;
723  my = 0;
724  for (i = 0; i < 4; i++) {
725  const int x = (i & 1) + 1;
726  const int y = (i >> 1) + 1;
727  int16_t mv[5][2] = {
728  { mv_cache[y][x][0], mv_cache[y][x][1] },
729  { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
730  { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
731  { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
732  { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
733  };
734  // FIXME cleanup
735  obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
736  ref_picture[0],
737  mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
738  pix_op[1],
739  mv);
740 
741  mx += mv[0][0];
742  my += mv[0][1];
743  }
744  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
745  chroma_4mv_motion(s, dest_cb, dest_cr,
746  ref_picture, pix_op[1],
747  mx, my);
748 }
749 
750 static inline void apply_8x8(MpegEncContext *s,
751  uint8_t *dest_y,
752  uint8_t *dest_cb,
753  uint8_t *dest_cr,
754  int dir,
755  uint8_t **ref_picture,
756  qpel_mc_func (*qpix_op)[16],
757  op_pixels_func (*pix_op)[4])
758 {
759  int dxy, mx, my, src_x, src_y;
760  int i;
761  int mb_x = s->mb_x;
762  int mb_y = s->mb_y;
763  uint8_t *ptr, *dest;
764 
765  mx = 0;
766  my = 0;
767  if (s->quarter_sample) {
768  for (i = 0; i < 4; i++) {
769  int motion_x = s->mv[dir][i][0];
770  int motion_y = s->mv[dir][i][1];
771 
772  dxy = ((motion_y & 3) << 2) | (motion_x & 3);
773  src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
774  src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;
775 
776  /* WARNING: do no forget half pels */
777  src_x = av_clip(src_x, -16, s->width);
778  if (src_x == s->width)
779  dxy &= ~3;
780  src_y = av_clip(src_y, -16, s->height);
781  if (src_y == s->height)
782  dxy &= ~12;
783 
784  ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
785  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 7, 0) ||
786  (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 3) - 7, 0)) {
788  s->linesize, s->linesize,
789  9, 9,
790  src_x, src_y,
791  s->h_edge_pos,
792  s->v_edge_pos);
793  ptr = s->sc.edge_emu_buffer;
794  }
795  dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
796  qpix_op[1][dxy](dest, ptr, s->linesize);
797 
798  mx += s->mv[dir][i][0] / 2;
799  my += s->mv[dir][i][1] / 2;
800  }
801  } else {
802  for (i = 0; i < 4; i++) {
803  hpel_motion(s,
804  dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
805  ref_picture[0],
806  mb_x * 16 + (i & 1) * 8,
807  mb_y * 16 + (i >> 1) * 8,
808  pix_op[1],
809  s->mv[dir][i][0],
810  s->mv[dir][i][1]);
811 
812  mx += s->mv[dir][i][0];
813  my += s->mv[dir][i][1];
814  }
815  }
816 
817  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
818  chroma_4mv_motion(s, dest_cb, dest_cr,
819  ref_picture, pix_op[1], mx, my);
820 }
821 
822 /**
823  * motion compensation of a single macroblock
824  * @param s context
825  * @param dest_y luma destination pointer
826  * @param dest_cb chroma cb/u destination pointer
827  * @param dest_cr chroma cr/v destination pointer
828  * @param dir direction (0->forward, 1->backward)
829  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
830  * @param pix_op halfpel motion compensation function (average or put normally)
831  * @param qpix_op qpel motion compensation function (average or put normally)
832  * the motion vectors are taken from s->mv and the MV type from s->mv_type
833  */
835  uint8_t *dest_y,
836  uint8_t *dest_cb,
837  uint8_t *dest_cr,
838  int dir,
839  uint8_t **ref_picture,
840  op_pixels_func (*pix_op)[4],
841  qpel_mc_func (*qpix_op)[16],
842  int is_mpeg12)
843 {
844  int i;
845  int mb_y = s->mb_y;
846 
847  prefetch_motion(s, ref_picture, dir);
848 
849  if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
850  apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
851  return;
852  }
853 
854  switch (s->mv_type) {
855  case MV_TYPE_16X16:
856  if (s->mcsel) {
857  if (s->real_sprite_warping_points == 1) {
858  gmc1_motion(s, dest_y, dest_cb, dest_cr,
859  ref_picture);
860  } else {
861  gmc_motion(s, dest_y, dest_cb, dest_cr,
862  ref_picture);
863  }
864  } else if (!is_mpeg12 && s->quarter_sample) {
865  qpel_motion(s, dest_y, dest_cb, dest_cr,
866  0, 0, 0,
867  ref_picture, pix_op, qpix_op,
868  s->mv[dir][0][0], s->mv[dir][0][1], 16);
869  } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
870  s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
871  ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
872  ref_picture, pix_op,
873  s->mv[dir][0][0], s->mv[dir][0][1], 16);
874  } else {
875  mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
876  ref_picture, pix_op,
877  s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y);
878  }
879  break;
880  case MV_TYPE_8X8:
881  if (!is_mpeg12)
882  apply_8x8(s, dest_y, dest_cb, dest_cr,
883  dir, ref_picture, qpix_op, pix_op);
884  break;
885  case MV_TYPE_FIELD:
886  if (s->picture_structure == PICT_FRAME) {
887  if (!is_mpeg12 && s->quarter_sample) {
888  for (i = 0; i < 2; i++)
889  qpel_motion(s, dest_y, dest_cb, dest_cr,
890  1, i, s->field_select[dir][i],
891  ref_picture, pix_op, qpix_op,
892  s->mv[dir][i][0], s->mv[dir][i][1], 8);
893  } else {
894  /* top field */
895  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
896  0, s->field_select[dir][0],
897  ref_picture, pix_op,
898  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
899  /* bottom field */
900  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
901  1, s->field_select[dir][1],
902  ref_picture, pix_op,
903  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
904  }
905  } else {
906  if ( s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
907  || !ref_picture[0]) {
908  ref_picture = s->current_picture_ptr->f->data;
909  }
910 
911  mpeg_motion(s, dest_y, dest_cb, dest_cr,
912  s->field_select[dir][0],
913  ref_picture, pix_op,
914  s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y >> 1);
915  }
916  break;
917  case MV_TYPE_16X8:
918  for (i = 0; i < 2; i++) {
919  uint8_t **ref2picture;
920 
921  if ((s->picture_structure == s->field_select[dir][i] + 1
922  || s->pict_type == AV_PICTURE_TYPE_B || s->first_field) && ref_picture[0]) {
923  ref2picture = ref_picture;
924  } else {
925  ref2picture = s->current_picture_ptr->f->data;
926  }
927 
928  mpeg_motion(s, dest_y, dest_cb, dest_cr,
929  s->field_select[dir][i],
930  ref2picture, pix_op,
931  s->mv[dir][i][0], s->mv[dir][i][1],
932  8, 1, (mb_y & ~1) + i);
933 
934  dest_y += 16 * s->linesize;
935  dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
936  dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
937  }
938  break;
939  case MV_TYPE_DMV:
940  if (s->picture_structure == PICT_FRAME) {
941  for (i = 0; i < 2; i++) {
942  int j;
943  for (j = 0; j < 2; j++)
944  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
945  j, j ^ i, ref_picture, pix_op,
946  s->mv[dir][2 * i + j][0],
947  s->mv[dir][2 * i + j][1], 8, mb_y);
948  pix_op = s->hdsp.avg_pixels_tab;
949  }
950  } else {
951  if (!ref_picture[0]) {
952  ref_picture = s->current_picture_ptr->f->data;
953  }
954  for (i = 0; i < 2; i++) {
955  mpeg_motion(s, dest_y, dest_cb, dest_cr,
956  s->picture_structure != i + 1,
957  ref_picture, pix_op,
958  s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
959  16, 0, mb_y >> 1);
960 
961  // after put we make avg of the same block
962  pix_op = s->hdsp.avg_pixels_tab;
963 
964  /* opposite parity is always in the same frame if this is
965  * second field */
966  if (!s->first_field) {
967  ref_picture = s->current_picture_ptr->f->data;
968  }
969  }
970  }
971  break;
972  default: av_assert2(0);
973  }
974 }
975 
977  uint8_t *dest_y, uint8_t *dest_cb,
978  uint8_t *dest_cr, int dir,
979  uint8_t **ref_picture,
980  op_pixels_func (*pix_op)[4],
981  qpel_mc_func (*qpix_op)[16])
982 {
983 #if !CONFIG_SMALL
984  if (s->out_format == FMT_MPEG1)
985  mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
986  ref_picture, pix_op, qpix_op, 1);
987  else
988 #endif
989  mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
990  ref_picture, pix_op, qpix_op, 0);
991 }
/* --- Doxygen cross-reference residue from the HTML export this file was
 * recovered from; not part of the original source.  Retained verbatim for
 * reference:
 *
 * void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
 * static int shift(int a, int b)
 * Definition: sonic.c:82
 * void(* gmc1)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x16, int y16, int rounder)
 * translational global motion compensation.
 * Definition: mpegvideodsp.h:32
 * static void gmc_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture)
 * #define MV_TYPE_FIELD
 * 2 vectors, one per field
 * Definition: mpegvideo.h:269
 * int sprite_warping_accuracy
 * Definition: mpegvideo.h:403
 * uint8_t * edge_emu_buffer
 * temporary buffer for if MVs point to out-of-frame data
 * Definition: mpegpicture.h:36
 * op_pixels_func avg_pixels_tab[4][4]
 * Halfpel motion compensation with rounding (a+b+1)>>1.
 * Definition: hpeldsp.h:68
 * MJPEG encoder.
 * int v_edge_pos
 * horizontal / vertical position of the right/bottom edge (pixel replication)
 * Definition: mpegvideo.h:132
 * enum AVCodecID codec_id
 * Definition: mpegvideo.h:112
 * #define FF_BUG_HPEL_CHROMA
 * Definition: avcodec.h:1578
 * int obmc
 * overlapped block motion compensation
 * Definition: mpegvideo.h:366
 * The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
 * Definition: undefined.txt:36
 * int real_sprite_warping_points
 * Definition: mpegvideo.h:396
 * mpegvideo header.
 * void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
 * Prefetch memory into cache (if supported by hardware).
 * Definition: videodsp.h:76
 * #define AV_COPY32(d, s)
 * Definition: intreadwrite.h:601
 * int chroma_x_shift
 * Definition: mpegvideo.h:489
 * int field_select[2][2]
 * Definition: mpegvideo.h:277
 * void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
 * Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
 * Definition: videodsp.h:63
 * ScratchpadContext sc
 * Definition: mpegvideo.h:202
 * uint8_t
 * #define av_assert2(cond)
 * assert() equivalent, that does lie in speed critical code.
 * Definition: avassert.h:64
 * static void chroma_4mv_motion(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, op_pixels_func *pix_op, int mx, int my)
 */
H.263 chroma 4mv motion compensation.
enum OutputFormat out_format
output format
Definition: mpegvideo.h:104
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
void(* gmc)(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
global motion compensation.
Definition: mpegvideodsp.h:37
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
Definition: mpegvideo.h:284
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:180
quarterpel DSP functions
#define FF_BUG_QPEL_CHROMA2
Definition: avcodec.h:1575
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
#define FF_BUG_IEDGE
Definition: avcodec.h:1582
#define av_log(a,...)
int sprite_offset[2][2]
sprite offset[isChroma][isMVY]
Definition: mpegvideo.h:397
#define MID
#define src
Definition: vp8dsp.c:255
static void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:195
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
int chroma_y_shift
Definition: mpegvideo.h:490
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:611
simple assert() macros that are a bit more flexible than ISO C assert().
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:401
#define FFMAX(a, b)
Definition: common.h:94
common internal API header
int sprite_delta[2][2]
sprite_delta [isY][isMVY]
Definition: mpegvideo.h:398
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:53
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:184
Picture.
Definition: mpegpicture.h:45
#define s(width, name)
Definition: cbs_vp9.c:257
static void gmc1_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture)
#define FF_BUG_QPEL_CHROMA
Definition: avcodec.h:1573
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:51
#define OBMC_FILTER4(x, t, l, m, r, b)
int first_field
is 1 for the first field of a field picture 0 otherwise
Definition: mpegvideo.h:495
static const int8_t mv[256][2]
Definition: 4xm.c:78
static av_always_inline void mpv_motion_internal(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16], int is_mpeg12)
motion compensation of a single macroblock
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
Libavcodec external API header.
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:134
static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int bottom_field, int field_select, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h, int mb_y)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
#define LOCAL_ALIGNED_8(t, v,...)
Definition: mem_internal.h:124
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:100
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:268
static av_always_inline void mpeg_motion_internal(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h, int is_mpeg12, int is_16x8, int mb_y)
H.261 codec.
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:82
struct AVFrame * f
Definition: mpegpicture.h:46
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:135
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:101
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:212
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:131
MpegEncContext.
Definition: mpegvideo.h:81
struct AVCodecContext * avctx
Definition: mpegvideo.h:98
static void qpel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16], int motion_x, int motion_y, int h)
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
MpegVideoDSPContext mdsp
Definition: mpegvideo.h:232
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:130
void ff_mspel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h)
Definition: wmv2.c:100
uint8_t * dest[3]
Definition: mpegvideo.h:295
Bi-dir predicted.
Definition: avutil.h:276
#define IS_INTRA(x, y)
#define PICT_FRAME
Definition: mpegutils.h:39
int picture_structure
Definition: mpegvideo.h:464
static void mpeg_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_select, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h, int is_16x8, int mb_y)
VideoDSPContext vdsp
Definition: mpegvideo.h:236
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:270
static void apply_obmc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, op_pixels_func(*pix_op)[4])
uint8_t * obmc_scratchpad
Definition: mpegpicture.h:38
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:56
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
Definition: mpegvideo.h:119
#define av_always_inline
Definition: attributes.h:45
void ff_h261_loop_filter(MpegEncContext *s)
Definition: h261.c:61
static int hpel_motion(MpegEncContext *s, uint8_t *dest, uint8_t *src, int src_x, int src_y, op_pixels_func *pix_op, int motion_x, int motion_y)
#define stride
static void obmc_motion(MpegEncContext *s, uint8_t *dest, uint8_t *src, int src_x, int src_y, op_pixels_func *pix_op, int16_t mv[5][2])
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
static void apply_8x8(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, qpel_mc_func(*qpix_op)[16], op_pixels_func(*pix_op)[4])
static void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
#define OBMC_FILTER(x, t, l, m, r, b)
int i
Definition: input.c:407
HpelDSPContext hdsp
Definition: mpegvideo.h:229