/*
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config_components.h"

#include "libavutil/avassert.h"
#include "libavutil/internal.h"
#include "libavutil/mem_internal.h"

#include "avcodec.h"
#include "h261.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "qpeldsp.h"
#include "wmv2.h"

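/* Global motion compensation when a single sprite warping point is in use
 * (the "gmc1" fast path): one translational sprite offset is applied to the
 * 16x16 luma block and, at half resolution, to both 8x8 chroma blocks, with
 * edge emulation when the source area lies outside the decoded picture. */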
static void gmc1_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        uint8_t *const *ref_picture)
{
    const uint8_t *ptr;
    int src_x, src_y, motion_x, motion_y;
    ptrdiff_t offset, linesize, uvlinesize;
    int emu = 0;

    motion_x   = s->sprite_offset[0][0];
    motion_y   = s->sprite_offset[0][1];
    src_x      = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y      = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x  *= 1 << (3 - s->sprite_warping_accuracy);
    motion_y  *= 1 << (3 - s->sprite_warping_accuracy);
    src_x      = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x = 0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y = 0;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + src_y * linesize + src_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 linesize, linesize,
                                 17, 17,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }

    if ((motion_x | motion_y) & 7) {
        s->mdsp.gmc1(dest_y, ptr, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
        s->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
    } else {
        int dxy;

        dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
        if (s->no_rounding) {
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        } else {
            s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
        return;

    motion_x   = s->sprite_offset[1][0];
    motion_y   = s->sprite_offset[1][1];
    src_x      = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y      = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x  *= 1 << (3 - s->sprite_warping_accuracy);
    motion_y  *= 1 << (3 - s->sprite_warping_accuracy);
    src_x      = av_clip(src_x, -8, s->width >> 1);
    if (src_x == s->width >> 1)
        motion_x = 0;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == s->height >> 1)
        motion_y = 0;

    offset = (src_y * uvlinesize) + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
    }
    s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);
}

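/* Global motion compensation with an affine warp (more than one sprite
 * warping point): the per-macroblock source offsets are derived from the
 * sprite offset plus the sprite delta matrix, and the DSP gmc() routine
 * handles the sub-pel interpolation and edge clipping itself. */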
static void gmc_motion(MpegEncContext *s,
                       uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                       uint8_t *const *ref_picture)
{
    const uint8_t *ptr;
    int linesize, uvlinesize;
    const int a = s->sprite_warping_accuracy;
    int ox, oy;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox = s->sprite_offset[0][0] + s->sprite_delta[0][0] * s->mb_x * 16 +
         s->sprite_delta[0][1] * s->mb_y * 16;
    oy = s->sprite_offset[0][1] + s->sprite_delta[1][0] * s->mb_x * 16 +
         s->sprite_delta[1][1] * s->mb_y * 16;

    s->mdsp.gmc(dest_y, ptr, linesize, 16,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos, s->v_edge_pos);
    s->mdsp.gmc(dest_y + 8, ptr, linesize, 16,
                ox + s->sprite_delta[0][0] * 8,
                oy + s->sprite_delta[1][0] * 8,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos, s->v_edge_pos);

    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
        return;

    ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 +
         s->sprite_delta[0][1] * s->mb_y * 8;
    oy = s->sprite_offset[1][1] + s->sprite_delta[1][0] * s->mb_x * 8 +
         s->sprite_delta[1][1] * s->mb_y * 8;

    ptr = ref_picture[1];
    s->mdsp.gmc(dest_cb, ptr, uvlinesize, 8,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);

    ptr = ref_picture[2];
    s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
}

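/* Half-pel motion compensation of one 8x8 luma block. Returns 1 if the edge
 * emulation buffer had to be used because the source area touches the
 * picture border, 0 otherwise. */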
static inline int hpel_motion(MpegEncContext *s,
                              uint8_t *dest, uint8_t *src,
                              int src_x, int src_y,
                              op_pixels_func *pix_op,
                              int motion_x, int motion_y)
{
    int dxy = 0;
    int emu = 0;

    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do not forget half pels */
    src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
    if (src_x != s->width)
        dxy |= motion_x & 1;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y != s->height)
        dxy |= (motion_y & 1) << 1;
    src += src_y * s->linesize + src_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 7, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 1) - 7, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        src = s->sc.edge_emu_buffer;
        emu = 1;
    }
    pix_op[dxy](dest, src, s->linesize, 8);
    return emu;
}

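/* Common half-pel motion compensation of a full macroblock (luma plus both
 * chroma planes), shared by the MPEG-1/2 and H.261/H.263 paths. Handles
 * frame, field and 16x8 addressing, the different chroma subsampling modes
 * and edge emulation; is_mpeg12 is passed as a constant by the wrappers so
 * the always-inlined body can be specialized at compile time. */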
static av_always_inline
void mpeg_motion_internal(MpegEncContext *s,
                          uint8_t *dest_y,
                          uint8_t *dest_cb,
                          uint8_t *dest_cr,
                          int field_based,
                          int bottom_field,
                          int field_select,
                          uint8_t *const *ref_picture,
                          op_pixels_func (*pix_op)[4],
                          int motion_x,
                          int motion_y,
                          int h,
                          int is_mpeg12,
                          int is_16x8,
                          int mb_y)
{
    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y,
        uvsrc_x, uvsrc_y, v_edge_pos, block_y_half;
    ptrdiff_t uvlinesize, linesize;

    v_edge_pos   = s->v_edge_pos >> field_based;
    linesize     = s->current_picture.f->linesize[0] << field_based;
    uvlinesize   = s->current_picture.f->linesize[1] << field_based;
    block_y_half = (field_based | is_16x8);

    dxy   = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = (mb_y << (4 - block_y_half)) + (motion_y >> 1);

    if (!is_mpeg12 && s->out_format == FMT_H263) {
        if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
            mx      = (motion_x >> 1) | (motion_x & 1);
            my      = motion_y >> 1;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
        } else {
            uvdxy   = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x >> 1;
            uvsrc_y = src_y >> 1;
        }
        // Even chroma mv's are full pel in H261
    } else if (!is_mpeg12 && s->out_format == FMT_H261) {
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvdxy   = 0;
        uvsrc_x = s->mb_x * 8 + mx;
        uvsrc_y = mb_y * 8 + my;
    } else {
        if (s->chroma_y_shift) {
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
        } else {
            if (s->chroma_x_shift) {
                // Chroma422
                mx      = motion_x / 2;
                uvdxy   = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x * 8 + (mx >> 1);
                uvsrc_y = src_y;
            } else {
                // Chroma444
                uvdxy   = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 15, 0) ||
        (unsigned)src_y >= FFMAX(v_edge_pos - (motion_y & 1) - h + 1, 0)) {
        if (is_mpeg12 || (CONFIG_SMALL &&
                          (s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
                           s->codec_id == AV_CODEC_ID_MPEG1VIDEO))) {
            av_log(s->avctx, AV_LOG_DEBUG,
                   "MPEG motion vector out of boundary (%d %d)\n", src_x,
                   src_y);
            return;
        }
        src_y = (unsigned)src_y << field_based;
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            uvsrc_y = (unsigned)uvsrc_y << field_based;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    /* FIXME use this for field pix too instead of the obnoxious hack which
     * changes picture.data */
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
        s->out_format == FMT_H261) {
        ff_h261_loop_filter(s);
    }
}
/* apply one MPEG motion vector to the three components */
static void mpeg_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        int field_select, uint8_t *const *ref_picture,
                        op_pixels_func (*pix_op)[4],
                        int motion_x, int motion_y, int h, int is_16x8, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, is_16x8, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, is_16x8, mb_y);
}

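/* Same as mpeg_motion(), but for one field of a field-coded macroblock. */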
static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              int bottom_field, int field_select,
                              uint8_t *const *ref_picture,
                              op_pixels_func (*pix_op)[4],
                              int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, 0, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, 0, mb_y);
}

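/* Blend one 8x8 block from the five overlapping predictions (mid, top,
 * left, right, bottom) using a fixed OBMC weight pattern; the weights for
 * each pixel sum to 8, hence the "+ 4" rounding and ">> 3" in OBMC_FILTER. */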
// FIXME: SIMDify, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
{
    int x;
    uint8_t *const top    = src[1];
    uint8_t *const left   = src[2];
    uint8_t *const mid    = src[0];
    uint8_t *const right  = src[3];
    uint8_t *const bottom = src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    x = 0;
    OBMC_FILTER (x    , 2, 2, 4, 0, 0);
    OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
    x += stride;
    OBMC_FILTER (x    , 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
    x += stride;
    OBMC_FILTER4(x    , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
    x += 2 * stride;
    OBMC_FILTER4(x    , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
    x += 2 * stride;
    OBMC_FILTER (x    , 0, 2, 5, 0, 1);
    OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
    x += stride;
    OBMC_FILTER (x    , 0, 2, 4, 0, 2);
    OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
}

/* obmc for 1 8x8 luma block */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
                               int16_t mv[5][2] /* mid top left right bottom */)
#define MID    0
{
    int i;
    uint8_t *ptr[5];

    av_assert2(s->quarter_sample == 0);

    for (i = 0; i < 5; i++) {
        if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
            ptr[i] = ptr[MID];
        } else {
            ptr[i] = s->sc.obmc_scratchpad + 8 * (i & 1) +
                     s->linesize * 8 * (i >> 1);
            hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}

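/* Quarter-pel motion compensation of a full macroblock: luma via qpix_op,
 * chroma at half-pel via pix_op, with edge emulation and the various chroma
 * rounding bug workarounds. */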
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based, int bottom_field,
                               int field_select, uint8_t *const *ref_picture,
                               op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t linesize, uvlinesize;

    dxy = ((motion_y & 3) << 2) | (motion_x & 3);

    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->linesize   << field_based;
    uvlinesize = s->uvlinesize << field_based;

    if (field_based) {
        mx = motion_x / 2;
        my = motion_y >> 1;
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
        static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
        mx = (motion_x >> 1) + rtab[motion_x & 7];
        my = (motion_y >> 1) + rtab[motion_y & 7];
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
        mx = (motion_x >> 1) | (motion_x & 1);
        my = (motion_y >> 1) | (motion_y & 1);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
    }
    mx = (mx >> 1) | (mx & 1);
    my = (my >> 1) | (my & 1);

    uvdxy = (mx & 1) | ((my & 1) << 1);
    mx  >>= 1;
    my  >>= 1;

    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 15, 0) ||
        (unsigned)src_y >= FFMAX(v_edge_pos - (motion_y & 3) - h + 1, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y * (1 << field_based),
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    if (!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else {
        if (bottom_field) {
            dest_y  += s->linesize;
            dest_cb += s->uvlinesize;
            dest_cr += s->uvlinesize;
        }

        if (field_select) {
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        // damn interlaced mode
        // FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y, ptr_y, linesize);
        qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}

/**
 * H.263 chroma 4mv motion compensation.
 */
static void chroma_4mv_motion(MpegEncContext *s,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              uint8_t *const *ref_picture,
                              op_pixels_func *pix_op,
                              int mx, int my)
{
    const uint8_t *ptr;
    int src_x, src_y, dxy, emu = 0;
    ptrdiff_t offset;

    /* In case of 8X8, we construct a single chroma motion vector
     * with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    dxy = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, (s->width >> 1));
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = av_clip(src_y, -8, (s->height >> 1));
    if (src_y == (s->height >> 1))
        dxy &= ~2;

    offset = src_y * s->uvlinesize + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 7, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 7, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}

static inline void prefetch_motion(MpegEncContext *s, uint8_t *const *pix, int dir)
{
    /* fetch pixels for estimated mv 4 macroblocks ahead
     * optimized for 64-byte cache lines */
    const int shift = s->quarter_sample ? 2 : 1;
    const int mx    = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
    const int my    = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
    int off         = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;

    s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
    off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
    s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
}

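/* Overlapped block motion compensation (H.263-style OBMC) of one macroblock:
 * gather the motion vectors of the four 8x8 blocks and of their top, left
 * and right neighbours (the row below is not available yet and is replaced
 * by the current row), run obmc_motion() on each luma block, and compensate
 * chroma once with the combined vector. */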
static inline void apply_obmc(MpegEncContext *s,
                              uint8_t *dest_y,
                              uint8_t *dest_cb,
                              uint8_t *dest_cr,
                              uint8_t *const *ref_picture,
                              op_pixels_func (*pix_op)[4])
{
    LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
    const Picture *cur_frame = &s->current_picture;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    const int xy         = mb_x + mb_y * s->mb_stride;
    const int mot_stride = s->b8_stride;
    const int mot_xy     = mb_x * 2 + mb_y * 2 * mot_stride;
    int mx, my, i;

    av_assert2(!s->mb_skipped);

    AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
    AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);

    AV_COPY32(mv_cache[2][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[2][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    AV_COPY32(mv_cache[3][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[3][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
        AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
        AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
    } else {
        AV_COPY32(mv_cache[0][1],
                  cur_frame->motion_val[0][mot_xy - mot_stride]);
        AV_COPY32(mv_cache[0][2],
                  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
    }

    if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
        AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
        AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
    } else {
        AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
        AV_COPY32(mv_cache[2][0],
                  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
    }

    if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
        AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
        AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
    } else {
        AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
        AV_COPY32(mv_cache[2][3],
                  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
    }

    mx = 0;
    my = 0;
    for (i = 0; i < 4; i++) {
        const int x      = (i & 1) + 1;
        const int y      = (i >> 1) + 1;
        int16_t mv[5][2] = {
            { mv_cache[y][x][0],     mv_cache[y][x][1]     },
            { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
            { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
            { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
            { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
        };
        // FIXME cleanup
        obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                    ref_picture[0],
                    mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
                    pix_op[1],
                    mv);

        mx += mv[0][0];
        my += mv[0][1];
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1],
                          mx, my);
}

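/* 4MV mode: apply one motion vector per 8x8 luma block (quarter-pel or
 * half-pel depending on s->quarter_sample) and one derived vector for the
 * two chroma blocks. */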
static inline void apply_8x8(MpegEncContext *s,
                             uint8_t *dest_y,
                             uint8_t *dest_cb,
                             uint8_t *dest_cr,
                             int dir,
                             uint8_t *const *ref_picture,
                             qpel_mc_func (*qpix_op)[16],
                             op_pixels_func (*pix_op)[4])
{
    int dxy, mx, my, src_x, src_y;
    int i;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    uint8_t *dest;
    const uint8_t *ptr;

    mx = 0;
    my = 0;
    if (s->quarter_sample) {
        for (i = 0; i < 4; i++) {
            int motion_x = s->mv[dir][i][0];
            int motion_y = s->mv[dir][i][1];

            dxy   = ((motion_y & 3) << 2) | (motion_x & 3);
            src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
            src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;

            /* WARNING: do not forget half pels */
            src_x = av_clip(src_x, -16, s->width);
            if (src_x == s->width)
                dxy &= ~3;
            src_y = av_clip(src_y, -16, s->height);
            if (src_y == s->height)
                dxy &= ~12;

            ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
            if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 7, 0) ||
                (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 3) - 7, 0)) {
                s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                         s->linesize, s->linesize,
                                         9, 9,
                                         src_x, src_y,
                                         s->h_edge_pos,
                                         s->v_edge_pos);
                ptr = s->sc.edge_emu_buffer;
            }
            dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
            qpix_op[1][dxy](dest, ptr, s->linesize);

            mx += s->mv[dir][i][0] / 2;
            my += s->mv[dir][i][1] / 2;
        }
    } else {
        for (i = 0; i < 4; i++) {
            hpel_motion(s,
                        dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8,
                        mb_y * 16 + (i >> 1) * 8,
                        pix_op[1],
                        s->mv[dir][i][0],
                        s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }
    }

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1], mx, my);
}

/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static av_always_inline void mpv_motion_internal(MpegEncContext *s,
                                                 uint8_t *dest_y,
                                                 uint8_t *dest_cb,
                                                 uint8_t *dest_cr,
                                                 int dir,
                                                 uint8_t *const *ref_picture,
                                                 op_pixels_func (*pix_op)[4],
                                                 qpel_mc_func (*qpix_op)[16],
                                                 int is_mpeg12)
{
    int i;
    int mb_y = s->mb_y;

    if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
        apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
        return;
    }

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        if (!is_mpeg12 && s->mcsel) {
            if (s->real_sprite_warping_points == 1) {
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            } else {
                gmc_motion(s, dest_y, dest_cb, dest_cr,
                           ref_picture);
            }
        } else if (!is_mpeg12 && s->quarter_sample) {
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
                   s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else {
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y);
        }
        break;
    case MV_TYPE_8X8:
        if (!is_mpeg12)
            apply_8x8(s, dest_y, dest_cb, dest_cr,
                      dir, ref_picture, qpix_op, pix_op);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            if (!is_mpeg12 && s->quarter_sample) {
                for (i = 0; i < 2; i++)
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
            } else {
                /* top field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  0, s->field_select[dir][0],
                                  ref_picture, pix_op,
                                  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
                /* bottom field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  1, s->field_select[dir][1],
                                  ref_picture, pix_op,
                                  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
            }
        } else {
            if (   s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
                || !ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        if (CONFIG_SMALL || is_mpeg12) {
            for (i = 0; i < 2; i++) {
                uint8_t *const *ref2picture;

                if ((s->picture_structure == s->field_select[dir][i] + 1 ||
                     s->pict_type == AV_PICTURE_TYPE_B || s->first_field) &&
                    ref_picture[0]) {
                    ref2picture = ref_picture;
                } else {
                    ref2picture = s->current_picture_ptr->f->data;
                }

                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            s->field_select[dir][i],
                            ref2picture, pix_op,
                            s->mv[dir][i][0], s->mv[dir][i][1],
                            8, 1, (mb_y & ~1) + i);

                dest_y  += 16 * s->linesize;
                dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
                dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
            }
            break;
        }
    case MV_TYPE_DMV:
        if (CONFIG_SMALL || is_mpeg12) {
            if (s->picture_structure == PICT_FRAME) {
                for (i = 0; i < 2; i++) {
                    for (int j = 0; j < 2; j++)
                        mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                          j, j ^ i, ref_picture, pix_op,
                                          s->mv[dir][2 * i + j][0],
                                          s->mv[dir][2 * i + j][1], 8, mb_y);
                    pix_op = s->hdsp.avg_pixels_tab;
                }
            } else {
                if (!ref_picture[0]) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
                for (i = 0; i < 2; i++) {
                    mpeg_motion(s, dest_y, dest_cb, dest_cr,
                                s->picture_structure != i + 1,
                                ref_picture, pix_op,
                                s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                                16, 0, mb_y >> 1);

                    // after put we make avg of the same block
                    pix_op = s->hdsp.avg_pixels_tab;

                    /* opposite parity is always in the same frame if this is
                     * second field */
                    if (!s->first_field)
                        ref_picture = s->current_picture_ptr->f->data;
                }
            }
            break;
        }
    default: av_assert2(0);
    }
}

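/* Public entry point: prefetch the reference pixels, then dispatch to the
 * MPEG-1 or generic specialization of mpv_motion_internal(). */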
void ff_mpv_motion(MpegEncContext *s,
                   uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int dir,
                   uint8_t *const *ref_picture,
                   op_pixels_func (*pix_op)[4],
                   qpel_mc_func (*qpix_op)[16])
{
    prefetch_motion(s, ref_picture, dir);

#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 1);
    else
#endif
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 0);
}