FFmpeg
vc1_mc.c
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 /**
25  * @file
26  * VC-1 and WMV3 block decoding routines
27  */
28 
29 #include "avcodec.h"
30 #include "h264chroma.h"
31 #include "mathops.h"
32 #include "mpegvideo.h"
33 #include "vc1.h"
34 
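/* Range-reduction rescaling (used below when v->rangeredfrm is set): each
   sample is mapped to ((x - 128) >> 1) + 128, i.e. its offset from mid-gray
   128 is halved (e.g. 255 -> 191, 0 -> 64, 128 stays 128). */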
35 static av_always_inline void vc1_scale_luma(uint8_t *srcY,
36  int k, int linesize)
37 {
38  int i, j;
39  for (j = 0; j < k; j++) {
40  for (i = 0; i < k; i++)
41  srcY[i] = ((srcY[i] - 128) >> 1) + 128;
42  srcY += linesize;
43  }
44 }
45 
46 static av_always_inline void vc1_scale_chroma(uint8_t *srcU, uint8_t *srcV,
47  int k, int uvlinesize)
48 {
49  int i, j;
50  for (j = 0; j < k; j++) {
51  for (i = 0; i < k; i++) {
52  srcU[i] = ((srcU[i] - 128) >> 1) + 128;
53  srcV[i] = ((srcV[i] - 128) >> 1) + 128;
54  }
55  srcU += uvlinesize;
56  srcV += uvlinesize;
57  }
58 }
59 
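/* LUT-based intensity compensation: lut1 is applied to even rows and lut2 to
   odd rows, so the two interlaced fields can be remapped with different
   lookup tables. */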
60 static av_always_inline void vc1_lut_scale_luma(uint8_t *srcY,
61  uint8_t *lut1, uint8_t *lut2,
62  int k, int linesize)
63 {
64  int i, j;
65 
66  for (j = 0; j < k; j += 2) {
67  for (i = 0; i < k; i++)
68  srcY[i] = lut1[srcY[i]];
69  srcY += linesize;
70 
71  if (j + 1 == k)
72  break;
73 
74  for (i = 0; i < k; i++)
75  srcY[i] = lut2[srcY[i]];
76  srcY += linesize;
77  }
78 }
79 
80 static av_always_inline void vc1_lut_scale_chroma(uint8_t *srcU, uint8_t *srcV,
81  uint8_t *lut1, uint8_t *lut2,
82  int k, int uvlinesize)
83 {
84  int i, j;
85 
86  for (j = 0; j < k; j += 2) {
87  for (i = 0; i < k; i++) {
88  srcU[i] = lut1[srcU[i]];
89  srcV[i] = lut1[srcV[i]];
90  }
91  srcU += uvlinesize;
92  srcV += uvlinesize;
93 
94  if (j + 1 == k)
95  break;
96 
97  for (i = 0; i < k; i++) {
98  srcU[i] = lut2[srcU[i]];
99  srcV[i] = lut2[srcV[i]];
100  }
101  srcU += uvlinesize;
102  srcV += uvlinesize;
103  }
104 }
105 
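/* Number of set bits in a 4-bit value; used below to count how many of the
   four luma blocks have a given per-block flag set. */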
106 static const uint8_t popcount4[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 };
107 
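/* Derive one luma MV for the macroblock from its four block MVs.  idx flags
   the blocks whose mv_f bit (opposite-field MV) is set: with 0 or 4 flagged
   the median of all four MVs is taken, with 1 or 3 the median of the majority
   three, and with exactly 2 the average of the two unflagged blocks (pair
   looked up in index2).  Returns the opposite-field count. */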
108 static av_always_inline int get_luma_mv(VC1Context *v, int dir, int16_t *tx, int16_t *ty)
109 {
110  MpegEncContext *s = &v->s;
111  int idx = v->mv_f[dir][s->block_index[0] + v->blocks_off] |
112  (v->mv_f[dir][s->block_index[1] + v->blocks_off] << 1) |
113  (v->mv_f[dir][s->block_index[2] + v->blocks_off] << 2) |
114  (v->mv_f[dir][s->block_index[3] + v->blocks_off] << 3);
115  static const uint8_t index2[16] = { 0, 0, 0, 0x23, 0, 0x13, 0x03, 0, 0, 0x12, 0x02, 0, 0x01, 0, 0, 0 };
116  int opp_count = popcount4[idx];
117 
118  switch (opp_count) {
119  case 0:
120  case 4:
121  *tx = median4(s->mv[dir][0][0], s->mv[dir][1][0], s->mv[dir][2][0], s->mv[dir][3][0]);
122  *ty = median4(s->mv[dir][0][1], s->mv[dir][1][1], s->mv[dir][2][1], s->mv[dir][3][1]);
123  break;
124  case 1:
125  *tx = mid_pred(s->mv[dir][idx < 2][0], s->mv[dir][1 + (idx < 4)][0], s->mv[dir][2 + (idx < 8)][0]);
126  *ty = mid_pred(s->mv[dir][idx < 2][1], s->mv[dir][1 + (idx < 4)][1], s->mv[dir][2 + (idx < 8)][1]);
127  break;
128  case 3:
129  *tx = mid_pred(s->mv[dir][idx > 0xd][0], s->mv[dir][1 + (idx > 0xb)][0], s->mv[dir][2 + (idx > 0x7)][0]);
130  *ty = mid_pred(s->mv[dir][idx > 0xd][1], s->mv[dir][1 + (idx > 0xb)][1], s->mv[dir][2 + (idx > 0x7)][1]);
131  break;
132  case 2:
133  *tx = (s->mv[dir][index2[idx] >> 4][0] + s->mv[dir][index2[idx] & 0xf][0]) / 2;
134  *ty = (s->mv[dir][index2[idx] >> 4][1] + s->mv[dir][index2[idx] & 0xf][1]) / 2;
135  break;
136  }
137  return opp_count;
138 }
139 
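/* Same scheme for the chroma MV, but idx flags the blocks with mb_type clear
   (the non-intra blocks, per the caller's comment): 4 -> median of all four,
   3 -> median of those three, 2 -> their average; fewer than two valid blocks
   returns 0 and chroma MC is skipped. */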
140 static av_always_inline int get_chroma_mv(VC1Context *v, int dir, int16_t *tx, int16_t *ty)
141 {
142  MpegEncContext *s = &v->s;
143  int idx = !v->mb_type[0][s->block_index[0]] |
144  (!v->mb_type[0][s->block_index[1]] << 1) |
145  (!v->mb_type[0][s->block_index[2]] << 2) |
146  (!v->mb_type[0][s->block_index[3]] << 3);
147  static const uint8_t index2[16] = { 0, 0, 0, 0x01, 0, 0x02, 0x12, 0, 0, 0x03, 0x13, 0, 0x23, 0, 0, 0 };
148  int valid_count = popcount4[idx];
149 
150  switch (valid_count) {
151  case 4:
152  *tx = median4(s->mv[dir][0][0], s->mv[dir][1][0], s->mv[dir][2][0], s->mv[dir][3][0]);
153  *ty = median4(s->mv[dir][0][1], s->mv[dir][1][1], s->mv[dir][2][1], s->mv[dir][3][1]);
154  break;
155  case 3:
156  *tx = mid_pred(s->mv[dir][idx > 0xd][0], s->mv[dir][1 + (idx > 0xb)][0], s->mv[dir][2 + (idx > 0x7)][0]);
157  *ty = mid_pred(s->mv[dir][idx > 0xd][1], s->mv[dir][1 + (idx > 0xb)][1], s->mv[dir][2 + (idx > 0x7)][1]);
158  break;
159  case 2:
160  *tx = (s->mv[dir][index2[idx] >> 4][0] + s->mv[dir][index2[idx] & 0xf][0]) / 2;
161  *ty = (s->mv[dir][index2[idx] >> 4][1] + s->mv[dir][index2[idx] & 0xf][1]) / 2;
162  break;
163  default:
164  return 0;
165  }
166  return valid_count;
167 }
168 
169 /** Do motion compensation over 1 macroblock
170  * Mostly adapted from hpel_motion() and qpel_motion() in mpegvideo.c
171  */
172 void ff_vc1_mc_1mv(VC1Context *v, int dir)
173 {
174  MpegEncContext *s = &v->s;
175  H264ChromaContext *h264chroma = &v->h264chroma;
176  uint8_t *srcY, *srcU, *srcV;
177  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
178  int v_edge_pos = s->v_edge_pos >> v->field_mode;
179  int i;
180  uint8_t (*luty)[256], (*lutuv)[256];
181  int use_ic;
182  int interlace;
183  int linesize, uvlinesize;
184 
185  if ((!v->field_mode ||
186  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
187  !v->s.last_picture.f->data[0])
188  return;
189 
190  linesize = s->current_picture_ptr->f->linesize[0];
191  uvlinesize = s->current_picture_ptr->f->linesize[1];
192 
193  mx = s->mv[dir][0][0];
194  my = s->mv[dir][0][1];
195 
196  // store motion vectors for further use in B-frames
197  if (s->pict_type == AV_PICTURE_TYPE_P) {
198  for (i = 0; i < 4; i++) {
199  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
200  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
201  }
202  }
203 
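/* Chroma MV: halve the quarter-pel luma MV, biasing components whose
   fractional part is 3 up by one before the shift. */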
204  uvmx = (mx + ((mx & 3) == 3)) >> 1;
205  uvmy = (my + ((my & 3) == 3)) >> 1;
206  v->luma_mv[s->mb_x][0] = uvmx;
207  v->luma_mv[s->mb_x][1] = uvmy;
208 
209  if (v->field_mode &&
210  v->cur_field_type != v->ref_field_type[dir]) {
211  my = my - 2 + 4 * v->cur_field_type;
212  uvmy = uvmy - 2 + 4 * v->cur_field_type;
213  }
214 
215  // fastuvmc shall be ignored for interlaced frame picture
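 // (rounds odd quarter-pel chroma MV components toward zero, i.e. restricts
 // chroma to half-pel positions)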
216  if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
217  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
218  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
219  }
220  if (!dir) {
221  if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
222  srcY = s->current_picture.f->data[0];
223  srcU = s->current_picture.f->data[1];
224  srcV = s->current_picture.f->data[2];
225  luty = v->curr_luty;
226  lutuv = v->curr_lutuv;
227  use_ic = *v->curr_use_ic;
228  interlace = 1;
229  } else {
230  srcY = s->last_picture.f->data[0];
231  srcU = s->last_picture.f->data[1];
232  srcV = s->last_picture.f->data[2];
233  luty = v->last_luty;
234  lutuv = v->last_lutuv;
235  use_ic = v->last_use_ic;
236  interlace = !!(s->last_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
237  }
238  } else {
239  srcY = s->next_picture.f->data[0];
240  srcU = s->next_picture.f->data[1];
241  srcV = s->next_picture.f->data[2];
242  luty = v->next_luty;
243  lutuv = v->next_lutuv;
244  use_ic = v->next_use_ic;
245  interlace = !!(s->next_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
246  }
247 
248  if (!srcY || !srcU) {
249  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
250  return;
251  }
252 
253  src_x = s->mb_x * 16 + (mx >> 2);
254  src_y = s->mb_y * 16 + (my >> 2);
255  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
256  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
257 
258  if (v->profile != PROFILE_ADVANCED) {
259  src_x = av_clip( src_x, -16, s->mb_width * 16);
260  src_y = av_clip( src_y, -16, s->mb_height * 16);
261  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
262  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
263  } else {
264  src_x = av_clip( src_x, -17, s->avctx->coded_width);
265  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
266  if (v->fcm == ILACE_FRAME) {
267  src_y = av_clip(src_y, -18 + (src_y & 1), s->avctx->coded_height + (src_y & 1));
268  uvsrc_y = av_clip(uvsrc_y, -8 + (uvsrc_y & 1), (s->avctx->coded_height >> 1) + (uvsrc_y & 1));
269  } else {
270  src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
271  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
272  }
273  }
274 
275  srcY += src_y * s->linesize + src_x;
276  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
277  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
278 
279  if (v->field_mode && v->ref_field_type[dir]) {
280  srcY += linesize;
281  srcU += uvlinesize;
282  srcV += uvlinesize;
283  }
284 
285  /* for grayscale we should not try to read from unknown area */
286  if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY) {
287  srcU = s->sc.edge_emu_buffer + 18 * s->linesize;
288  srcV = s->sc.edge_emu_buffer + 18 * s->linesize;
289  }
290 
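/* If the block needs per-pixel rescaling (range reduction / intensity
   compensation) or would read past the padded frame edge, fetch it into
   edge_emu_buffer with edge replication first.  k below is the luma fetch
   size: 16 + 1 for sub-pel interpolation, plus an s->mspel margin on each
   side for the quarter-pel filter. */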
291  if (v->rangeredfrm || use_ic
292  || s->h_edge_pos < 22 || v_edge_pos < 22
293  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
294  || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
295  uint8_t *ubuf = s->sc.edge_emu_buffer + 19 * s->linesize;
296  uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
297  const int k = 17 + s->mspel * 2;
298 
299  srcY -= s->mspel * (1 + s->linesize);
300  if (interlace) {
301  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer,
302  srcY,
303  linesize << 1,
304  linesize << 1,
305  k,
306  v->field_mode ? k : k + 1 >> 1,
307  src_x - s->mspel,
308  src_y - s->mspel >> !v->field_mode,
309  s->h_edge_pos,
310  s->v_edge_pos >> 1);
311  if (!v->field_mode)
312  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + linesize,
313  srcY + linesize,
314  linesize << 1,
315  linesize << 1,
316  k,
317  k >> 1,
318  src_x - s->mspel,
319  src_y - s->mspel + 1 >> 1,
320  s->h_edge_pos,
321  s->v_edge_pos >> 1);
322  } else
323  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer,
324  srcY,
325  linesize,
326  linesize,
327  k,
328  v->field_mode ? (k << 1) - 1 : k,
329  src_x - s->mspel,
330  v->field_mode ? 2 * (src_y - s->mspel) + v->ref_field_type[dir] :
331  src_y - s->mspel,
332  s->h_edge_pos,
333  s->v_edge_pos);
334  srcY = s->sc.edge_emu_buffer;
335  if (interlace) {
336  s->vdsp.emulated_edge_mc(ubuf,
337  srcU,
338  uvlinesize << 1,
339  uvlinesize << 1,
340  9,
341  v->field_mode ? 9 : 5,
342  uvsrc_x,
343  uvsrc_y >> !v->field_mode,
344  s->h_edge_pos >> 1,
345  s->v_edge_pos >> 2);
346  s->vdsp.emulated_edge_mc(vbuf,
347  srcV,
348  uvlinesize << 1,
349  uvlinesize << 1,
350  9,
351  v->field_mode ? 9 : 5,
352  uvsrc_x,
353  uvsrc_y >> !v->field_mode,
354  s->h_edge_pos >> 1,
355  s->v_edge_pos >> 2);
356  if (!v->field_mode) {
357  s->vdsp.emulated_edge_mc(ubuf + uvlinesize,
358  srcU + uvlinesize,
359  uvlinesize << 1,
360  uvlinesize << 1,
361  9,
362  4,
363  uvsrc_x,
364  uvsrc_y + 1 >> 1,
365  s->h_edge_pos >> 1,
366  s->v_edge_pos >> 2);
367  s->vdsp.emulated_edge_mc(vbuf + uvlinesize,
368  srcV + uvlinesize,
369  uvlinesize << 1,
370  uvlinesize << 1,
371  9,
372  4,
373  uvsrc_x,
374  uvsrc_y + 1 >> 1,
375  s->h_edge_pos >> 1,
376  s->v_edge_pos >> 2);
377  }
378  } else {
379  s->vdsp.emulated_edge_mc(ubuf,
380  srcU,
381  uvlinesize,
382  uvlinesize,
383  9,
384  v->field_mode ? 17 : 9,
385  uvsrc_x,
386  v->field_mode ? 2 * uvsrc_y + v->ref_field_type[dir] : uvsrc_y,
387  s->h_edge_pos >> 1,
388  s->v_edge_pos >> 1);
389  s->vdsp.emulated_edge_mc(vbuf,
390  srcV,
391  uvlinesize,
392  uvlinesize,
393  9,
394  v->field_mode ? 17 : 9,
395  uvsrc_x,
396  v->field_mode ? 2 * uvsrc_y + v->ref_field_type[dir] : uvsrc_y,
397  s->h_edge_pos >> 1,
398  s->v_edge_pos >> 1);
399  }
400  srcU = ubuf;
401  srcV = vbuf;
402  /* if we deal with range reduction we need to scale source blocks */
403  if (v->rangeredfrm) {
404  vc1_scale_luma(srcY, k, s->linesize);
405  vc1_scale_chroma(srcU, srcV, 9, s->uvlinesize);
406  }
407  /* if we deal with intensity compensation we need to scale source blocks */
408  if (use_ic) {
409  vc1_lut_scale_luma(srcY,
410  luty[v->field_mode ? v->ref_field_type[dir] : ((0 + src_y - s->mspel) & 1)],
411  luty[v->field_mode ? v->ref_field_type[dir] : ((1 + src_y - s->mspel) & 1)],
412  k, s->linesize);
413  vc1_lut_scale_chroma(srcU, srcV,
414  lutuv[v->field_mode ? v->ref_field_type[dir] : ((0 + uvsrc_y) & 1)],
415  lutuv[v->field_mode ? v->ref_field_type[dir] : ((1 + uvsrc_y) & 1)],
416  9, s->uvlinesize);
417  }
418  srcY += s->mspel * (1 + s->linesize);
419  }
420 
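/* With mspel set, use the VC-1 quarter-pel filters selected by the 4-bit
   fractional position (vertical fraction in the high two bits); otherwise
   fall back to plain half-pel put functions. */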
421  if (s->mspel) {
422  dxy = ((my & 3) << 2) | (mx & 3);
423  v->vc1dsp.put_vc1_mspel_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, v->rnd);
424  } else { // hpel mc - always used for luma
425  dxy = (my & 2) | ((mx & 2) >> 1);
426  if (!v->rnd)
427  s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
428  else
429  s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
430  }
431 
432  if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
433  return;
434  /* Chroma MC always uses qpel bilinear */
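 /* (the chroma MC helpers take eighth-pel fractional offsets, hence the
    quarter-pel fractions are doubled) */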
435  uvmx = (uvmx & 3) << 1;
436  uvmy = (uvmy & 3) << 1;
437  if (!v->rnd) {
438  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
439  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
440  } else {
441  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
442  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
443  }
444  if (v->field_mode) {
445  v->mv_f[dir][s->block_index[4] + v->mb_off] = v->cur_field_type != v->ref_field_type[dir];
446  v->mv_f[dir][s->block_index[5] + v->mb_off] = v->cur_field_type != v->ref_field_type[dir];
447  }
448 }
449 
450 /** Do motion compensation for 4-MV macroblock - luminance block
451  */
452 void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
453 {
454  MpegEncContext *s = &v->s;
455  uint8_t *srcY;
456  int dxy, mx, my, src_x, src_y;
457  int off;
458  int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
459  int v_edge_pos = s->v_edge_pos >> v->field_mode;
460  uint8_t (*luty)[256];
461  int use_ic;
462  int interlace;
463  int linesize;
464 
465  if ((!v->field_mode ||
466  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
467  !v->s.last_picture.f->data[0])
468  return;
469 
470  linesize = s->current_picture_ptr->f->linesize[0];
471 
472  mx = s->mv[dir][n][0];
473  my = s->mv[dir][n][1];
474 
475  if (!dir) {
476  if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
477  srcY = s->current_picture.f->data[0];
478  luty = v->curr_luty;
479  use_ic = *v->curr_use_ic;
480  interlace = 1;
481  } else {
482  srcY = s->last_picture.f->data[0];
483  luty = v->last_luty;
484  use_ic = v->last_use_ic;
485  interlace = !!(s->last_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
486  }
487  } else {
488  srcY = s->next_picture.f->data[0];
489  luty = v->next_luty;
490  use_ic = v->next_use_ic;
491  interlace = !!(s->next_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
492  }
493 
494  if (!srcY) {
495  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
496  return;
497  }
498 
499  if (v->field_mode) {
500  if (v->cur_field_type != v->ref_field_type[dir])
501  my = my - 2 + 4 * v->cur_field_type;
502  }
503 
504  if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
505  int opp_count = get_luma_mv(v, 0,
506  &s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
507  &s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1]);
508  int k, f = opp_count > 2;
509  for (k = 0; k < 4; k++)
510  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
511  }
512 
513  if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
514  int qx, qy;
515  int width = s->avctx->coded_width;
516  int height = s->avctx->coded_height >> 1;
517  if (s->pict_type == AV_PICTURE_TYPE_P) {
518  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
519  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
520  }
521  qx = (s->mb_x * 16) + (mx >> 2);
522  qy = (s->mb_y * 8) + (my >> 3);
523 
524  if (qx < -17)
525  mx -= 4 * (qx + 17);
526  else if (qx > width)
527  mx -= 4 * (qx - width);
528  if (qy < -18)
529  my -= 8 * (qy + 18);
530  else if (qy > height + 1)
531  my -= 8 * (qy - height - 1);
532  }
533 
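/* Destination offset of 8x8 luma block n inside the MB: blocks 0..3 are laid
   out left-to-right, top-to-bottom; with field MVs the lower blocks start on
   the second line (interleaved fields) instead of 8 lines down. */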
534  if ((v->fcm == ILACE_FRAME) && fieldmv)
535  off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
536  else
537  off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
538 
539  src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
540  if (!fieldmv)
541  src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
542  else
543  src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
544 
545  if (v->profile != PROFILE_ADVANCED) {
546  src_x = av_clip(src_x, -16, s->mb_width * 16);
547  src_y = av_clip(src_y, -16, s->mb_height * 16);
548  } else {
549  src_x = av_clip(src_x, -17, s->avctx->coded_width);
550  if (v->fcm == ILACE_FRAME)
551  src_y = av_clip(src_y, -18 + (src_y & 1), s->avctx->coded_height + (src_y & 1));
552  else
553  src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
554  }
555 
556  srcY += src_y * s->linesize + src_x;
557  if (v->field_mode && v->ref_field_type[dir])
558  srcY += linesize;
559 
560  if (v->rangeredfrm || use_ic
561  || s->h_edge_pos < 13 || v_edge_pos < 23
562  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
563  || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
564  const int k = 9 + s->mspel * 2;
565 
566  srcY -= s->mspel * (1 + (s->linesize << fieldmv));
567  /* check emulate edge stride and offset */
568  if (interlace) {
569  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer,
570  srcY,
571  linesize << 1,
572  linesize << 1,
573  k,
574  v->field_mode ? k : (k << fieldmv) + 1 >> 1,
575  src_x - s->mspel,
576  src_y - (s->mspel << fieldmv) >> !v->field_mode,
577  s->h_edge_pos,
578  s->v_edge_pos >> 1);
579  if (!v->field_mode && !fieldmv)
580  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + linesize,
581  srcY + linesize,
582  linesize << 1,
583  linesize << 1,
584  k,
585  k >> 1,
586  src_x - s->mspel,
587  src_y - s->mspel + 1 >> 1,
588  s->h_edge_pos,
589  s->v_edge_pos >> 1);
590  } else
591  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer,
592  srcY,
593  linesize,
594  linesize,
595  k,
596  v->field_mode ? (k << 1) - 1 : k << fieldmv,
597  src_x - s->mspel,
598  v->field_mode ? 2 * (src_y - s->mspel) + v->ref_field_type[dir] :
599  src_y - (s->mspel << fieldmv),
600  s->h_edge_pos,
601  s->v_edge_pos);
602  srcY = s->sc.edge_emu_buffer;
603  /* if we deal with range reduction we need to scale source blocks */
604  if (v->rangeredfrm) {
605  vc1_scale_luma(srcY, k, s->linesize << fieldmv);
606  }
607  /* if we deal with intensity compensation we need to scale source blocks */
608  if (use_ic) {
609  vc1_lut_scale_luma(srcY,
610  luty[v->field_mode ? v->ref_field_type[dir] : (((0<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1)],
611  luty[v->field_mode ? v->ref_field_type[dir] : (((1<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1)],
612  k, s->linesize << fieldmv);
613  }
614  srcY += s->mspel * (1 + (s->linesize << fieldmv));
615  }
616 
617  if (s->mspel) {
618  dxy = ((my & 3) << 2) | (mx & 3);
619  if (avg)
620  v->vc1dsp.avg_vc1_mspel_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
621  else
622  v->vc1dsp.put_vc1_mspel_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
623  } else { // hpel mc - always used for luma
624  dxy = (my & 2) | ((mx & 2) >> 1);
625  if (!v->rnd)
626  s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
627  else
628  s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
629  }
630 }
631 
632 /** Do motion compensation for 4-MV macroblock - both chroma blocks
633  */
634 void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
635 {
636  MpegEncContext *s = &v->s;
637  H264ChromaContext *h264chroma = &v->h264chroma;
638  uint8_t *srcU, *srcV;
639  int uvmx, uvmy, uvsrc_x, uvsrc_y;
640  int16_t tx, ty;
641  int chroma_ref_type;
642  int v_edge_pos = s->v_edge_pos >> v->field_mode;
643  uint8_t (*lutuv)[256];
644  int use_ic;
645  int interlace;
646  int uvlinesize;
647 
648  if (!v->field_mode && !v->s.last_picture.f->data[0])
649  return;
650  if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
651  return;
652 
653  /* calculate chroma MV vector from four luma MVs */
654  if (!v->field_mode || !v->numref) {
655  int valid_count = get_chroma_mv(v, dir, &tx, &ty);
656  if (!valid_count) {
657  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
658  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
659  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
660  return; //no need to do MC for intra blocks
661  }
662  chroma_ref_type = v->ref_field_type[dir];
663  } else {
664  int opp_count = get_luma_mv(v, dir, &tx, &ty);
665  chroma_ref_type = v->cur_field_type ^ (opp_count > 2);
666  }
667  if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f->data[0])
668  return;
669  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
670  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
671 
672  uvlinesize = s->current_picture_ptr->f->linesize[1];
673 
674  uvmx = (tx + ((tx & 3) == 3)) >> 1;
675  uvmy = (ty + ((ty & 3) == 3)) >> 1;
676 
677  v->luma_mv[s->mb_x][0] = uvmx;
678  v->luma_mv[s->mb_x][1] = uvmy;
679 
680  if (v->fastuvmc) {
681  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
682  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
683  }
684  // Field conversion bias
685  if (v->cur_field_type != chroma_ref_type)
686  uvmy += 2 - 4 * chroma_ref_type;
687 
688  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
689  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
690 
691  if (v->profile != PROFILE_ADVANCED) {
692  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
693  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
694  } else {
695  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
696  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
697  }
698 
699  if (!dir) {
700  if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
701  srcU = s->current_picture.f->data[1];
702  srcV = s->current_picture.f->data[2];
703  lutuv = v->curr_lutuv;
704  use_ic = *v->curr_use_ic;
705  interlace = 1;
706  } else {
707  srcU = s->last_picture.f->data[1];
708  srcV = s->last_picture.f->data[2];
709  lutuv = v->last_lutuv;
710  use_ic = v->last_use_ic;
711  interlace = !!(s->last_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
712  }
713  } else {
714  srcU = s->next_picture.f->data[1];
715  srcV = s->next_picture.f->data[2];
716  lutuv = v->next_lutuv;
717  use_ic = v->next_use_ic;
718  interlace = !!(s->next_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
719  }
720 
721  if (!srcU) {
722  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
723  return;
724  }
725 
726  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
727  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
728 
729  if (v->field_mode) {
730  if (chroma_ref_type) {
731  srcU += uvlinesize;
732  srcV += uvlinesize;
733  }
734  }
735 
736  if (v->rangeredfrm || use_ic
737  || s->h_edge_pos < 18 || v_edge_pos < 18
738  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
739  || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
740  if (interlace) {
741  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer,
742  srcU,
743  uvlinesize << 1,
744  uvlinesize << 1,
745  9,
746  v->field_mode ? 9 : 5,
747  uvsrc_x,
748  uvsrc_y >> !v->field_mode,
749  s->h_edge_pos >> 1,
750  s->v_edge_pos >> 2);
751  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + 16,
752  srcV,
753  uvlinesize << 1,
754  uvlinesize << 1,
755  9,
756  v->field_mode ? 9 : 5,
757  uvsrc_x,
758  uvsrc_y >> !v->field_mode,
759  s->h_edge_pos >> 1,
760  s->v_edge_pos >> 2);
761  if (!v->field_mode) {
762  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + uvlinesize,
763  srcU + uvlinesize,
764  uvlinesize << 1,
765  uvlinesize << 1,
766  9,
767  4,
768  uvsrc_x,
769  uvsrc_y + 1 >> 1,
770  s->h_edge_pos >> 1,
771  s->v_edge_pos >> 2);
772  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + 16 + uvlinesize,
773  srcV + uvlinesize,
774  uvlinesize << 1,
775  uvlinesize << 1,
776  9,
777  4,
778  uvsrc_x,
779  uvsrc_y + 1 >> 1,
780  s->h_edge_pos >> 1,
781  s->v_edge_pos >> 2);
782  }
783  } else {
784  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer,
785  srcU,
786  uvlinesize,
787  uvlinesize,
788  9,
789  v->field_mode ? 17 : 9,
790  uvsrc_x,
791  v->field_mode ? 2 * uvsrc_y + chroma_ref_type : uvsrc_y,
792  s->h_edge_pos >> 1,
793  s->v_edge_pos >> 1);
794  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + 16,
795  srcV,
796  uvlinesize,
797  uvlinesize,
798  9,
799  v->field_mode ? 17 : 9,
800  uvsrc_x,
801  v->field_mode ? 2 * uvsrc_y + chroma_ref_type : uvsrc_y,
802  s->h_edge_pos >> 1,
803  s->v_edge_pos >> 1);
804  }
805  srcU = s->sc.edge_emu_buffer;
806  srcV = s->sc.edge_emu_buffer + 16;
807 
808  /* if we deal with range reduction we need to scale source blocks */
809  if (v->rangeredfrm) {
810  vc1_scale_chroma(srcU, srcV, 9, s->uvlinesize);
811  }
812  /* if we deal with intensity compensation we need to scale source blocks */
813  if (use_ic) {
814  vc1_lut_scale_chroma(srcU, srcV,
815  lutuv[v->field_mode ? chroma_ref_type : ((0 + uvsrc_y) & 1)],
816  lutuv[v->field_mode ? chroma_ref_type : ((1 + uvsrc_y) & 1)],
817  9, s->uvlinesize);
818  }
819  }
820 
821  /* Chroma MC always uses qpel bilinear */
822  uvmx = (uvmx & 3) << 1;
823  uvmy = (uvmy & 3) << 1;
824  if (!v->rnd) {
825  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
826  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
827  } else {
828  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
829  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
830  }
831  if (v->field_mode) {
832  v->mv_f[dir][s->block_index[4] + v->mb_off] = v->cur_field_type != chroma_ref_type;
833  v->mv_f[dir][s->block_index[5] + v->mb_off] = v->cur_field_type != chroma_ref_type;
834  }
835 }
836 
837 /** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
838  */
839 void ff_vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
840 {
841  MpegEncContext *s = &v->s;
842  H264ChromaContext *h264chroma = &v->h264chroma;
843  uint8_t *srcU, *srcV;
844  int uvsrc_x, uvsrc_y;
845  int uvmx_field[4], uvmy_field[4];
846  int i, off, tx, ty;
847  int fieldmv = v->blk_mv_type[s->block_index[0]];
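 /* rounding table for the vertical chroma component of field MVs: ty is
    split below into (ty >> 4) * 8 plus a lookup on its low four bits */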
848  static const uint8_t s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
849  int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
850  int v_edge_pos = s->v_edge_pos >> 1;
851  int use_ic;
852  int interlace;
853  int uvlinesize;
854  uint8_t (*lutuv)[256];
855 
856  if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
857  return;
858 
859  uvlinesize = s->current_picture_ptr->f->linesize[1];
860 
861  for (i = 0; i < 4; i++) {
862  int d = i < 2 ? dir: dir2;
863  tx = s->mv[d][i][0];
864  uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
865  ty = s->mv[d][i][1];
866  if (fieldmv)
867  uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
868  else
869  uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
870  }
871 
872  for (i = 0; i < 4; i++) {
873  off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
874  uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
875  uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
876  // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
877  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
878  if (v->fcm == ILACE_FRAME)
879  uvsrc_y = av_clip(uvsrc_y, -8 + (uvsrc_y & 1), (s->avctx->coded_height >> 1) + (uvsrc_y & 1));
880  else
881  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
882  if (i < 2 ? dir : dir2) {
883  srcU = s->next_picture.f->data[1];
884  srcV = s->next_picture.f->data[2];
885  lutuv = v->next_lutuv;
886  use_ic = v->next_use_ic;
887  interlace = !!(s->next_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
888  } else {
889  srcU = s->last_picture.f->data[1];
890  srcV = s->last_picture.f->data[2];
891  lutuv = v->last_lutuv;
892  use_ic = v->last_use_ic;
893  interlace = !!(s->last_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
894  }
895  if (!srcU)
896  return;
897  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
898  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
899  uvmx_field[i] = (uvmx_field[i] & 3) << 1;
900  uvmy_field[i] = (uvmy_field[i] & 3) << 1;
901 
902  if (use_ic
903  || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
904  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
905  || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
906  if (interlace) {
907  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer,
908  srcU,
909  uvlinesize << 1,
910  uvlinesize << 1,
911  5,
912  (5 << fieldmv) + 1 >> 1,
913  uvsrc_x,
914  uvsrc_y >> 1,
915  s->h_edge_pos >> 1,
916  s->v_edge_pos >> 2);
917  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + 16,
918  srcV,
919  uvlinesize << 1,
920  uvlinesize << 1,
921  5,
922  (5 << fieldmv) + 1 >> 1,
923  uvsrc_x,
924  uvsrc_y >> 1,
925  s->h_edge_pos >> 1,
926  s->v_edge_pos >> 2);
927  if (!fieldmv) {
928  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + uvlinesize,
929  srcU + uvlinesize,
930  uvlinesize << 1,
931  uvlinesize << 1,
932  5,
933  2,
934  uvsrc_x,
935  uvsrc_y + 1 >> 1,
936  s->h_edge_pos >> 1,
937  s->v_edge_pos >> 2);
938  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + 16 + uvlinesize,
939  srcV + uvlinesize,
940  uvlinesize << 1,
941  uvlinesize << 1,
942  5,
943  2,
944  uvsrc_x,
945  uvsrc_y + 1 >> 1,
946  s->h_edge_pos >> 1,
947  s->v_edge_pos >> 2);
948  }
949  } else {
950  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer,
951  srcU,
952  uvlinesize,
953  uvlinesize,
954  5,
955  5 << fieldmv,
956  uvsrc_x,
957  uvsrc_y,
958  s->h_edge_pos >> 1,
959  s->v_edge_pos >> 1);
960  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + 16,
961  srcV,
962  uvlinesize,
963  uvlinesize,
964  5,
965  5 << fieldmv,
966  uvsrc_x,
967  uvsrc_y,
968  s->h_edge_pos >> 1,
969  s->v_edge_pos >> 1);
970  }
971  srcU = s->sc.edge_emu_buffer;
972  srcV = s->sc.edge_emu_buffer + 16;
973 
974  /* if we deal with intensity compensation we need to scale source blocks */
975  if (use_ic) {
976  vc1_lut_scale_chroma(srcU, srcV,
977  lutuv[(uvsrc_y + (0 << fieldmv)) & 1],
978  lutuv[(uvsrc_y + (1 << fieldmv)) & 1],
979  5, s->uvlinesize << fieldmv);
980  }
981  }
982  if (avg) {
983  if (!v->rnd) {
984  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
985  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
986  } else {
987  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
988  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
989  }
990  } else {
991  if (!v->rnd) {
992  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
993  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
994  } else {
995  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
996  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
997  }
998  }
999  }
1000 }
1001 
1002 /** Motion compensation for direct or interpolated blocks in B-frames
1003  */
1004 void ff_vc1_interp_mc(VC1Context *v)
1005 {
1006  MpegEncContext *s = &v->s;
1007  H264ChromaContext *h264chroma = &v->h264chroma;
1008  uint8_t *srcY, *srcU, *srcV;
1009  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1010  int v_edge_pos = s->v_edge_pos >> v->field_mode;
1011  int use_ic = v->next_use_ic;
1012  int interlace;
1013  int linesize, uvlinesize;
1014 
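 /* Only avg_* variants are used below, so the backward (next-picture)
    prediction is averaged into whatever the forward MC left in s->dest,
    producing the interpolated result. */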
1015  if (!v->field_mode && !v->s.next_picture.f->data[0])
1016  return;
1017 
1018  linesize = s->current_picture_ptr->f->linesize[0];
1019  uvlinesize = s->current_picture_ptr->f->linesize[1];
1020 
1021  mx = s->mv[1][0][0];
1022  my = s->mv[1][0][1];
1023  uvmx = (mx + ((mx & 3) == 3)) >> 1;
1024  uvmy = (my + ((my & 3) == 3)) >> 1;
1025  if (v->field_mode && v->cur_field_type != v->ref_field_type[1]) {
1026  my = my - 2 + 4 * v->cur_field_type;
1027  uvmy = uvmy - 2 + 4 * v->cur_field_type;
1028  }
1029  if (v->fastuvmc) {
1030  uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1031  uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1032  }
1033  srcY = s->next_picture.f->data[0];
1034  srcU = s->next_picture.f->data[1];
1035  srcV = s->next_picture.f->data[2];
1036 
1037  interlace = !!(s->next_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
1038 
1039  src_x = s->mb_x * 16 + (mx >> 2);
1040  src_y = s->mb_y * 16 + (my >> 2);
1041  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1042  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1043 
1044  if (v->profile != PROFILE_ADVANCED) {
1045  src_x = av_clip( src_x, -16, s->mb_width * 16);
1046  src_y = av_clip( src_y, -16, s->mb_height * 16);
1047  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1048  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1049  } else {
1050  src_x = av_clip( src_x, -17, s->avctx->coded_width);
1051  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1052  if (v->fcm == ILACE_FRAME) {
1053  src_y = av_clip(src_y, -18 + (src_y & 1), s->avctx->coded_height + (src_y & 1));
1054  uvsrc_y = av_clip(uvsrc_y, -8 + (uvsrc_y & 1), (s->avctx->coded_height >> 1) + (uvsrc_y & 1));
1055  } else {
1056  src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
1057  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1058  }
1059  }
1060 
1061  srcY += src_y * s->linesize + src_x;
1062  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1063  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1064 
1065  if (v->field_mode && v->ref_field_type[1]) {
1066  srcY += linesize;
1067  srcU += uvlinesize;
1068  srcV += uvlinesize;
1069  }
1070 
1071  /* for grayscale we should not try to read from unknown area */
1072  if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY) {
1073  srcU = s->sc.edge_emu_buffer + 18 * s->linesize;
1074  srcV = s->sc.edge_emu_buffer + 18 * s->linesize;
1075  }
1076 
1077  if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
1078  || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1079  || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1080  uint8_t *ubuf = s->sc.edge_emu_buffer + 19 * s->linesize;
1081  uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
1082  const int k = 17 + s->mspel * 2;
1083 
1084  srcY -= s->mspel * (1 + s->linesize);
1085  if (interlace) {
1086  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer,
1087  srcY,
1088  linesize << 1,
1089  linesize << 1,
1090  k,
1091  v->field_mode ? k : (k + 1 >> 1),
1092  src_x - s->mspel,
1093  src_y - s->mspel >> !v->field_mode,
1094  s->h_edge_pos,
1095  s->v_edge_pos >> 1);
1096  if (!v->field_mode)
1097  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + linesize,
1098  srcY + linesize,
1099  linesize << 1,
1100  linesize << 1,
1101  k,
1102  k >> 1,
1103  src_x - s->mspel,
1104  src_y - s->mspel + 1 >> 1,
1105  s->h_edge_pos,
1106  s->v_edge_pos >> 1);
1107  } else
1108  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer,
1109  srcY,
1110  linesize,
1111  linesize,
1112  k,
1113  v->field_mode ? (k << 1) - 1 : k,
1114  src_x - s->mspel,
1115  v->field_mode ? 2 * (src_y - s->mspel) + v->ref_field_type[1] :
1116  src_y - s->mspel,
1117  s->h_edge_pos,
1118  s->v_edge_pos);
1119  srcY = s->sc.edge_emu_buffer;
1120  if (interlace) {
1121  s->vdsp.emulated_edge_mc(ubuf,
1122  srcU,
1123  uvlinesize << 1,
1124  uvlinesize << 1,
1125  9,
1126  v->field_mode ? 9 : 5,
1127  uvsrc_x,
1128  uvsrc_y >> !v->field_mode,
1129  s->h_edge_pos >> 1,
1130  s->v_edge_pos >> 2);
1131  s->vdsp.emulated_edge_mc(vbuf,
1132  srcV,
1133  uvlinesize << 1,
1134  uvlinesize << 1,
1135  9,
1136  v->field_mode ? 9 : 5,
1137  uvsrc_x,
1138  uvsrc_y >> !v->field_mode,
1139  s->h_edge_pos >> 1,
1140  s->v_edge_pos >> 2);
1141  if (!v->field_mode) {
1142  s->vdsp.emulated_edge_mc(ubuf + uvlinesize,
1143  srcU + uvlinesize,
1144  uvlinesize << 1,
1145  uvlinesize << 1,
1146  9,
1147  4,
1148  uvsrc_x,
1149  uvsrc_y + 1 >> 1,
1150  s->h_edge_pos >> 1,
1151  s->v_edge_pos >> 2);
1152  s->vdsp.emulated_edge_mc(vbuf + uvlinesize,
1153  srcV + uvlinesize,
1154  uvlinesize << 1,
1155  uvlinesize << 1,
1156  9,
1157  4,
1158  uvsrc_x,
1159  uvsrc_y + 1 >> 1,
1160  s->h_edge_pos >> 1,
1161  s->v_edge_pos >> 2);
1162  }
1163  } else {
1164  s->vdsp.emulated_edge_mc(ubuf,
1165  srcU,
1166  uvlinesize,
1167  uvlinesize,
1168  9,
1169  v->field_mode ? 17 : 9,
1170  uvsrc_x,
1171  v->field_mode ? 2 * uvsrc_y + v->ref_field_type[1] : uvsrc_y,
1172  s->h_edge_pos >> 1,
1173  s->v_edge_pos >> 1);
1174  s->vdsp.emulated_edge_mc(vbuf,
1175  srcV,
1176  uvlinesize,
1177  uvlinesize,
1178  9,
1179  v->field_mode ? 17 : 9,
1180  uvsrc_x,
1181  v->field_mode ? 2 * uvsrc_y + v->ref_field_type[1] : uvsrc_y,
1182  s->h_edge_pos >> 1,
1183  s->v_edge_pos >> 1);
1184  }
1185  srcU = ubuf;
1186  srcV = vbuf;
1187  /* if we deal with range reduction we need to scale source blocks */
1188  if (v->rangeredfrm) {
1189  vc1_scale_luma(srcY, k, s->linesize);
1190  vc1_scale_chroma(srcU, srcV, 9, s->uvlinesize);
1191  }
1192 
1193  if (use_ic) {
1194  uint8_t (*luty )[256] = v->next_luty;
1195  uint8_t (*lutuv)[256] = v->next_lutuv;
1196  vc1_lut_scale_luma(srcY,
1197  luty[v->field_mode ? v->ref_field_type[1] : ((0+src_y - s->mspel) & 1)],
1198  luty[v->field_mode ? v->ref_field_type[1] : ((1+src_y - s->mspel) & 1)],
1199  k, s->linesize);
1200  vc1_lut_scale_chroma(srcU, srcV,
1201  lutuv[v->field_mode ? v->ref_field_type[1] : ((0+uvsrc_y) & 1)],
1202  lutuv[v->field_mode ? v->ref_field_type[1] : ((1+uvsrc_y) & 1)],
1203  9, s->uvlinesize);
1204  }
1205  srcY += s->mspel * (1 + s->linesize);
1206  }
1207 
1208  if (s->mspel) {
1209  dxy = ((my & 3) << 2) | (mx & 3);
1210  v->vc1dsp.avg_vc1_mspel_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, v->rnd);
1211  } else { // hpel mc
1212  dxy = (my & 2) | ((mx & 2) >> 1);
1213 
1214  if (!v->rnd)
1215  s->hdsp.avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
1216  else
1217  s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0], srcY, s->linesize, 16);
1218  }
1219 
1220  if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
1221  return;
1222  /* Chroma MC always uses qpel bilinear */
1223  uvmx = (uvmx & 3) << 1;
1224  uvmy = (uvmy & 3) << 1;
1225  if (!v->rnd) {
1226  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1227  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1228  } else {
1229  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1230  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1231  }
1232 }