/*
 * VC-1 and WMV3 decoder
 * Copyright (c) 2011 Mashiat Sarker Shakkhar
 * Copyright (c) 2006-2007 Konstantin Shishkov
 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VC-1 and WMV3 block decoding routines
 */

#include "mathops.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "vc1.h"
#include "vc1_pred.h"
#include "vc1data.h"

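/*
 * MV predictor scaling for field pictures: when a candidate predictor
 * references a different field than the current MV, it is rescaled with a
 * piecewise-linear function.  The scale factors and zone thresholds come
 * from ff_vc1_field_mvpred_scales[] and ff_vc1_b_field_mvpred_scales[] in
 * vc1data.c, indexed by the reference frame distance clipped to 3: small
 * vectors are scaled by one factor, vectors beyond the zone-1 threshold use
 * a second factor plus a constant offset, and very large vectors pass
 * through unscaled.
 */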
static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
{
    int scaledvalue, refdist;
    int scalesame1, scalesame2;
    int scalezone1_x, zone1offset_x;
    int table_index = dir ^ v->second_field;

    if (v->s.pict_type != AV_PICTURE_TYPE_B)
        refdist = v->refdist;
    else
        refdist = dir ? v->brfd : v->frfd;
    if (refdist > 3)
        refdist = 3;
    scalesame1    = ff_vc1_field_mvpred_scales[table_index][1][refdist];
    scalesame2    = ff_vc1_field_mvpred_scales[table_index][2][refdist];
    scalezone1_x  = ff_vc1_field_mvpred_scales[table_index][3][refdist];
    zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];

    if (FFABS(n) > 255)
        scaledvalue = n;
    else {
        if (FFABS(n) < scalezone1_x)
            scaledvalue = (n * scalesame1) >> 8;
        else {
            if (n < 0)
                scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
            else
                scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
        }
    }
    return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
}

static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
{
    int scaledvalue, refdist;
    int scalesame1, scalesame2;
    int scalezone1_y, zone1offset_y;
    int table_index = dir ^ v->second_field;

    if (v->s.pict_type != AV_PICTURE_TYPE_B)
        refdist = v->refdist;
    else
        refdist = dir ? v->brfd : v->frfd;
    if (refdist > 3)
        refdist = 3;
    scalesame1    = ff_vc1_field_mvpred_scales[table_index][1][refdist];
    scalesame2    = ff_vc1_field_mvpred_scales[table_index][2][refdist];
    scalezone1_y  = ff_vc1_field_mvpred_scales[table_index][4][refdist];
    zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];

    if (FFABS(n) > 63)
        scaledvalue = n;
    else {
        if (FFABS(n) < scalezone1_y)
            scaledvalue = (n * scalesame1) >> 8;
        else {
            if (n < 0)
                scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
            else
                scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
        }
    }

    if (v->cur_field_type && !v->ref_field_type[dir])
        return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
    else
        return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
}

static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
{
    int scalezone1_x, zone1offset_x;
    int scaleopp1, scaleopp2, brfd;
    int scaledvalue;

    brfd = FFMIN(v->brfd, 3);
    scalezone1_x  = ff_vc1_b_field_mvpred_scales[3][brfd];
    zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
    scaleopp1     = ff_vc1_b_field_mvpred_scales[1][brfd];
    scaleopp2     = ff_vc1_b_field_mvpred_scales[2][brfd];

    if (FFABS(n) > 255)
        scaledvalue = n;
    else {
        if (FFABS(n) < scalezone1_x)
            scaledvalue = (n * scaleopp1) >> 8;
        else {
            if (n < 0)
                scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
            else
                scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
        }
    }
    return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
}

static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
{
    int scalezone1_y, zone1offset_y;
    int scaleopp1, scaleopp2, brfd;
    int scaledvalue;

    brfd = FFMIN(v->brfd, 3);
    scalezone1_y  = ff_vc1_b_field_mvpred_scales[4][brfd];
    zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
    scaleopp1     = ff_vc1_b_field_mvpred_scales[1][brfd];
    scaleopp2     = ff_vc1_b_field_mvpred_scales[2][brfd];

    if (FFABS(n) > 63)
        scaledvalue = n;
    else {
        if (FFABS(n) < scalezone1_y)
            scaledvalue = (n * scaleopp1) >> 8;
        else {
            if (n < 0)
                scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
            else
                scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
        }
    }
    if (v->cur_field_type && !v->ref_field_type[dir]) {
        return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
    } else {
        return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
    }
}

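/*
 * Dispatch wrappers for the scaling helpers above.  Predictors are held in
 * quarter-pel units, so for half-pel pictures (quarter_sample == 0) the
 * value is converted to half-pel before scaling (n >>= hpel) and expanded
 * back afterwards (* (1 << hpel)).
 */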
static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
                                         int dim, int dir)
{
    int brfd, scalesame;
    int hpel = 1 - v->s.quarter_sample;

    n >>= hpel;
    if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
        if (dim)
            n = scaleforsame_y(v, i, n, dir) * (1 << hpel);
        else
            n = scaleforsame_x(v, n, dir) * (1 << hpel);
        return n;
    }
    brfd      = FFMIN(v->brfd, 3);
    scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];

    n = (n * scalesame >> 8) * (1 << hpel);
    return n;
}

static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
                                        int dim, int dir)
{
    int refdist, scaleopp;
    int hpel = 1 - v->s.quarter_sample;

    n >>= hpel;
    if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
        if (dim)
            n = scaleforopp_y(v, n, dir) * (1 << hpel);
        else
            n = scaleforopp_x(v, n) * (1 << hpel);
        return n;
    }
    if (v->s.pict_type != AV_PICTURE_TYPE_B)
        refdist = v->refdist;
    else
        refdist = dir ? v->brfd : v->frfd;
    refdist  = FFMIN(refdist, 3);
    scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];

    n = (n * scaleopp >> 8) * (1 << hpel);
    return n;
}

/** Predict and set motion vector
 *
 * @param n         luma block index (0..3)
 * @param dmv_x     horizontal MV differential
 * @param dmv_y     vertical MV differential
 * @param mv1       nonzero for 1-MV mode (the MV is duplicated to all four blocks)
 * @param r_x       horizontal MV range
 * @param r_y       vertical MV range
 * @param is_intra  per-block intra flags
 * @param pred_flag field prediction flag (field pictures only)
 * @param dir       prediction direction: 0 forward, 1 backward
 */
void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
                    int mv1, int r_x, int r_y, uint8_t* is_intra,
                    int pred_flag, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;
    int px, py;
    int sum;
    int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
    int opposite, a_f, b_f, c_f;
    int16_t field_predA[2];
    int16_t field_predB[2];
    int16_t field_predC[2];
    int a_valid, b_valid, c_valid;
    int hybridmv_thresh, y_bias = 0;

    if (v->mv_mode == MV_PMODE_MIXED_MV ||
        ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
        mixedmv_pic = 1;
    else
        mixedmv_pic = 0;
    /* scale MV difference to be quad-pel */
    if (!s->quarter_sample) {
        dmv_x *= 2;
        dmv_y *= 2;
    }

    wrap = s->b8_stride;
    xy   = s->block_index[n];

    if (s->mb_intra) {
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
        s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
        s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
        if (mv1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0]        = 0;
            s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1]        = 0;
            s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0]     = 0;
            s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1]     = 0;
            s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0]        = 0;
            s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1]        = 0;
            s->current_picture.motion_val[1][xy + wrap + v->blocks_off][0]     = 0;
            s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1]     = 0;
            s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
        }
        return;
    }

    a_valid = !s->first_slice_line || (n == 2 || n == 3);
    b_valid = a_valid;
    c_valid = s->mb_x || (n == 1 || n == 3);
    if (mv1) {
        if (v->field_mode && mixedmv_pic)
            off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        else
            off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
        b_valid = b_valid && s->mb_width > 1;
    } else {
        //in 4-MV mode different blocks have different B predictor position
        switch (n) {
        case 0:
            if (v->res_rtm_flag)
                off = s->mb_x ? -1 : 1;
            else
                off = s->mb_x ? -1 : 2 * s->mb_width - wrap - 1;
            break;
        case 1:
            off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
            break;
        case 2:
            off = 1;
            break;
        case 3:
            off = -1;
        }
        if (v->field_mode && s->mb_width == 1)
            b_valid = b_valid && c_valid;
    }

    if (v->field_mode) {
        a_valid = a_valid && !is_intra[xy - wrap];
        b_valid = b_valid && !is_intra[xy - wrap + off];
        c_valid = c_valid && !is_intra[xy - 1];
    }

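    /* Gather the spatial MV predictors: A is the block above (xy - wrap),
     * B the block above at offset off, C the block to the left (xy - 1).
     * For field pictures, also count how many candidates reference the
     * same vs. the opposite field. */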
    if (a_valid) {
        A   = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
        a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
        num_oppfield  += a_f;
        num_samefield += 1 - a_f;
        field_predA[0] = A[0];
        field_predA[1] = A[1];
    } else {
        field_predA[0] = field_predA[1] = 0;
        a_f = 0;
    }
    if (b_valid) {
        B   = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
        b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
        num_oppfield  += b_f;
        num_samefield += 1 - b_f;
        field_predB[0] = B[0];
        field_predB[1] = B[1];
    } else {
        field_predB[0] = field_predB[1] = 0;
        b_f = 0;
    }
    if (c_valid) {
        C   = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
        c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
        num_oppfield  += c_f;
        num_samefield += 1 - c_f;
        field_predC[0] = C[0];
        field_predC[1] = C[1];
    } else {
        field_predC[0] = field_predC[1] = 0;
        c_f = 0;
    }

    if (v->field_mode) {
        if (!v->numref)
            // REFFIELD determines if the last field or the second-last field is
            // to be used as reference
            opposite = 1 - v->reffield;
        else {
            if (num_samefield <= num_oppfield)
                opposite = 1 - pred_flag;
            else
                opposite = pred_flag;
        }
    } else
        opposite = 0;
    if (opposite) {
        v->mv_f[dir][xy + v->blocks_off] = 1;
        v->ref_field_type[dir]           = !v->cur_field_type;
        if (a_valid && !a_f) {
            field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
            field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
        }
        if (b_valid && !b_f) {
            field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
            field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
        }
        if (c_valid && !c_f) {
            field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
            field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
        }
    } else {
        v->mv_f[dir][xy + v->blocks_off] = 0;
        v->ref_field_type[dir]           = v->cur_field_type;
        if (a_valid && a_f) {
            field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
            field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
        }
        if (b_valid && b_f) {
            field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
            field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
        }
        if (c_valid && c_f) {
            field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
            field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
        }
    }

    if (a_valid) {
        px = field_predA[0];
        py = field_predA[1];
    } else if (c_valid) {
        px = field_predC[0];
        py = field_predC[1];
    } else if (b_valid) {
        px = field_predB[0];
        py = field_predB[1];
    } else {
        px = 0;
        py = 0;
    }

    if (num_samefield + num_oppfield > 1) {
        px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
        py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
    }

    /* Pullback MV as specified in 8.3.5.3.4 */
    if (!v->field_mode) {
        int qx, qy, X, Y;
        int MV = mv1 ? -60 : -28;
        qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
        qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
        X  = (s->mb_width  << 6) - 4;
        Y  = (s->mb_height << 6) - 4;
        if (qx + px < MV) px = MV - qx;
        if (qy + py < MV) py = MV - qy;
        if (qx + px > X) px = X - qx;
        if (qy + py > Y) py = Y - qy;
    }

    if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
        hybridmv_thresh = 32;
        if (a_valid && c_valid) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
            if (sum > hybridmv_thresh) {
                if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
                    px = field_predA[0];
                    py = field_predA[1];
                } else {
                    px = field_predC[0];
                    py = field_predC[1];
                }
            } else {
                if (is_intra[xy - 1])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
                if (sum > hybridmv_thresh) {
                    if (get_bits1(&s->gb)) {
                        px = field_predA[0];
                        py = field_predA[1];
                    } else {
                        px = field_predC[0];
                        py = field_predC[1];
                    }
                }
            }
        }
    }

    if (v->field_mode && v->numref)
        r_y >>= 1;
    if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
        y_bias = 1;
    /* store MV using signed modulus of MV range defined in 4.11 */
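    /* ((val + r) & (2r - 1)) - r wraps val into [-r, r - 1]; e.g. with
     * r_x = 256, a predicted value of 300 wraps around to -212. */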
    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
    if (mv1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0]        = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1]        = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
        s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0]     = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1]     = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
        s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
        v->mv_f[dir][xy + 1 + v->blocks_off]    = v->mv_f[dir][xy + v->blocks_off];
        v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
    }
}

/** Predict and set motion vector for interlaced frame picture MBs
 *
 * @param mvn  1 for 1-MV MBs (motion data duplicated to all four blocks),
 *             2 for 2-field-MV MBs
 * @param dir  prediction direction: 0 forward, 1 backward
 */
void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                          int mvn, int r_x, int r_y, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int A[2], B[2], C[2];
    int px = 0, py = 0;
    int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same, 1: opposite
    int total_valid, num_samefield, num_oppfield;
    int pos_c, pos_b, n_adj;

    wrap = s->b8_stride;
    xy   = s->block_index[n];

    if (s->mb_intra) {
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
        s->current_picture.motion_val[1][xy][0] = 0;
        s->current_picture.motion_val[1][xy][1] = 0;
        if (mvn == 1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1][0]        = 0;
            s->current_picture.motion_val[0][xy + 1][1]        = 0;
            s->current_picture.motion_val[0][xy + wrap][0]     = 0;
            s->current_picture.motion_val[0][xy + wrap][1]     = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.motion_val[1][xy + 1][0]        = 0;
            s->current_picture.motion_val[1][xy + 1][1]        = 0;
            s->current_picture.motion_val[1][xy + wrap][0]     = 0;
            s->current_picture.motion_val[1][xy + wrap][1]     = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
        }
        return;
    }

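    /* When the current block and a neighbouring candidate disagree on frame
     * vs. field MV type (blk_mv_type), the candidate's two field vectors are
     * averaged with rounding into a single frame-type predictor; otherwise
     * the matching neighbouring block is selected via n_adj. */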
    off = ((n == 0) || (n == 1)) ? 1 : -1;
    /* predict A */
    if (s->mb_x || (n == 1) || (n == 3)) {
        if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
            || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
            A[0] = s->current_picture.motion_val[dir][xy - 1][0];
            A[1] = s->current_picture.motion_val[dir][xy - 1][1];
            a_valid = 1;
        } else { // current block has frame mv and cand. has field MV (so average)
            A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
            A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
            a_valid = 1;
        }
        if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
            a_valid = 0;
            A[0] = A[1] = 0;
        }
    } else
        A[0] = A[1] = 0;
    /* Predict B and C */
    B[0] = B[1] = C[0] = C[1] = 0;
    if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
        if (!s->first_slice_line) {
            if (!v->is_intra[s->mb_x - s->mb_stride]) {
                b_valid = 1;
                n_adj   = n | 2;
                pos_b   = s->block_index[n_adj] - 2 * wrap;
                if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                    n_adj = (n & 2) | (n & 1);
                }
                B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
                B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
                if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
                    B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
                    B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
                }
            }
            if (s->mb_width > 1) {
                if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
                    c_valid = 1;
                    n_adj   = 2;
                    pos_c   = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        n_adj = n & 2;
                    }
                    C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
                    C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                    }
                    if (s->mb_x == s->mb_width - 1) {
                        if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                            c_valid = 1;
                            n_adj   = 3;
                            pos_c   = s->block_index[3] - 2 * wrap - 2;
                            if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                                n_adj = n | 1;
                            }
                            C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
                            C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
                            if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                                C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                                C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
                            }
                        } else
                            c_valid = 0;
                    }
                }
            }
        }
    } else {
        pos_b   = s->block_index[1];
        b_valid = 1;
        B[0]    = s->current_picture.motion_val[dir][pos_b][0];
        B[1]    = s->current_picture.motion_val[dir][pos_b][1];
        pos_c   = s->block_index[0];
        c_valid = 1;
        C[0]    = s->current_picture.motion_val[dir][pos_c][0];
        C[1]    = s->current_picture.motion_val[dir][pos_c][1];
    }

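    /* Vertical MVs are stored in quarter-pel units, so bit 2 of the y
     * component (mv[1] & 4) marks an odd integer vertical offset, i.e. a
     * vector pointing at the opposite field. */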
    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
        A[0] = A[1] = 0;
    }
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    }
    if (!v->blk_mv_type[xy]) {
        if (s->mb_width == 1) {
            px = B[0];
            py = B[1];
        } else {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if      (a_valid) { px = A[0]; py = A[1]; }
                else if (b_valid) { px = B[0]; py = B[1]; }
                else              { px = C[0]; py = C[1]; }
            }
        }
    } else {
        if (a_valid)
            field_a = (A[1] & 4) ? 1 : 0;
        else
            field_a = 0;
        if (b_valid)
            field_b = (B[1] & 4) ? 1 : 0;
        else
            field_b = 0;
        if (c_valid)
            field_c = (C[1] & 4) ? 1 : 0;
        else
            field_c = 0;

        num_oppfield  = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from the same-field set depending on priority;
                 * the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
            } else {
                px = field_a ? A[0] : B[0];
                py = field_a ? A[1] : B[1];
            }
        } else if (total_valid == 2) {
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (!field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else /*if (c_valid)*/ {
                    av_assert1(c_valid);
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else /*if (field_b && b_valid)*/ {
                    av_assert1(field_b && b_valid);
                    px = B[0];
                    py = B[1];
                }
            }
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
        }
    }

    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[dir][xy + 1][0]        = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1][1]        = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap][0]     = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap][1]     = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
        s->mv[dir][n + 1][0] = s->mv[dir][n][0];
        s->mv[dir][n + 1][1] = s->mv[dir][n][1];
    }
}

/** Predict and set motion vectors for B MBs (progressive pictures only)
 */
void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                      int direct, int mvtype)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;
    int px, py;
    int sum;
    int r_x, r_y;
    const uint8_t *is_intra = v->mb_type[0];

    av_assert0(!v->field_mode);

    r_x = v->range_x;
    r_y = v->range_y;
    /* scale MV difference to be quad-pel */
    if (!s->quarter_sample) {
        dmv_x[0] *= 2;
        dmv_y[0] *= 2;
        dmv_x[1] *= 2;
        dmv_y[1] *= 2;
    }

    wrap = s->b8_stride;
    xy   = s->block_index[0];

    if (s->mb_intra) {
        s->current_picture.motion_val[0][xy][0] =
        s->current_picture.motion_val[0][xy][1] =
        s->current_picture.motion_val[1][xy][0] =
        s->current_picture.motion_val[1][xy][1] = 0;
        return;
    }
    if (direct && s->next_picture_ptr->field_picture)
        av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");

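    /* Direct mode: derive both vectors by scaling the co-located MV of the
     * next (anchor) picture by BFRACTION, the B-frame's relative temporal
     * position between its anchors (dir 0 forward, dir 1 backward). */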
    s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
    s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
    s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
    s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);

    /* Pullback predicted motion vectors as specified in 8.4.5.4 */
    s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
    s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
    s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    if (direct) {
        s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
        s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
        s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
        s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
        return;
    }

    if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        C   = s->current_picture.motion_val[0][xy - 2];
        A   = s->current_picture.motion_val[0][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B   = s->current_picture.motion_val[0][xy - wrap * 2 + off];

        if (!s->mb_x)
            C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            int sh = v->profile < PROFILE_ADVANCED ? 5 : 6;
            int MV = 4 - (1 << sh);
            qx = (s->mb_x << sh);
            qy = (s->mb_y << sh);
            X  = (s->mb_width  << sh) - 4;
            Y  = (s->mb_height << sh) - 4;
            if (qx + px < MV) px = MV - qx;
            if (qy + py < MV) py = MV - qy;
            if (qx + px > X) px = X - qx;
            if (qy + py > Y) py = Y - qy;
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
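        /* Note: this hybrid refinement (and its twin in the backward branch
         * below) is gated with 0 and therefore currently disabled. */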
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        C   = s->current_picture.motion_val[1][xy - 2];
        A   = s->current_picture.motion_val[1][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B   = s->current_picture.motion_val[1][xy - wrap * 2 + off];

        if (!s->mb_x)
            C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            int sh = v->profile < PROFILE_ADVANCED ? 5 : 6;
            int MV = 4 - (1 << sh);
            qx = (s->mb_x << sh);
            qy = (s->mb_y << sh);
            X  = (s->mb_width  << sh) - 4;
            Y  = (s->mb_height << sh) - 4;
            if (qx + px < MV) px = MV - qx;
            if (qy + py < MV) py = MV - qy;
            if (qx + px > X) px = X - qx;
            if (qy + py > Y) py = Y - qy;
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
    s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
    s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
    s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
}

/** Predict and set motion vectors for B MBs in interlaced field pictures
 */
void ff_vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y,
                            int mv1, int *pred_flag)
{
    int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
    MpegEncContext *s = &v->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

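    /* Direct mode: the reference field used by both directions is chosen by
     * majority vote over the co-located blocks' field flags (mv_f_next)
     * before the co-located MV is scaled by BFRACTION. */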
    if (v->bmvtype == BMV_TYPE_DIRECT) {
        int total_opp, k, f;
        if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
            s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 1, s->quarter_sample);
            s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 1, s->quarter_sample);

            total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
            f = (total_opp > 2) ? 1 : 0;
        } else {
            s->mv[0][0][0] = s->mv[0][0][1] = 0;
            s->mv[1][0][0] = s->mv[1][0][1] = 0;
            f = 0;
        }
        v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
        for (k = 0; k < 4; k++) {
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
            v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
        }
        return;
    }
    if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
        ff_vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        ff_vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        return;
    }
    if (dir) { // backward
        ff_vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        if (n == 3 || mv1) {
            ff_vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
        }
    } else { // forward
        ff_vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        if (n == 3 || mv1) {
            ff_vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
        }
    }
}