h264_mvpred.h
/*
 * H.26L/H.264/AVC/JVT/14496-10/... motion vector prediction
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 motion vector prediction.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#ifndef AVCODEC_H264_MVPRED_H
#define AVCODEC_H264_MVPRED_H

#include "internal.h"
#include "avcodec.h"
#include "h264dec.h"
#include "mpegutils.h"
#include "libavutil/avassert.h"

37 
39  const int16_t **C,
40  int i, int list, int part_width)
41 {
    const int topright_ref = sl->ref_cache[list][i - 8 + part_width];

    /* there is no consistent mapping of mvs to neighboring locations that will
     * make mbaff happy, so we can't move all this logic to fill_caches */
    if (FRAME_MBAFF(h)) {
#define SET_DIAG_MV(MV_OP, REF_OP, XY, Y4)                              \
        const int xy = XY, y4 = Y4;                                     \
        const int mb_type = mb_types[xy + (y4 >> 2) * h->mb_stride];    \
        if (!USES_LIST(mb_type, list))                                  \
            return LIST_NOT_USED;                                       \
        mv = h->cur_pic_ptr->motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
        sl->mv_cache[list][scan8[0] - 2][0] = mv[0];                    \
        sl->mv_cache[list][scan8[0] - 2][1] = mv[1] MV_OP;              \
        return h->cur_pic_ptr->ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;

        if (topright_ref == PART_NOT_AVAILABLE
            && i >= scan8[0] + 8 && (i & 7) == 4
            && sl->ref_cache[list][scan8[0] - 1] != PART_NOT_AVAILABLE) {
            const uint32_t *mb_types = h->cur_pic_ptr->mb_type;
            const int16_t *mv;
            AV_ZERO32(sl->mv_cache[list][scan8[0] - 2]);
            *C = sl->mv_cache[list][scan8[0] - 2];

            if (!MB_FIELD(sl) && IS_INTERLACED(sl->left_type[0])) {
                SET_DIAG_MV(* 2, >> 1, sl->left_mb_xy[0] + h->mb_stride,
                            (sl->mb_y & 1) * 2 + (i >> 5));
            }
            if (MB_FIELD(sl) && !IS_INTERLACED(sl->left_type[0])) {
                // left shift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's OK.
                SET_DIAG_MV(/ 2, *2, sl->left_mb_xy[i >= 36], ((i >> 2)) & 3);
            }
        }
#undef SET_DIAG_MV
    }

    if (topright_ref != PART_NOT_AVAILABLE) {
        *C = sl->mv_cache[list][i - 8 + part_width];
        return topright_ref;
    } else {
        ff_tlog(h->avctx, "topright MV not available\n");

        *C = sl->mv_cache[list][i - 8 - 1];
        return sl->ref_cache[list][i - 8 - 1];
    }
}
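
/* C is the top-right neighbour of the current partition. Per H.264
 * subclause 8.4.1.3.2, when the top-right block is unavailable (outside
 * the picture or slice, or not yet decoded), the top-left block takes its
 * place; that is what the else branch above implements via the
 * i - 8 - 1 cache index. */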

/**
 * Get the predicted MV.
 * @param n the block index
 * @param part_width the width of the partition (4, 8, 16) -> (1, 2, 4)
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_motion(const H264Context *const h,
                                         H264SliceContext *sl,
                                         int n,
                                         int part_width, int list, int ref,
                                         int *const mx, int *const my)
{
    const int index8       = scan8[n];
    const int top_ref      = sl->ref_cache[list][index8 - 8];
    const int left_ref     = sl->ref_cache[list][index8 - 1];
    const int16_t *const A = sl->mv_cache[list][index8 - 1];
    const int16_t *const B = sl->mv_cache[list][index8 - 8];
    const int16_t *C;
    int diagonal_ref, match_count;

    av_assert2(part_width == 1 || part_width == 2 || part_width == 4);

/* mv_cache
 * B . . A T T T T
 * U . . L . . , .
 * U . . L . . . .
 * U . . L . . , .
 * . . . L . . . .
 */
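/* The cache is 8 entries per row, so for block index8 the left neighbour A
 * sits at index8 - 1, the top neighbour B at index8 - 8 and the diagonal C
 * at index8 - 8 + part_width (with the fallback handled by
 * fetch_diagonal_mv()). The border rows/columns are seeded from the
 * neighbouring macroblocks by fill_decode_caches(). */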

    diagonal_ref = fetch_diagonal_mv(h, sl, &C, index8, list, part_width);
    match_count  = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
    ff_tlog(h->avctx, "pred_motion match_count=%d\n", match_count);
    if (match_count > 1) { // most common
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
        } else if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
        } else {
            *mx = C[0];
            *my = C[1];
        }
    } else {
        if (top_ref      == PART_NOT_AVAILABLE &&
            diagonal_ref == PART_NOT_AVAILABLE &&
            left_ref     != PART_NOT_AVAILABLE) {
            *mx = A[0];
            *my = A[1];
        } else {
            *mx = mid_pred(A[0], B[0], C[0]);
            *my = mid_pred(A[1], B[1], C[1]);
        }
    }

    ff_tlog(h->avctx,
            "pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n",
            top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref,
            A[0], A[1], ref, *mx, *my, sl->mb_x, sl->mb_y, n, list);
}
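
/* Illustrative example: with A = (4, -2), B = (10, 0) and C = (6, 8), all
 * three using the requested reference (match_count == 3), the prediction is
 * the component-wise median: *mx = mid_pred(4, 10, 6) = 6 and
 * *my = mid_pred(-2, 0, 8) = 0. When exactly one neighbour matches the
 * reference, its vector is copied unchanged, as required by H.264
 * subclause 8.4.1.3. */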

/**
 * Get the directionally predicted 16x8 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_16x8_motion(const H264Context *const h,
                                              H264SliceContext *sl,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
    if (n == 0) {
        const int top_ref      = sl->ref_cache[list][scan8[0] - 8];
        const int16_t *const B = sl->mv_cache[list][scan8[0] - 8];

        ff_tlog(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                top_ref, B[0], B[1], sl->mb_x, sl->mb_y, n, list);

        if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
            return;
        }
    } else {
        const int left_ref     = sl->ref_cache[list][scan8[8] - 1];
        const int16_t *const A = sl->mv_cache[list][scan8[8] - 1];

        ff_tlog(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }
    }

    // RARE
    pred_motion(h, sl, n, 4, list, ref, mx, my);
}

/**
 * Get the directionally predicted 8x16 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_8x16_motion(const H264Context *const h,
                                              H264SliceContext *sl,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
    if (n == 0) {
        const int left_ref     = sl->ref_cache[list][scan8[0] - 1];
        const int16_t *const A = sl->mv_cache[list][scan8[0] - 1];

        ff_tlog(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }
    } else {
        const int16_t *C;
        int diagonal_ref;

        diagonal_ref = fetch_diagonal_mv(h, sl, &C, scan8[4], list, 2);

        ff_tlog(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                diagonal_ref, C[0], C[1], sl->mb_x, sl->mb_y, n, list);

        if (diagonal_ref == ref) {
            *mx = C[0];
            *my = C[1];
            return;
        }
    }

    // RARE
    pred_motion(h, sl, n, 2, list, ref, mx, my);
}
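
/* pred_16x8_motion() and pred_8x16_motion() implement the directional
 * special cases of H.264 subclause 8.4.1.3: the upper 16x8 partition
 * prefers the top neighbour B and the lower one the left neighbour A,
 * while the left 8x16 partition prefers A and the right one the diagonal
 * neighbour C. Only when the preferred neighbour uses a different
 * reference does the generic median prediction in pred_motion() apply. */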
237 
238 #define FIX_MV_MBAFF(type, refn, mvn, idx) \
239  if (FRAME_MBAFF(h)) { \
240  if (MB_FIELD(sl)) { \
241  if (!IS_INTERLACED(type)) { \
242  refn <<= 1; \
243  AV_COPY32(mvbuf[idx], mvn); \
244  mvbuf[idx][1] /= 2; \
245  mvn = mvbuf[idx]; \
246  } \
247  } else { \
248  if (IS_INTERLACED(type)) { \
249  refn >>= 1; \
250  AV_COPY32(mvbuf[idx], mvn); \
251  mvbuf[idx][1] *= 2; \
252  mvn = mvbuf[idx]; \
253  } \
254  } \
255  }
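
/* FIX_MV_MBAFF rescales a neighbour's motion data when the current and the
 * neighbouring macroblock differ in field/frame coding: a field has half
 * the vertical resolution and twice the reference indices of a frame, so
 * e.g. a frame neighbour with refn = 1 and mv (3, -6) is seen from a field
 * macroblock as refn = 2 and mv (3, -3), and vice versa. mvbuf provides
 * writable storage because mvn normally points into the read-only picture
 * buffers. */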

static av_always_inline void pred_pskip_motion(const H264Context *const h,
                                               H264SliceContext *sl)
{
    DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = { 0 };
    DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
    int8_t *ref     = h->cur_pic.ref_index[0];
    int16_t(*mv)[2] = h->cur_pic.motion_val[0];
    int top_ref, left_ref, diagonal_ref, match_count, mx, my;
    const int16_t *A, *B, *C;
    int b_stride = h->b_stride;

    fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);

    /* To avoid doing an entire fill_decode_caches, we inline the relevant
     * parts here.
     * FIXME: this is a partial duplicate of the logic in fill_decode_caches,
     * but it's faster this way. Is there a way to avoid this duplication?
     */
    if (USES_LIST(sl->left_type[LTOP], 0)) {
        left_ref = ref[4 * sl->left_mb_xy[LTOP] + 1 + (sl->left_block[0] & ~1)];
        A        = mv[h->mb2b_xy[sl->left_mb_xy[LTOP]] + 3 + b_stride * sl->left_block[0]];
        FIX_MV_MBAFF(sl->left_type[LTOP], left_ref, A, 0);
        if (!(left_ref | AV_RN32A(A)))
            goto zeromv;
    } else if (sl->left_type[LTOP]) {
        left_ref = LIST_NOT_USED;
        A        = zeromv;
    } else {
        goto zeromv;
    }

    if (USES_LIST(sl->top_type, 0)) {
        top_ref = ref[4 * sl->top_mb_xy + 2];
        B       = mv[h->mb2b_xy[sl->top_mb_xy] + 3 * b_stride];
        FIX_MV_MBAFF(sl->top_type, top_ref, B, 1);
        if (!(top_ref | AV_RN32A(B)))
            goto zeromv;
    } else if (sl->top_type) {
        top_ref = LIST_NOT_USED;
        B       = zeromv;
    } else {
        goto zeromv;
    }

    ff_tlog(h->avctx, "pred_pskip: (%d) (%d) at %2d %2d\n",
            top_ref, left_ref, sl->mb_x, sl->mb_y);

    if (USES_LIST(sl->topright_type, 0)) {
        diagonal_ref = ref[4 * sl->topright_mb_xy + 2];
        C            = mv[h->mb2b_xy[sl->topright_mb_xy] + 3 * b_stride];
        FIX_MV_MBAFF(sl->topright_type, diagonal_ref, C, 2);
    } else if (sl->topright_type) {
        diagonal_ref = LIST_NOT_USED;
        C            = zeromv;
    } else {
        if (USES_LIST(sl->topleft_type, 0)) {
            diagonal_ref = ref[4 * sl->topleft_mb_xy + 1 +
                               (sl->topleft_partition & 2)];
            C            = mv[h->mb2b_xy[sl->topleft_mb_xy] + 3 + b_stride +
                              (sl->topleft_partition & 2 * b_stride)];
            FIX_MV_MBAFF(sl->topleft_type, diagonal_ref, C, 2);
        } else if (sl->topleft_type) {
            diagonal_ref = LIST_NOT_USED;
            C            = zeromv;
        } else {
            diagonal_ref = PART_NOT_AVAILABLE;
            C            = zeromv;
        }
    }

    match_count = !diagonal_ref + !top_ref + !left_ref;
    ff_tlog(h->avctx, "pred_pskip_motion match_count=%d\n", match_count);
    if (match_count > 1) {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (!left_ref) {
            mx = A[0];
            my = A[1];
        } else if (!top_ref) {
            mx = B[0];
            my = B[1];
        } else {
            mx = C[0];
            my = C[1];
        }
    } else {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    }

    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx, my), 4);
    return;

zeromv:
    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
    return;
}
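
/* The zeromv short-cuts mirror H.264 subclause 8.4.1.1: a P_SKIP macroblock
 * gets the zero motion vector whenever the left or top neighbour is
 * unavailable, or when either of them has reference index 0 together with a
 * (0,0) motion vector; otherwise the normal 16x16 median prediction is
 * used. The !(left_ref | AV_RN32A(A)) test folds the "ref == 0 and
 * mv == 0" check into a single branch. */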

static void fill_decode_neighbors(const H264Context *h, H264SliceContext *sl, int mb_type)
{
    const int mb_xy = sl->mb_xy;
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    static const uint8_t left_block_options[4][32] = {
        { 0, 1, 2, 3, 7, 10, 8, 11, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 2, 2, 3, 3, 8, 11, 8, 11, 3 + 2 * 4, 3 + 2 * 4, 3 + 3 * 4, 3 + 3 * 4, 1 + 5 * 4, 1 + 9 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 0, 0, 1, 1, 7, 10, 7, 10, 3 + 0 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 1 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 },
        { 0, 2, 0, 2, 7, 10, 7, 10, 3 + 0 * 4, 3 + 2 * 4, 3 + 0 * 4, 3 + 2 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 }
    };

    sl->topleft_partition = -1;

    top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));

    /* Wow, what a mess, why didn't they simplify the interlacing & intra
     * stuff, I can't imagine that these complex rules are worth it. */

    topleft_xy     = top_xy - 1;
    topright_xy    = top_xy + 1;
    left_xy[LBOT]  = left_xy[LTOP] = mb_xy - 1;
    sl->left_block = left_block_options[0];
    if (FRAME_MBAFF(h)) {
        const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
        if (sl->mb_y & 1) {
            if (left_mb_field_flag != curr_mb_field_flag) {
                left_xy[LBOT] = left_xy[LTOP] = mb_xy - h->mb_stride - 1;
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += h->mb_stride;
                    sl->left_block = left_block_options[3];
                } else {
                    topleft_xy += h->mb_stride;
                    /* take top left mv from the middle of the mb, as opposed
                     * to all other modes which use the bottom right partition */
                    sl->topleft_partition = 0;
                    sl->left_block        = left_block_options[1];
                }
            }
        } else {
            if (curr_mb_field_flag) {
                topleft_xy  += h->mb_stride & (((h->cur_pic.mb_type[top_xy - 1] >> 7) & 1) - 1);
                topright_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy + 1] >> 7) & 1) - 1);
                top_xy      += h->mb_stride & (((h->cur_pic.mb_type[top_xy]     >> 7) & 1) - 1);
            }
            if (left_mb_field_flag != curr_mb_field_flag) {
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += h->mb_stride;
                    sl->left_block = left_block_options[3];
                } else {
                    sl->left_block = left_block_options[2];
                }
            }
        }
    }

    sl->topleft_mb_xy    = topleft_xy;
    sl->top_mb_xy        = top_xy;
    sl->topright_mb_xy   = topright_xy;
    sl->left_mb_xy[LTOP] = left_xy[LTOP];
    sl->left_mb_xy[LBOT] = left_xy[LBOT];
    //FIXME do we need all in the context?

    sl->topleft_type    = h->cur_pic.mb_type[topleft_xy];
    sl->top_type        = h->cur_pic.mb_type[top_xy];
    sl->topright_type   = h->cur_pic.mb_type[topright_xy];
    sl->left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
    sl->left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];

    if (FMO) {
        if (h->slice_table[topleft_xy] != sl->slice_num)
            sl->topleft_type = 0;
        if (h->slice_table[top_xy] != sl->slice_num)
            sl->top_type = 0;
        if (h->slice_table[left_xy[LTOP]] != sl->slice_num)
            sl->left_type[LTOP] = sl->left_type[LBOT] = 0;
    } else {
        if (h->slice_table[topleft_xy] != sl->slice_num) {
            sl->topleft_type = 0;
            if (h->slice_table[top_xy] != sl->slice_num)
                sl->top_type = 0;
            if (h->slice_table[left_xy[LTOP]] != sl->slice_num)
                sl->left_type[LTOP] = sl->left_type[LBOT] = 0;
        }
    }
    if (h->slice_table[topright_xy] != sl->slice_num)
        sl->topright_type = 0;
}
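
/* In progressive coding the neighbour derivation is simply
 * top_xy = mb_xy - mb_stride, with top-left/top-right at top_xy -/+ 1 and
 * left at mb_xy - 1. The MBAFF branches above re-target these indices when
 * the current and neighbouring macroblock pairs differ in field/frame
 * coding; left_block_options selects which sub-blocks of the left pair
 * serve as left neighbours in each combination. */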

static void fill_decode_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
{
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    int topleft_type, top_type, topright_type, left_type[LEFT_MBS];
    const uint8_t *left_block = sl->left_block;
    int i;
    uint8_t *nnz;
    uint8_t *nnz_cache;

    topleft_xy      = sl->topleft_mb_xy;
    top_xy          = sl->top_mb_xy;
    topright_xy     = sl->topright_mb_xy;
    left_xy[LTOP]   = sl->left_mb_xy[LTOP];
    left_xy[LBOT]   = sl->left_mb_xy[LBOT];
    topleft_type    = sl->topleft_type;
    top_type        = sl->top_type;
    topright_type   = sl->topright_type;
    left_type[LTOP] = sl->left_type[LTOP];
    left_type[LBOT] = sl->left_type[LBOT];

    if (!IS_SKIP(mb_type)) {
        if (IS_INTRA(mb_type)) {
            int type_mask = h->ps.pps->constrained_intra_pred ? IS_INTRA(-1) : -1;
            sl->topleft_samples_available  =
            sl->top_samples_available      =
            sl->left_samples_available     = 0xFFFF;
            sl->topright_samples_available = 0xEEEA;

            if (!(top_type & type_mask)) {
                sl->topleft_samples_available  = 0xB3FF;
                sl->top_samples_available      = 0x33FF;
                sl->topright_samples_available = 0x26EA;
            }
            if (IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[LTOP])) {
                if (IS_INTERLACED(mb_type)) {
                    if (!(left_type[LTOP] & type_mask)) {
                        sl->topleft_samples_available &= 0xDFFF;
                        sl->left_samples_available    &= 0x5FFF;
                    }
                    if (!(left_type[LBOT] & type_mask)) {
                        sl->topleft_samples_available &= 0xFF5F;
                        sl->left_samples_available    &= 0xFF5F;
                    }
                } else {
                    int left_typei = h->cur_pic.mb_type[left_xy[LTOP] + h->mb_stride];

                    av_assert2(left_xy[LTOP] == left_xy[LBOT]);
                    if (!((left_typei & type_mask) && (left_type[LTOP] & type_mask))) {
                        sl->topleft_samples_available &= 0xDF5F;
                        sl->left_samples_available    &= 0x5F5F;
                    }
                }
            } else {
                if (!(left_type[LTOP] & type_mask)) {
                    sl->topleft_samples_available &= 0xDF5F;
                    sl->left_samples_available    &= 0x5F5F;
                }
            }

            if (!(topleft_type & type_mask))
                sl->topleft_samples_available &= 0x7FFF;

            if (!(topright_type & type_mask))
                sl->topright_samples_available &= 0xFBFF;

            if (IS_INTRA4x4(mb_type)) {
                if (IS_INTRA4x4(top_type)) {
                    AV_COPY32(sl->intra4x4_pred_mode_cache + 4 + 8 * 0, sl->intra4x4_pred_mode + h->mb2br_xy[top_xy]);
                } else {
                    sl->intra4x4_pred_mode_cache[4 + 8 * 0] =
                    sl->intra4x4_pred_mode_cache[5 + 8 * 0] =
                    sl->intra4x4_pred_mode_cache[6 + 8 * 0] =
                    sl->intra4x4_pred_mode_cache[7 + 8 * 0] = 2 - 3 * !(top_type & type_mask);
                }
                for (i = 0; i < 2; i++) {
                    if (IS_INTRA4x4(left_type[LEFT(i)])) {
                        int8_t *mode = sl->intra4x4_pred_mode + h->mb2br_xy[left_xy[LEFT(i)]];
                        sl->intra4x4_pred_mode_cache[3 + 8 * 1 + 2 * 8 * i] = mode[6 - left_block[0 + 2 * i]];
                        sl->intra4x4_pred_mode_cache[3 + 8 * 2 + 2 * 8 * i] = mode[6 - left_block[1 + 2 * i]];
                    } else {
                        sl->intra4x4_pred_mode_cache[3 + 8 * 1 + 2 * 8 * i] =
                        sl->intra4x4_pred_mode_cache[3 + 8 * 2 + 2 * 8 * i] = 2 - 3 * !(left_type[LEFT(i)] & type_mask);
                    }
                }
            }
        }

        /*
         * 0 . T T. T T T T
         * 1 L . .L . . . .
         * 2 L . .L . . . .
         * 3 . T TL . . . .
         * 4 L . .L . . . .
         * 5 L . .. . . . .
         */
        /* FIXME: constraint_intra_pred & partitioning & nnz
         * (let us hope this is just a typo in the spec) */
        nnz_cache = sl->non_zero_count_cache;
        if (top_type) {
            nnz = h->non_zero_count[top_xy];
            AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[4 * 3]);
            if (!h->chroma_y_shift) {
                AV_COPY32(&nnz_cache[4 + 8 * 5], &nnz[4 * 7]);
                AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 11]);
            } else {
                AV_COPY32(&nnz_cache[4 + 8 * 5], &nnz[4 * 5]);
                AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 9]);
            }
        } else {
            uint32_t top_empty = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 0x40404040;
            AV_WN32A(&nnz_cache[4 + 8 * 0], top_empty);
            AV_WN32A(&nnz_cache[4 + 8 * 5], top_empty);
            AV_WN32A(&nnz_cache[4 + 8 * 10], top_empty);
        }

        for (i = 0; i < 2; i++) {
            if (left_type[LEFT(i)]) {
                nnz = h->non_zero_count[left_xy[LEFT(i)]];
                nnz_cache[3 + 8 * 1 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i]];
                nnz_cache[3 + 8 * 2 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i]];
                if (CHROMA444(h)) {
                    nnz_cache[3 + 8 * 6 + 2 * 8 * i]  = nnz[left_block[8 + 0 + 2 * i] + 4 * 4];
                    nnz_cache[3 + 8 * 7 + 2 * 8 * i]  = nnz[left_block[8 + 1 + 2 * i] + 4 * 4];
                    nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 8 * 4];
                    nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 8 * 4];
                } else if (CHROMA422(h)) {
                    nnz_cache[3 + 8 * 6 + 2 * 8 * i]  = nnz[left_block[8 + 0 + 2 * i] - 2 + 4 * 4];
                    nnz_cache[3 + 8 * 7 + 2 * 8 * i]  = nnz[left_block[8 + 1 + 2 * i] - 2 + 4 * 4];
                    nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 8 * 4];
                    nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 8 * 4];
                } else {
                    nnz_cache[3 + 8 * 6 + 8 * i]  = nnz[left_block[8 + 4 + 2 * i]];
                    nnz_cache[3 + 8 * 11 + 8 * i] = nnz[left_block[8 + 5 + 2 * i]];
                }
            } else {
                nnz_cache[3 + 8 * 1 + 2 * 8 * i]  =
                nnz_cache[3 + 8 * 2 + 2 * 8 * i]  =
                nnz_cache[3 + 8 * 6 + 2 * 8 * i]  =
                nnz_cache[3 + 8 * 7 + 2 * 8 * i]  =
                nnz_cache[3 + 8 * 11 + 2 * 8 * i] =
                nnz_cache[3 + 8 * 12 + 2 * 8 * i] = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 64;
            }
        }

        if (CABAC(h)) {
            // top_cbp
            if (top_type)
                sl->top_cbp = h->cbp_table[top_xy];
            else
                sl->top_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
            // left_cbp
            if (left_type[LTOP]) {
                sl->left_cbp = (h->cbp_table[left_xy[LTOP]] & 0x7F0) |
                               ((h->cbp_table[left_xy[LTOP]] >> (left_block[0] & (~1))) & 2) |
                               (((h->cbp_table[left_xy[LBOT]] >> (left_block[2] & (~1))) & 2) << 2);
            } else {
                sl->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
            }
        }
    }

    if (IS_INTER(mb_type) || (IS_DIRECT(mb_type) && sl->direct_spatial_mv_pred)) {
        int list;
        int b_stride = h->b_stride;
        for (list = 0; list < sl->list_count; list++) {
            int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
            int8_t *ref       = h->cur_pic.ref_index[list];
            int16_t(*mv_cache)[2] = &sl->mv_cache[list][scan8[0]];
            int16_t(*mv)[2]       = h->cur_pic.motion_val[list];
            if (!USES_LIST(mb_type, list))
                continue;
            av_assert2(!(IS_DIRECT(mb_type) && !sl->direct_spatial_mv_pred));

            if (USES_LIST(top_type, list)) {
                const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
                AV_COPY128(mv_cache[0 - 1 * 8], mv[b_xy + 0]);
                ref_cache[0 - 1 * 8] =
                ref_cache[1 - 1 * 8] = ref[4 * top_xy + 2];
                ref_cache[2 - 1 * 8] =
                ref_cache[3 - 1 * 8] = ref[4 * top_xy + 3];
            } else {
                AV_ZERO128(mv_cache[0 - 1 * 8]);
                AV_WN32A(&ref_cache[0 - 1 * 8],
                         ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE) & 0xFF) * 0x01010101u);
            }

            if (mb_type & (MB_TYPE_16x8 | MB_TYPE_8x8)) {
                for (i = 0; i < 2; i++) {
                    int cache_idx = -1 + i * 2 * 8;
                    if (USES_LIST(left_type[LEFT(i)], list)) {
                        const int b_xy  = h->mb2b_xy[left_xy[LEFT(i)]] + 3;
                        const int b8_xy = 4 * left_xy[LEFT(i)] + 1;
                        AV_COPY32(mv_cache[cache_idx],
                                  mv[b_xy + b_stride * left_block[0 + i * 2]]);
                        AV_COPY32(mv_cache[cache_idx + 8],
                                  mv[b_xy + b_stride * left_block[1 + i * 2]]);
                        ref_cache[cache_idx]     = ref[b8_xy + (left_block[0 + i * 2] & ~1)];
                        ref_cache[cache_idx + 8] = ref[b8_xy + (left_block[1 + i * 2] & ~1)];
                    } else {
                        AV_ZERO32(mv_cache[cache_idx]);
                        AV_ZERO32(mv_cache[cache_idx + 8]);
                        ref_cache[cache_idx]     =
                        ref_cache[cache_idx + 8] = (left_type[LEFT(i)]) ? LIST_NOT_USED
                                                                        : PART_NOT_AVAILABLE;
                    }
                }
            } else {
                if (USES_LIST(left_type[LTOP], list)) {
                    const int b_xy  = h->mb2b_xy[left_xy[LTOP]] + 3;
                    const int b8_xy = 4 * left_xy[LTOP] + 1;
                    AV_COPY32(mv_cache[-1], mv[b_xy + b_stride * left_block[0]]);
                    ref_cache[-1] = ref[b8_xy + (left_block[0] & ~1)];
                } else {
                    AV_ZERO32(mv_cache[-1]);
                    ref_cache[-1] = left_type[LTOP] ? LIST_NOT_USED
                                                    : PART_NOT_AVAILABLE;
                }
            }

            if (USES_LIST(topright_type, list)) {
                const int b_xy = h->mb2b_xy[topright_xy] + 3 * b_stride;
                AV_COPY32(mv_cache[4 - 1 * 8], mv[b_xy]);
                ref_cache[4 - 1 * 8] = ref[4 * topright_xy + 2];
            } else {
                AV_ZERO32(mv_cache[4 - 1 * 8]);
                ref_cache[4 - 1 * 8] = topright_type ? LIST_NOT_USED
                                                     : PART_NOT_AVAILABLE;
            }
            if (ref_cache[2 - 1 * 8] < 0 || ref_cache[4 - 1 * 8] < 0) {
                if (USES_LIST(topleft_type, list)) {
                    const int b_xy  = h->mb2b_xy[topleft_xy] + 3 + b_stride +
                                      (sl->topleft_partition & 2 * b_stride);
                    const int b8_xy = 4 * topleft_xy + 1 + (sl->topleft_partition & 2);
                    AV_COPY32(mv_cache[-1 - 1 * 8], mv[b_xy]);
                    ref_cache[-1 - 1 * 8] = ref[b8_xy];
                } else {
                    AV_ZERO32(mv_cache[-1 - 1 * 8]);
                    ref_cache[-1 - 1 * 8] = topleft_type ? LIST_NOT_USED
                                                         : PART_NOT_AVAILABLE;
                }
            }

            if ((mb_type & (MB_TYPE_SKIP | MB_TYPE_DIRECT2)) && !FRAME_MBAFF(h))
                continue;

            if (!(mb_type & (MB_TYPE_SKIP | MB_TYPE_DIRECT2))) {
                uint8_t(*mvd_cache)[2] = &sl->mvd_cache[list][scan8[0]];
                uint8_t(*mvd)[2]       = sl->mvd_table[list];
                ref_cache[2 + 8 * 0] =
                ref_cache[2 + 8 * 2] = PART_NOT_AVAILABLE;
                AV_ZERO32(mv_cache[2 + 8 * 0]);
                AV_ZERO32(mv_cache[2 + 8 * 2]);

                if (CABAC(h)) {
                    if (USES_LIST(top_type, list)) {
                        const int b_xy = h->mb2br_xy[top_xy];
                        AV_COPY64(mvd_cache[0 - 1 * 8], mvd[b_xy + 0]);
                    } else {
                        AV_ZERO64(mvd_cache[0 - 1 * 8]);
                    }
                    if (USES_LIST(left_type[LTOP], list)) {
                        const int b_xy = h->mb2br_xy[left_xy[LTOP]] + 6;
                        AV_COPY16(mvd_cache[-1 + 0 * 8], mvd[b_xy - left_block[0]]);
                        AV_COPY16(mvd_cache[-1 + 1 * 8], mvd[b_xy - left_block[1]]);
                    } else {
                        AV_ZERO16(mvd_cache[-1 + 0 * 8]);
                        AV_ZERO16(mvd_cache[-1 + 1 * 8]);
                    }
                    if (USES_LIST(left_type[LBOT], list)) {
                        const int b_xy = h->mb2br_xy[left_xy[LBOT]] + 6;
                        AV_COPY16(mvd_cache[-1 + 2 * 8], mvd[b_xy - left_block[2]]);
                        AV_COPY16(mvd_cache[-1 + 3 * 8], mvd[b_xy - left_block[3]]);
                    } else {
                        AV_ZERO16(mvd_cache[-1 + 2 * 8]);
                        AV_ZERO16(mvd_cache[-1 + 3 * 8]);
                    }
                    AV_ZERO16(mvd_cache[2 + 8 * 0]);
                    AV_ZERO16(mvd_cache[2 + 8 * 2]);
                    if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
                        uint8_t *direct_cache = &sl->direct_cache[scan8[0]];
                        uint8_t *direct_table = h->direct_table;
                        fill_rectangle(direct_cache, 4, 4, 8, MB_TYPE_16x16 >> 1, 1);

                        if (IS_DIRECT(top_type)) {
                            AV_WN32A(&direct_cache[-1 * 8],
                                     0x01010101u * (MB_TYPE_DIRECT2 >> 1));
                        } else if (IS_8X8(top_type)) {
                            int b8_xy = 4 * top_xy;
                            direct_cache[0 - 1 * 8] = direct_table[b8_xy + 2];
                            direct_cache[2 - 1 * 8] = direct_table[b8_xy + 3];
                        } else {
                            AV_WN32A(&direct_cache[-1 * 8],
                                     0x01010101 * (MB_TYPE_16x16 >> 1));
                        }

                        if (IS_DIRECT(left_type[LTOP]))
                            direct_cache[-1 + 0 * 8] = MB_TYPE_DIRECT2 >> 1;
                        else if (IS_8X8(left_type[LTOP]))
                            direct_cache[-1 + 0 * 8] = direct_table[4 * left_xy[LTOP] + 1 + (left_block[0] & ~1)];
                        else
                            direct_cache[-1 + 0 * 8] = MB_TYPE_16x16 >> 1;

                        if (IS_DIRECT(left_type[LBOT]))
                            direct_cache[-1 + 2 * 8] = MB_TYPE_DIRECT2 >> 1;
                        else if (IS_8X8(left_type[LBOT]))
                            direct_cache[-1 + 2 * 8] = direct_table[4 * left_xy[LBOT] + 1 + (left_block[2] & ~1)];
                        else
                            direct_cache[-1 + 2 * 8] = MB_TYPE_16x16 >> 1;
                    }
                }
            }

#define MAP_MVS                                                         \
    MAP_F2F(scan8[0] - 1 - 1 * 8, topleft_type)                         \
    MAP_F2F(scan8[0] + 0 - 1 * 8, top_type)                             \
    MAP_F2F(scan8[0] + 1 - 1 * 8, top_type)                             \
    MAP_F2F(scan8[0] + 2 - 1 * 8, top_type)                             \
    MAP_F2F(scan8[0] + 3 - 1 * 8, top_type)                             \
    MAP_F2F(scan8[0] + 4 - 1 * 8, topright_type)                        \
    MAP_F2F(scan8[0] - 1 + 0 * 8, left_type[LTOP])                      \
    MAP_F2F(scan8[0] - 1 + 1 * 8, left_type[LTOP])                      \
    MAP_F2F(scan8[0] - 1 + 2 * 8, left_type[LBOT])                      \
    MAP_F2F(scan8[0] - 1 + 3 * 8, left_type[LBOT])

            if (FRAME_MBAFF(h)) {
                if (MB_FIELD(sl)) {

#define MAP_F2F(idx, mb_type)                                           \
    if (!IS_INTERLACED(mb_type) && sl->ref_cache[list][idx] >= 0) {     \
        sl->ref_cache[list][idx]     *= 2;                              \
        sl->mv_cache[list][idx][1]   /= 2;                              \
        sl->mvd_cache[list][idx][1] >>= 1;                              \
    }

                    MAP_MVS
                } else {

#undef MAP_F2F
#define MAP_F2F(idx, mb_type)                                           \
    if (IS_INTERLACED(mb_type) && sl->ref_cache[list][idx] >= 0) {      \
        sl->ref_cache[list][idx]    >>= 1;                              \
        sl->mv_cache[list][idx][1]   *= 2;                              \
        sl->mvd_cache[list][idx][1] <<= 1;                              \
    }

                    MAP_MVS
#undef MAP_F2F
                }
            }
        }
    }

    sl->neighbor_transform_size = !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[LTOP]);
}
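
/* After fill_decode_caches() the *_cache arrays hold the current
 * macroblock's 4x4 grid plus a one-entry border of neighbour data, so the
 * prediction functions above can address neighbours with constant scan8[]
 * offsets instead of re-deriving picture positions per block. */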

/**
 * Decode a P_SKIP or B_SKIP macroblock.
 */
static void av_unused decode_mb_skip(const H264Context *h, H264SliceContext *sl)
{
    const int mb_xy = sl->mb_xy;
    int mb_type     = 0;

    memset(h->non_zero_count[mb_xy], 0, 48);

    if (MB_FIELD(sl))
        mb_type |= MB_TYPE_INTERLACED;

    if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
        // just for fill_caches. pred_direct_motion will set the real mb_type
        mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 | MB_TYPE_16x16;
        if (sl->direct_spatial_mv_pred) {
            fill_decode_neighbors(h, sl, mb_type);
            fill_decode_caches(h, sl, mb_type); //FIXME check what is needed and what not ...
        }
        ff_h264_pred_direct_motion(h, sl, &mb_type);
        mb_type |= MB_TYPE_SKIP;
    } else {
        mb_type |= MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P1L0 | MB_TYPE_SKIP;

        fill_decode_neighbors(h, sl, mb_type);
        pred_pskip_motion(h, sl);
    }

    write_back_motion(h, sl, mb_type);
    h->cur_pic.mb_type[mb_xy]      = mb_type;
    h->cur_pic.qscale_table[mb_xy] = sl->qscale;
    h->slice_table[mb_xy]          = sl->slice_num;
    sl->prev_mb_skipped            = 1;
}
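
/* This helper is shared by the CAVLC and CABAC macroblock decoders, which
 * call it when the bitstream signals a skipped macroblock (a skip run or
 * mb_skip_flag); all bookkeeping a skip needs (motion, mb_type, qscale and
 * slice tables) happens here. */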

#endif /* AVCODEC_H264_MVPRED_H */