h264_mvpred.h
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... motion vector prediction
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 motion vector prediction.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #ifndef AVCODEC_H264_MVPRED_H
29 #define AVCODEC_H264_MVPRED_H
30 
31 #include "h264dec.h"
32 #include "mpegutils.h"
33 #include "rectangle.h"
34 
35 #include "libavutil/avassert.h"
36 #include "libavutil/mem_internal.h"
37 
38 
39 /**
40  * Get the predicted intra4x4 prediction mode.
41  */
42 static av_always_inline int pred_intra_mode(const H264Context *h,
43  H264SliceContext *sl, int n)
44 {
45  const int index8 = scan8[n];
46  const int left = sl->intra4x4_pred_mode_cache[index8 - 1];
47  const int top = sl->intra4x4_pred_mode_cache[index8 - 8];
48  const int min = FFMIN(left, top);
49 
50  ff_tlog(h->avctx, "mode:%d %d min:%d\n", left, top, min);
51 
52  if (min < 0)
53  return DC_PRED;
54  else
55  return min;
56 }
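/* Illustrative sketch (hypothetical helper, not part of h264_mvpred.h): the
 * rule above reduces to "take the smaller of the left and top 4x4 modes, or
 * DC if either neighbour is missing".  Negative inputs play the role of the
 * -1 entries that fill_decode_caches() stores for unavailable or
 * non-Intra4x4 neighbours; 2 is assumed here to be the DC mode, matching
 * DC_PRED. */
static inline int example_most_probable_mode(int left_mode, int top_mode)
{
    const int min = left_mode < top_mode ? left_mode : top_mode;
    return min < 0 ? 2 /* DC */ : min;
}
/* e.g. left = 1 (horizontal), top = -1 (unavailable)     -> 2 (DC);
 *      left = 0 (vertical),   top = 3 (diag. down-left)  -> 0 (vertical). */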
57 
58 static av_always_inline void write_back_intra_pred_mode(const H264Context *h,
59  H264SliceContext *sl)
60 {
61  int8_t *i4x4 = sl->intra4x4_pred_mode + h->mb2br_xy[sl->mb_xy];
62  int8_t *i4x4_cache = sl->intra4x4_pred_mode_cache;
63 
64  AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
65  i4x4[4] = i4x4_cache[7 + 8 * 3];
66  i4x4[5] = i4x4_cache[7 + 8 * 2];
67  i4x4[6] = i4x4_cache[7 + 8 * 1];
68 }
69 
70 static av_always_inline void write_back_non_zero_count(const H264Context *h,
71  H264SliceContext *sl)
72 {
73  const int mb_xy = sl->mb_xy;
74  uint8_t *nnz = h->non_zero_count[mb_xy];
75  uint8_t *nnz_cache = sl->non_zero_count_cache;
76 
77  AV_COPY32(&nnz[ 0], &nnz_cache[4 + 8 * 1]);
78  AV_COPY32(&nnz[ 4], &nnz_cache[4 + 8 * 2]);
79  AV_COPY32(&nnz[ 8], &nnz_cache[4 + 8 * 3]);
80  AV_COPY32(&nnz[12], &nnz_cache[4 + 8 * 4]);
81  AV_COPY32(&nnz[16], &nnz_cache[4 + 8 * 6]);
82  AV_COPY32(&nnz[20], &nnz_cache[4 + 8 * 7]);
83  AV_COPY32(&nnz[32], &nnz_cache[4 + 8 * 11]);
84  AV_COPY32(&nnz[36], &nnz_cache[4 + 8 * 12]);
85 
86  if (!h->chroma_y_shift) {
87  AV_COPY32(&nnz[24], &nnz_cache[4 + 8 * 8]);
88  AV_COPY32(&nnz[28], &nnz_cache[4 + 8 * 9]);
89  AV_COPY32(&nnz[40], &nnz_cache[4 + 8 * 13]);
90  AV_COPY32(&nnz[44], &nnz_cache[4 + 8 * 14]);
91  }
92 }
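/* Illustrative sketch (hypothetical helper, not part of the original file):
 * the per-macroblock caches used in this header (non_zero_count_cache,
 * mv_cache, ref_cache, intra4x4_pred_mode_cache) are laid out in rows of 8
 * entries, so "index - 1" is always the left neighbour and "index - 8" the
 * block directly above, with row 0 and column 3 holding data imported from
 * the neighbouring macroblocks.  For luma 4x4 block n (0..15, ordered 8x8
 * block first, then 4x4 sub-block) the cache position addressed via scan8[n]
 * can be reconstructed as: */
static inline int example_scan8_luma(int n)
{
    const int b8 = n >> 2, b4 = n & 3;        /* 8x8 block, 4x4 sub-block   */
    const int x  = 2 * (b8 & 1) + (b4 & 1);   /* 4x4 column inside the MB   */
    const int y  = 2 * (b8 >> 1) + (b4 >> 1); /* 4x4 row inside the MB      */
    return (4 + x) + 8 * (1 + y);             /* row 0 is the top neighbour */
}
/* example_scan8_luma(0) == 4 + 1 * 8 == 12, which matches scan8[0]. */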
93 
94 static av_always_inline void write_back_motion_list(const H264Context *h,
95  H264SliceContext *sl,
96  int b_stride,
97  int b_xy, int b8_xy,
98  int mb_type, int list)
99 {
100  int16_t(*mv_dst)[2] = &h->cur_pic.motion_val[list][b_xy];
101  int16_t(*mv_src)[2] = &sl->mv_cache[list][scan8[0]];
102  AV_COPY128(mv_dst + 0 * b_stride, mv_src + 8 * 0);
103  AV_COPY128(mv_dst + 1 * b_stride, mv_src + 8 * 1);
104  AV_COPY128(mv_dst + 2 * b_stride, mv_src + 8 * 2);
105  AV_COPY128(mv_dst + 3 * b_stride, mv_src + 8 * 3);
106  if (CABAC(h)) {
107  uint8_t (*mvd_dst)[2] = &sl->mvd_table[list][FMO ? 8 * sl->mb_xy
108  : h->mb2br_xy[sl->mb_xy]];
109  uint8_t(*mvd_src)[2] = &sl->mvd_cache[list][scan8[0]];
110  if (IS_SKIP(mb_type)) {
111  AV_ZERO128(mvd_dst);
112  } else {
113  AV_COPY64(mvd_dst, mvd_src + 8 * 3);
114  AV_COPY16(mvd_dst + 3 + 3, mvd_src + 3 + 8 * 0);
115  AV_COPY16(mvd_dst + 3 + 2, mvd_src + 3 + 8 * 1);
116  AV_COPY16(mvd_dst + 3 + 1, mvd_src + 3 + 8 * 2);
117  }
118  }
119 
120  {
121  int8_t *ref_index = &h->cur_pic.ref_index[list][b8_xy];
122  int8_t *ref_cache = sl->ref_cache[list];
123  ref_index[0 + 0 * 2] = ref_cache[scan8[0]];
124  ref_index[1 + 0 * 2] = ref_cache[scan8[4]];
125  ref_index[0 + 1 * 2] = ref_cache[scan8[8]];
126  ref_index[1 + 1 * 2] = ref_cache[scan8[12]];
127  }
128 }
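/* Illustrative note (hypothetical helper, not part of the original file): the
 * cache rows above are 8 motion vectors wide, while each macroblock owns a
 * 4x4 tile of vectors in the picture-wide motion_val array (row stride
 * b_stride).  Each AV_COPY128 therefore moves exactly one tile row:
 * 4 vectors x 2 components x int16_t = 16 bytes.  A plain-C equivalent of one
 * such row copy, under that layout assumption: */
static inline void example_copy_mv_row(int16_t (*dst)[2], const int16_t (*src)[2])
{
    for (int i = 0; i < 4; i++) { /* 4 MVs = 16 bytes, like AV_COPY128 */
        dst[i][0] = src[i][0];
        dst[i][1] = src[i][1];
    }
}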
129 
130 static av_always_inline void write_back_motion(const H264Context *h,
131  H264SliceContext *sl,
132  int mb_type)
133 {
134  const int b_stride = h->b_stride;
135  const int b_xy = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride; // try mb2b(8)_xy
136  const int b8_xy = 4 * sl->mb_xy;
137 
138  if (USES_LIST(mb_type, 0)) {
139  write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 0);
140  } else {
141  fill_rectangle(&h->cur_pic.ref_index[0][b8_xy],
142  2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
143  }
144  if (USES_LIST(mb_type, 1))
145  write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 1);
146 
147  if (sl->slice_type_nos == AV_PICTURE_TYPE_B && CABAC(h)) {
148  if (IS_8X8(mb_type)) {
149  uint8_t *direct_table = &h->direct_table[4 * sl->mb_xy];
150  direct_table[1] = sl->sub_mb_type[1] >> 1;
151  direct_table[2] = sl->sub_mb_type[2] >> 1;
152  direct_table[3] = sl->sub_mb_type[3] >> 1;
153  }
154  }
155 }
156 
157 static av_always_inline int get_dct8x8_allowed(const H264Context *h, H264SliceContext *sl)
158 {
159  if (h->ps.sps->direct_8x8_inference_flag)
160  return !(AV_RN64A(sl->sub_mb_type) &
161  ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8) *
162  0x0001000100010001ULL));
163  else
164  return !(AV_RN64A(sl->sub_mb_type) &
165  ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8 | MB_TYPE_DIRECT2) *
166  0x0001000100010001ULL));
167 }
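/* Illustrative sketch (hypothetical helper, not part of the original file):
 * sl->sub_mb_type holds four 16-bit values that AV_RN64A() reads as a single
 * 64-bit word; multiplying a 16-bit flag mask by 0x0001000100010001ULL
 * replicates it into all four lanes, so one AND tests every 8x8 sub-block at
 * once.  The generic form of the test: */
static inline int example_any_lane_has_flags(uint64_t four_lane_word, uint16_t flags)
{
    return (four_lane_word & (flags * 0x0001000100010001ULL)) != 0;
}
/* get_dct8x8_allowed() returns the negation of such a test: 1 only when none
 * of the four sub-macroblock types carries the listed partition flags. */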
168 
169 static av_always_inline int fetch_diagonal_mv(const H264Context *h, H264SliceContext *sl,
170  const int16_t **C,
171  int i, int list, int part_width)
172 {
173  const int topright_ref = sl->ref_cache[list][i - 8 + part_width];
174 
175  /* there is no consistent mapping of mvs to neighboring locations that will
176  * make mbaff happy, so we can't move all this logic to fill_caches */
177  if (FRAME_MBAFF(h)) {
178 #define SET_DIAG_MV(MV_OP, REF_OP, XY, Y4) \
179  const int xy = XY, y4 = Y4; \
180  const int mb_type = mb_types[xy + (y4 >> 2) * h->mb_stride]; \
181  if (!USES_LIST(mb_type, list)) \
182  return LIST_NOT_USED; \
183  mv = h->cur_pic_ptr->motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
184  sl->mv_cache[list][scan8[0] - 2][0] = mv[0]; \
185  sl->mv_cache[list][scan8[0] - 2][1] = mv[1] MV_OP; \
186  return h->cur_pic_ptr->ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;
187 
188  if (topright_ref == PART_NOT_AVAILABLE
189  && i >= scan8[0] + 8 && (i & 7) == 4
190  && sl->ref_cache[list][scan8[0] - 1] != PART_NOT_AVAILABLE) {
191  const uint32_t *mb_types = h->cur_pic_ptr->mb_type;
192  const int16_t *mv;
193  AV_ZERO32(sl->mv_cache[list][scan8[0] - 2]);
194  *C = sl->mv_cache[list][scan8[0] - 2];
195 
196  if (!MB_FIELD(sl) && IS_INTERLACED(sl->left_type[0])) {
197  SET_DIAG_MV(* 2, >> 1, sl->left_mb_xy[0] + h->mb_stride,
198  (sl->mb_y & 1) * 2 + (i >> 5));
199  }
200  if (MB_FIELD(sl) && !IS_INTERLACED(sl->left_type[0])) {
201  // left shift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's OK.
202  SET_DIAG_MV(/ 2, *2, sl->left_mb_xy[i >= 36], ((i >> 2)) & 3);
203  }
204  }
205 #undef SET_DIAG_MV
206  }
207 
208  if (topright_ref != PART_NOT_AVAILABLE) {
209  *C = sl->mv_cache[list][i - 8 + part_width];
210  return topright_ref;
211  } else {
212  ff_tlog(h->avctx, "topright MV not available\n");
213 
214  *C = sl->mv_cache[list][i - 8 - 1];
215  return sl->ref_cache[list][i - 8 - 1];
216  }
217 }
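/* Illustrative sketch (hypothetical helper, not part of the original file):
 * outside the MBAFF special cases above, the diagonal predictor C is simply
 * "top-right if available, otherwise top-left", expressed in cache
 * coordinates (rows of 8 entries, so -8 is one row up).  "Available" here
 * means the cached reference is not PART_NOT_AVAILABLE; a neighbour with
 * LIST_NOT_USED still counts as available. */
static inline int example_diagonal_cache_index(int i, int part_width,
                                               int topright_available)
{
    return topright_available ? i - 8 + part_width /* top-right of the part */
                              : i - 8 - 1;         /* top-left fallback     */
}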
218 
219 /**
220  * Get the predicted MV.
221  * @param n the block index
222  * @param part_width the width of the partition (4, 8, 16) -> (1, 2, 4)
223  * @param mx the x component of the predicted motion vector
224  * @param my the y component of the predicted motion vector
225  */
226 static av_always_inline void pred_motion(const H264Context *const h,
227  H264SliceContext *sl,
228  int n,
229  int part_width, int list, int ref,
230  int *const mx, int *const my)
231 {
232  const int index8 = scan8[n];
233  const int top_ref = sl->ref_cache[list][index8 - 8];
234  const int left_ref = sl->ref_cache[list][index8 - 1];
235  const int16_t *const A = sl->mv_cache[list][index8 - 1];
236  const int16_t *const B = sl->mv_cache[list][index8 - 8];
237  const int16_t *C;
238  int diagonal_ref, match_count;
239 
240  av_assert2(part_width == 1 || part_width == 2 || part_width == 4);
241 
242 /* mv_cache
243  * B . . A T T T T
244  * U . . L . . , .
245  * U . . L . . . .
246  * U . . L . . , .
247  * . . . L . . . .
248  */
249 
250  diagonal_ref = fetch_diagonal_mv(h, sl, &C, index8, list, part_width);
251  match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
252  ff_tlog(h->avctx, "pred_motion match_count=%d\n", match_count);
253  if (match_count > 1) { //most common
254  *mx = mid_pred(A[0], B[0], C[0]);
255  *my = mid_pred(A[1], B[1], C[1]);
256  } else if (match_count == 1) {
257  if (left_ref == ref) {
258  *mx = A[0];
259  *my = A[1];
260  } else if (top_ref == ref) {
261  *mx = B[0];
262  *my = B[1];
263  } else {
264  *mx = C[0];
265  *my = C[1];
266  }
267  } else {
268  if (top_ref == PART_NOT_AVAILABLE &&
269  diagonal_ref == PART_NOT_AVAILABLE &&
270  left_ref != PART_NOT_AVAILABLE) {
271  *mx = A[0];
272  *my = A[1];
273  } else {
274  *mx = mid_pred(A[0], B[0], C[0]);
275  *my = mid_pred(A[1], B[1], C[1]);
276  }
277  }
278 
279  ff_tlog(h->avctx,
280  "pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n",
281  top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref,
282  A[0], A[1], ref, *mx, *my, sl->mb_x, sl->mb_y, n, list);
283 }
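/* Illustrative worked example (hypothetical helper, not part of the original
 * file): in the common case where at least two neighbours use the requested
 * reference, the prediction is the component-wise median, e.g.
 * A = (2, 0), B = (4, -2), C = (0, 4) gives (mx, my) = (2, 0).  A standalone
 * median-of-three for one motion vector: */
static inline void example_median_mv(const int16_t A[2], const int16_t B[2],
                                     const int16_t C[2], int *mx, int *my)
{
    for (int c = 0; c < 2; c++) {
        const int lo = A[c] < B[c] ? A[c] : B[c];
        const int hi = A[c] < B[c] ? B[c] : A[c];
        const int m  = C[c] < lo ? lo : (C[c] > hi ? hi : C[c]); /* median */
        if (c == 0) *mx = m; else *my = m;
    }
}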
284 
285 /**
286  * Get the directionally predicted 16x8 MV.
287  * @param n the block index
288  * @param mx the x component of the predicted motion vector
289  * @param my the y component of the predicted motion vector
290  */
291 static av_always_inline void pred_16x8_motion(const H264Context *const h,
292  H264SliceContext *sl,
293  int n, int list, int ref,
294  int *const mx, int *const my)
295 {
296  if (n == 0) {
297  const int top_ref = sl->ref_cache[list][scan8[0] - 8];
298  const int16_t *const B = sl->mv_cache[list][scan8[0] - 8];
299 
300  ff_tlog(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
301  top_ref, B[0], B[1], sl->mb_x, sl->mb_y, n, list);
302 
303  if (top_ref == ref) {
304  *mx = B[0];
305  *my = B[1];
306  return;
307  }
308  } else {
309  const int left_ref = sl->ref_cache[list][scan8[8] - 1];
310  const int16_t *const A = sl->mv_cache[list][scan8[8] - 1];
311 
312  ff_tlog(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
313  left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);
314 
315  if (left_ref == ref) {
316  *mx = A[0];
317  *my = A[1];
318  return;
319  }
320  }
321 
322  //RARE
323  pred_motion(h, sl, n, 4, list, ref, mx, my);
324 }
325 
326 /**
327  * Get the directionally predicted 8x16 MV.
328  * @param n the block index
329  * @param mx the x component of the predicted motion vector
330  * @param my the y component of the predicted motion vector
331  */
332 static av_always_inline void pred_8x16_motion(const H264Context *const h,
333  H264SliceContext *sl,
334  int n, int list, int ref,
335  int *const mx, int *const my)
336 {
337  if (n == 0) {
338  const int left_ref = sl->ref_cache[list][scan8[0] - 1];
339  const int16_t *const A = sl->mv_cache[list][scan8[0] - 1];
340 
341  ff_tlog(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
342  left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);
343 
344  if (left_ref == ref) {
345  *mx = A[0];
346  *my = A[1];
347  return;
348  }
349  } else {
350  const int16_t *C;
351  int diagonal_ref;
352 
353  diagonal_ref = fetch_diagonal_mv(h, sl, &C, scan8[4], list, 2);
354 
355  ff_tlog(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
356  diagonal_ref, C[0], C[1], sl->mb_x, sl->mb_y, n, list);
357 
358  if (diagonal_ref == ref) {
359  *mx = C[0];
360  *my = C[1];
361  return;
362  }
363  }
364 
365  //RARE
366  pred_motion(h, sl, n, 2, list, ref, mx, my);
367 }
368 
369 #define FIX_MV_MBAFF(type, refn, mvn, idx) \
370  if (FRAME_MBAFF(h)) { \
371  if (MB_FIELD(sl)) { \
372  if (!IS_INTERLACED(type)) { \
373  refn <<= 1; \
374  AV_COPY32(mvbuf[idx], mvn); \
375  mvbuf[idx][1] /= 2; \
376  mvn = mvbuf[idx]; \
377  } \
378  } else { \
379  if (IS_INTERLACED(type)) { \
380  refn >>= 1; \
381  AV_COPY32(mvbuf[idx], mvn); \
382  mvbuf[idx][1] *= 2; \
383  mvn = mvbuf[idx]; \
384  } \
385  } \
386  }
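/* Illustrative sketch (hypothetical helper, not part of the original file):
 * in an MBAFF frame a field macroblock covers half the vertical extent of a
 * frame macroblock, so data taken from a neighbour coded in the other mode
 * must be rescaled first.  Going frame -> field doubles the reference index
 * (each frame reference splits into two field references) and halves the
 * vertical motion component; field -> frame does the inverse, exactly as the
 * two branches of FIX_MV_MBAFF do. */
static inline void example_frame_to_field_neighbour(int *refn, int16_t mv_xy[2])
{
    *refn    *= 2;  /* one frame reference becomes two field references */
    mv_xy[1] /= 2;  /* vertical motion is now measured in field lines   */
}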
387 
388 static av_always_inline void pred_pskip_motion(const H264Context *const h,
389  H264SliceContext *sl)
390 {
391  DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = { 0 };
392  DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
393  int8_t *ref = h->cur_pic.ref_index[0];
394  int16_t(*mv)[2] = h->cur_pic.motion_val[0];
395  int top_ref, left_ref, diagonal_ref, match_count, mx, my;
396  const int16_t *A, *B, *C;
397  int b_stride = h->b_stride;
398 
399  fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
400 
401  /* To avoid doing an entire fill_decode_caches, we inline the relevant
402  * parts here.
403  * FIXME: this is a partial duplicate of the logic in fill_decode_caches,
404  * but it's faster this way. Is there a way to avoid this duplication?
405  */
406  if (USES_LIST(sl->left_type[LTOP], 0)) {
407  left_ref = ref[4 * sl->left_mb_xy[LTOP] + 1 + (sl->left_block[0] & ~1)];
408  A = mv[h->mb2b_xy[sl->left_mb_xy[LTOP]] + 3 + b_stride * sl->left_block[0]];
409  FIX_MV_MBAFF(sl->left_type[LTOP], left_ref, A, 0);
410  if (!(left_ref | AV_RN32A(A)))
411  goto zeromv;
412  } else if (sl->left_type[LTOP]) {
413  left_ref = LIST_NOT_USED;
414  A = zeromv;
415  } else {
416  goto zeromv;
417  }
418 
419  if (USES_LIST(sl->top_type, 0)) {
420  top_ref = ref[4 * sl->top_mb_xy + 2];
421  B = mv[h->mb2b_xy[sl->top_mb_xy] + 3 * b_stride];
422  FIX_MV_MBAFF(sl->top_type, top_ref, B, 1);
423  if (!(top_ref | AV_RN32A(B)))
424  goto zeromv;
425  } else if (sl->top_type) {
426  top_ref = LIST_NOT_USED;
427  B = zeromv;
428  } else {
429  goto zeromv;
430  }
431 
432  ff_tlog(h->avctx, "pred_pskip: (%d) (%d) at %2d %2d\n",
433  top_ref, left_ref, sl->mb_x, sl->mb_y);
434 
435  if (USES_LIST(sl->topright_type, 0)) {
436  diagonal_ref = ref[4 * sl->topright_mb_xy + 2];
437  C = mv[h->mb2b_xy[sl->topright_mb_xy] + 3 * b_stride];
438  FIX_MV_MBAFF(sl->topright_type, diagonal_ref, C, 2);
439  } else if (sl->topright_type) {
440  diagonal_ref = LIST_NOT_USED;
441  C = zeromv;
442  } else {
443  if (USES_LIST(sl->topleft_type, 0)) {
444  diagonal_ref = ref[4 * sl->topleft_mb_xy + 1 +
445  (sl->topleft_partition & 2)];
446  C = mv[h->mb2b_xy[sl->topleft_mb_xy] + 3 + b_stride +
447  (sl->topleft_partition & 2 * b_stride)];
448  FIX_MV_MBAFF(sl->topleft_type, diagonal_ref, C, 2);
449  } else if (sl->topleft_type) {
450  diagonal_ref = LIST_NOT_USED;
451  C = zeromv;
452  } else {
453  diagonal_ref = PART_NOT_AVAILABLE;
454  C = zeromv;
455  }
456  }
457 
458  match_count = !diagonal_ref + !top_ref + !left_ref;
459  ff_tlog(h->avctx, "pred_pskip_motion match_count=%d\n", match_count);
460  if (match_count > 1) {
461  mx = mid_pred(A[0], B[0], C[0]);
462  my = mid_pred(A[1], B[1], C[1]);
463  } else if (match_count == 1) {
464  if (!left_ref) {
465  mx = A[0];
466  my = A[1];
467  } else if (!top_ref) {
468  mx = B[0];
469  my = B[1];
470  } else {
471  mx = C[0];
472  my = C[1];
473  }
474  } else {
475  mx = mid_pred(A[0], B[0], C[0]);
476  my = mid_pred(A[1], B[1], C[1]);
477  }
478 
479  fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx, my), 4);
480  return;
481 
482 zeromv:
483  fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
484  return;
485 }
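/* Illustrative sketch (hypothetical helper, not part of the original file):
 * the "goto zeromv" paths above implement the P_SKIP rule: the skip motion
 * vector collapses to (0,0) whenever the left or top neighbour lies outside
 * the picture/slice, or is an inter block that uses reference 0 with a zero
 * motion vector; otherwise the usual median prediction is applied. */
static inline int example_pskip_uses_zero_mv(int left_outside, int top_outside,
                                             int left_ref, const int16_t left_mv[2],
                                             int top_ref, const int16_t top_mv[2])
{
    if (left_outside || top_outside)
        return 1;
    if (left_ref == 0 && !left_mv[0] && !left_mv[1])
        return 1;
    if (top_ref == 0 && !top_mv[0] && !top_mv[1])
        return 1;
    return 0;
}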
486 
487 static void fill_decode_neighbors(const H264Context *h, H264SliceContext *sl, int mb_type)
488 {
489  const int mb_xy = sl->mb_xy;
490  int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
491  static const uint8_t left_block_options[4][32] = {
492  { 0, 1, 2, 3, 7, 10, 8, 11, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 5 * 4, 1 + 9 * 4 },
493  { 2, 2, 3, 3, 8, 11, 8, 11, 3 + 2 * 4, 3 + 2 * 4, 3 + 3 * 4, 3 + 3 * 4, 1 + 5 * 4, 1 + 9 * 4, 1 + 5 * 4, 1 + 9 * 4 },
494  { 0, 0, 1, 1, 7, 10, 7, 10, 3 + 0 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 1 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 },
495  { 0, 2, 0, 2, 7, 10, 7, 10, 3 + 0 * 4, 3 + 2 * 4, 3 + 0 * 4, 3 + 2 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 }
496  };
497 
498  sl->topleft_partition = -1;
499 
500  top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));
501 
502  /* Wow, what a mess, why didn't they simplify the interlacing & intra
503  * stuff, I can't imagine that these complex rules are worth it. */
504 
505  topleft_xy = top_xy - 1;
506  topright_xy = top_xy + 1;
507  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
508  sl->left_block = left_block_options[0];
509  if (FRAME_MBAFF(h)) {
510  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
511  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
512  if (sl->mb_y & 1) {
513  if (left_mb_field_flag != curr_mb_field_flag) {
514  left_xy[LBOT] = left_xy[LTOP] = mb_xy - h->mb_stride - 1;
515  if (curr_mb_field_flag) {
516  left_xy[LBOT] += h->mb_stride;
517  sl->left_block = left_block_options[3];
518  } else {
519  topleft_xy += h->mb_stride;
520  /* take top left mv from the middle of the mb, as opposed
521  * to all other modes which use the bottom right partition */
522  sl->topleft_partition = 0;
523  sl->left_block = left_block_options[1];
524  }
525  }
526  } else {
527  if (curr_mb_field_flag) {
528  topleft_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy - 1] >> 7) & 1) - 1);
529  topright_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy + 1] >> 7) & 1) - 1);
530  top_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
531  }
532  if (left_mb_field_flag != curr_mb_field_flag) {
533  if (curr_mb_field_flag) {
534  left_xy[LBOT] += h->mb_stride;
535  sl->left_block = left_block_options[3];
536  } else {
537  sl->left_block = left_block_options[2];
538  }
539  }
540  }
541  }
542 
543  sl->topleft_mb_xy = topleft_xy;
544  sl->top_mb_xy = top_xy;
545  sl->topright_mb_xy = topright_xy;
546  sl->left_mb_xy[LTOP] = left_xy[LTOP];
547  sl->left_mb_xy[LBOT] = left_xy[LBOT];
548  //FIXME do we need all in the context?
549 
550  sl->topleft_type = h->cur_pic.mb_type[topleft_xy];
551  sl->top_type = h->cur_pic.mb_type[top_xy];
552  sl->topright_type = h->cur_pic.mb_type[topright_xy];
553  sl->left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
554  sl->left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
555 
556  if (FMO) {
557  if (h->slice_table[topleft_xy] != sl->slice_num)
558  sl->topleft_type = 0;
559  if (h->slice_table[top_xy] != sl->slice_num)
560  sl->top_type = 0;
561  if (h->slice_table[left_xy[LTOP]] != sl->slice_num)
562  sl->left_type[LTOP] = sl->left_type[LBOT] = 0;
563  } else {
564  if (h->slice_table[topleft_xy] != sl->slice_num) {
565  sl->topleft_type = 0;
566  if (h->slice_table[top_xy] != sl->slice_num)
567  sl->top_type = 0;
568  if (h->slice_table[left_xy[LTOP]] != sl->slice_num)
569  sl->left_type[LTOP] = sl->left_type[LBOT] = 0;
570  }
571  }
572  if (h->slice_table[topright_xy] != sl->slice_num)
573  sl->topright_type = 0;
574 }
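/* Illustrative sketch (hypothetical helper, not part of the original file):
 * for plain progressive frames the neighbour addressing above is simple
 * raster arithmetic on mb_xy with h->mb_stride macroblocks per row; all the
 * extra logic in fill_decode_neighbors() handles MBAFF macroblock pairs and
 * field/frame mismatches.  The non-MBAFF case: */
static inline void example_neighbour_mb_addresses(int mb_xy, int mb_stride,
                                                  int *left, int *top,
                                                  int *topleft, int *topright)
{
    *left     = mb_xy - 1;
    *top      = mb_xy - mb_stride;
    *topleft  = mb_xy - mb_stride - 1;
    *topright = mb_xy - mb_stride + 1;
}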
575 
576 static void fill_decode_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
577 {
578  int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
579  int topleft_type, top_type, topright_type, left_type[LEFT_MBS];
580  const uint8_t *left_block = sl->left_block;
581  int i;
582  uint8_t *nnz;
583  uint8_t *nnz_cache;
584 
585  topleft_xy = sl->topleft_mb_xy;
586  top_xy = sl->top_mb_xy;
587  topright_xy = sl->topright_mb_xy;
588  left_xy[LTOP] = sl->left_mb_xy[LTOP];
589  left_xy[LBOT] = sl->left_mb_xy[LBOT];
590  topleft_type = sl->topleft_type;
591  top_type = sl->top_type;
592  topright_type = sl->topright_type;
593  left_type[LTOP] = sl->left_type[LTOP];
594  left_type[LBOT] = sl->left_type[LBOT];
595 
596  if (!IS_SKIP(mb_type)) {
597  if (IS_INTRA(mb_type)) {
598  int type_mask = h->ps.pps->constrained_intra_pred ? IS_INTRA(-1) : -1;
599  sl->topleft_samples_available =
600  sl->top_samples_available =
601  sl->left_samples_available = 0xFFFF;
602  sl->topright_samples_available = 0xEEEA;
603 
604  if (!(top_type & type_mask)) {
605  sl->topleft_samples_available = 0xB3FF;
606  sl->top_samples_available = 0x33FF;
607  sl->topright_samples_available = 0x26EA;
608  }
609  if (IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[LTOP])) {
610  if (IS_INTERLACED(mb_type)) {
611  if (!(left_type[LTOP] & type_mask)) {
612  sl->topleft_samples_available &= 0xDFFF;
613  sl->left_samples_available &= 0x5FFF;
614  }
615  if (!(left_type[LBOT] & type_mask)) {
616  sl->topleft_samples_available &= 0xFF5F;
617  sl->left_samples_available &= 0xFF5F;
618  }
619  } else {
620  int left_typei = h->cur_pic.mb_type[left_xy[LTOP] + h->mb_stride];
621 
622  av_assert2(left_xy[LTOP] == left_xy[LBOT]);
623  if (!((left_typei & type_mask) && (left_type[LTOP] & type_mask))) {
624  sl->topleft_samples_available &= 0xDF5F;
625  sl->left_samples_available &= 0x5F5F;
626  }
627  }
628  } else {
629  if (!(left_type[LTOP] & type_mask)) {
630  sl->topleft_samples_available &= 0xDF5F;
631  sl->left_samples_available &= 0x5F5F;
632  }
633  }
634 
635  if (!(topleft_type & type_mask))
636  sl->topleft_samples_available &= 0x7FFF;
637 
638  if (!(topright_type & type_mask))
639  sl->topright_samples_available &= 0xFBFF;
640 
641  if (IS_INTRA4x4(mb_type)) {
642  if (IS_INTRA4x4(top_type)) {
643  AV_COPY32(sl->intra4x4_pred_mode_cache + 4 + 8 * 0, sl->intra4x4_pred_mode + h->mb2br_xy[top_xy]);
644  } else {
645  sl->intra4x4_pred_mode_cache[4 + 8 * 0] =
646  sl->intra4x4_pred_mode_cache[5 + 8 * 0] =
647  sl->intra4x4_pred_mode_cache[6 + 8 * 0] =
648  sl->intra4x4_pred_mode_cache[7 + 8 * 0] = 2 - 3 * !(top_type & type_mask);
649  }
650  for (i = 0; i < 2; i++) {
651  if (IS_INTRA4x4(left_type[LEFT(i)])) {
652  int8_t *mode = sl->intra4x4_pred_mode + h->mb2br_xy[left_xy[LEFT(i)]];
653  sl->intra4x4_pred_mode_cache[3 + 8 * 1 + 2 * 8 * i] = mode[6 - left_block[0 + 2 * i]];
654  sl->intra4x4_pred_mode_cache[3 + 8 * 2 + 2 * 8 * i] = mode[6 - left_block[1 + 2 * i]];
655  } else {
656  sl->intra4x4_pred_mode_cache[3 + 8 * 1 + 2 * 8 * i] =
657  sl->intra4x4_pred_mode_cache[3 + 8 * 2 + 2 * 8 * i] = 2 - 3 * !(left_type[LEFT(i)] & type_mask);
658  }
659  }
660  }
661  }
662 
663  /*
664  * 0 . T T. T T T T
665  * 1 L . .L . . . .
666  * 2 L . .L . . . .
667  * 3 . T TL . . . .
668  * 4 L . .L . . . .
669  * 5 L . .. . . . .
670  */
671  /* FIXME: constraint_intra_pred & partitioning & nnz
672  * (let us hope this is just a typo in the spec) */
673  nnz_cache = sl->non_zero_count_cache;
674  if (top_type) {
675  nnz = h->non_zero_count[top_xy];
676  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[4 * 3]);
677  if (!h->chroma_y_shift) {
678  AV_COPY32(&nnz_cache[4 + 8 * 5], &nnz[4 * 7]);
679  AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 11]);
680  } else {
681  AV_COPY32(&nnz_cache[4 + 8 * 5], &nnz[4 * 5]);
682  AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 9]);
683  }
684  } else {
685  uint32_t top_empty = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 0x40404040;
686  AV_WN32A(&nnz_cache[4 + 8 * 0], top_empty);
687  AV_WN32A(&nnz_cache[4 + 8 * 5], top_empty);
688  AV_WN32A(&nnz_cache[4 + 8 * 10], top_empty);
689  }
690 
691  for (i = 0; i < 2; i++) {
692  if (left_type[LEFT(i)]) {
693  nnz = h->non_zero_count[left_xy[LEFT(i)]];
694  nnz_cache[3 + 8 * 1 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i]];
695  nnz_cache[3 + 8 * 2 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i]];
696  if (CHROMA444(h)) {
697  nnz_cache[3 + 8 * 6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 4 * 4];
698  nnz_cache[3 + 8 * 7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 4 * 4];
699  nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 8 * 4];
700  nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 8 * 4];
701  } else if (CHROMA422(h)) {
702  nnz_cache[3 + 8 * 6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 4 * 4];
703  nnz_cache[3 + 8 * 7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 4 * 4];
704  nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 8 * 4];
705  nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 8 * 4];
706  } else {
707  nnz_cache[3 + 8 * 6 + 8 * i] = nnz[left_block[8 + 4 + 2 * i]];
708  nnz_cache[3 + 8 * 11 + 8 * i] = nnz[left_block[8 + 5 + 2 * i]];
709  }
710  } else {
711  nnz_cache[3 + 8 * 1 + 2 * 8 * i] =
712  nnz_cache[3 + 8 * 2 + 2 * 8 * i] =
713  nnz_cache[3 + 8 * 6 + 2 * 8 * i] =
714  nnz_cache[3 + 8 * 7 + 2 * 8 * i] =
715  nnz_cache[3 + 8 * 11 + 2 * 8 * i] =
716  nnz_cache[3 + 8 * 12 + 2 * 8 * i] = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 64;
717  }
718  }
719 
720  if (CABAC(h)) {
721  // top_cbp
722  if (top_type)
723  sl->top_cbp = h->cbp_table[top_xy];
724  else
725  sl->top_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
726  // left_cbp
727  if (left_type[LTOP]) {
728  sl->left_cbp = (h->cbp_table[left_xy[LTOP]] & 0x7F0) |
729  ((h->cbp_table[left_xy[LTOP]] >> (left_block[0] & (~1))) & 2) |
730  (((h->cbp_table[left_xy[LBOT]] >> (left_block[2] & (~1))) & 2) << 2);
731  } else {
732  sl->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
733  }
734  }
735  }
736 
737  if (IS_INTER(mb_type) || (IS_DIRECT(mb_type) && sl->direct_spatial_mv_pred)) {
738  int list;
739  int b_stride = h->b_stride;
740  for (list = 0; list < sl->list_count; list++) {
741  int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
742  int8_t *ref = h->cur_pic.ref_index[list];
743  int16_t(*mv_cache)[2] = &sl->mv_cache[list][scan8[0]];
744  int16_t(*mv)[2] = h->cur_pic.motion_val[list];
745  if (!USES_LIST(mb_type, list))
746  continue;
747  av_assert2(!(IS_DIRECT(mb_type) && !sl->direct_spatial_mv_pred));
748 
749  if (USES_LIST(top_type, list)) {
750  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
751  AV_COPY128(mv_cache[0 - 1 * 8], mv[b_xy + 0]);
752  ref_cache[0 - 1 * 8] =
753  ref_cache[1 - 1 * 8] = ref[4 * top_xy + 2];
754  ref_cache[2 - 1 * 8] =
755  ref_cache[3 - 1 * 8] = ref[4 * top_xy + 3];
756  } else {
757  AV_ZERO128(mv_cache[0 - 1 * 8]);
758  AV_WN32A(&ref_cache[0 - 1 * 8],
759  ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE) & 0xFF) * 0x01010101u);
760  }
761 
762  if (mb_type & (MB_TYPE_16x8 | MB_TYPE_8x8)) {
763  for (i = 0; i < 2; i++) {
764  int cache_idx = -1 + i * 2 * 8;
765  if (USES_LIST(left_type[LEFT(i)], list)) {
766  const int b_xy = h->mb2b_xy[left_xy[LEFT(i)]] + 3;
767  const int b8_xy = 4 * left_xy[LEFT(i)] + 1;
768  AV_COPY32(mv_cache[cache_idx],
769  mv[b_xy + b_stride * left_block[0 + i * 2]]);
770  AV_COPY32(mv_cache[cache_idx + 8],
771  mv[b_xy + b_stride * left_block[1 + i * 2]]);
772  ref_cache[cache_idx] = ref[b8_xy + (left_block[0 + i * 2] & ~1)];
773  ref_cache[cache_idx + 8] = ref[b8_xy + (left_block[1 + i * 2] & ~1)];
774  } else {
775  AV_ZERO32(mv_cache[cache_idx]);
776  AV_ZERO32(mv_cache[cache_idx + 8]);
777  ref_cache[cache_idx] =
778  ref_cache[cache_idx + 8] = (left_type[LEFT(i)]) ? LIST_NOT_USED
779  : PART_NOT_AVAILABLE;
780  }
781  }
782  } else {
783  if (USES_LIST(left_type[LTOP], list)) {
784  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
785  const int b8_xy = 4 * left_xy[LTOP] + 1;
786  AV_COPY32(mv_cache[-1], mv[b_xy + b_stride * left_block[0]]);
787  ref_cache[-1] = ref[b8_xy + (left_block[0] & ~1)];
788  } else {
789  AV_ZERO32(mv_cache[-1]);
790  ref_cache[-1] = left_type[LTOP] ? LIST_NOT_USED
791  : PART_NOT_AVAILABLE;
792  }
793  }
794 
795  if (USES_LIST(topright_type, list)) {
796  const int b_xy = h->mb2b_xy[topright_xy] + 3 * b_stride;
797  AV_COPY32(mv_cache[4 - 1 * 8], mv[b_xy]);
798  ref_cache[4 - 1 * 8] = ref[4 * topright_xy + 2];
799  } else {
800  AV_ZERO32(mv_cache[4 - 1 * 8]);
801  ref_cache[4 - 1 * 8] = topright_type ? LIST_NOT_USED
802  : PART_NOT_AVAILABLE;
803  }
804  if(ref_cache[2 - 1*8] < 0 || ref_cache[4 - 1 * 8] < 0) {
805  if (USES_LIST(topleft_type, list)) {
806  const int b_xy = h->mb2b_xy[topleft_xy] + 3 + b_stride +
807  (sl->topleft_partition & 2 * b_stride);
808  const int b8_xy = 4 * topleft_xy + 1 + (sl->topleft_partition & 2);
809  AV_COPY32(mv_cache[-1 - 1 * 8], mv[b_xy]);
810  ref_cache[-1 - 1 * 8] = ref[b8_xy];
811  } else {
812  AV_ZERO32(mv_cache[-1 - 1 * 8]);
813  ref_cache[-1 - 1 * 8] = topleft_type ? LIST_NOT_USED
814  : PART_NOT_AVAILABLE;
815  }
816  }
817 
818  if ((mb_type & (MB_TYPE_SKIP | MB_TYPE_DIRECT2)) && !FRAME_MBAFF(h))
819  continue;
820 
821  if (!(mb_type & (MB_TYPE_SKIP | MB_TYPE_DIRECT2))) {
822  uint8_t(*mvd_cache)[2] = &sl->mvd_cache[list][scan8[0]];
823  uint8_t(*mvd)[2] = sl->mvd_table[list];
824  ref_cache[2 + 8 * 0] =
825  ref_cache[2 + 8 * 2] = PART_NOT_AVAILABLE;
826  AV_ZERO32(mv_cache[2 + 8 * 0]);
827  AV_ZERO32(mv_cache[2 + 8 * 2]);
828 
829  if (CABAC(h)) {
830  if (USES_LIST(top_type, list)) {
831  const int b_xy = h->mb2br_xy[top_xy];
832  AV_COPY64(mvd_cache[0 - 1 * 8], mvd[b_xy + 0]);
833  } else {
834  AV_ZERO64(mvd_cache[0 - 1 * 8]);
835  }
836  if (USES_LIST(left_type[LTOP], list)) {
837  const int b_xy = h->mb2br_xy[left_xy[LTOP]] + 6;
838  AV_COPY16(mvd_cache[-1 + 0 * 8], mvd[b_xy - left_block[0]]);
839  AV_COPY16(mvd_cache[-1 + 1 * 8], mvd[b_xy - left_block[1]]);
840  } else {
841  AV_ZERO16(mvd_cache[-1 + 0 * 8]);
842  AV_ZERO16(mvd_cache[-1 + 1 * 8]);
843  }
844  if (USES_LIST(left_type[LBOT], list)) {
845  const int b_xy = h->mb2br_xy[left_xy[LBOT]] + 6;
846  AV_COPY16(mvd_cache[-1 + 2 * 8], mvd[b_xy - left_block[2]]);
847  AV_COPY16(mvd_cache[-1 + 3 * 8], mvd[b_xy - left_block[3]]);
848  } else {
849  AV_ZERO16(mvd_cache[-1 + 2 * 8]);
850  AV_ZERO16(mvd_cache[-1 + 3 * 8]);
851  }
852  AV_ZERO16(mvd_cache[2 + 8 * 0]);
853  AV_ZERO16(mvd_cache[2 + 8 * 2]);
854  if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
855  uint8_t *direct_cache = &sl->direct_cache[scan8[0]];
856  uint8_t *direct_table = h->direct_table;
857  fill_rectangle(direct_cache, 4, 4, 8, MB_TYPE_16x16 >> 1, 1);
858 
859  if (IS_DIRECT(top_type)) {
860  AV_WN32A(&direct_cache[-1 * 8],
861  0x01010101u * (MB_TYPE_DIRECT2 >> 1));
862  } else if (IS_8X8(top_type)) {
863  int b8_xy = 4 * top_xy;
864  direct_cache[0 - 1 * 8] = direct_table[b8_xy + 2];
865  direct_cache[2 - 1 * 8] = direct_table[b8_xy + 3];
866  } else {
867  AV_WN32A(&direct_cache[-1 * 8],
868  0x01010101 * (MB_TYPE_16x16 >> 1));
869  }
870 
871  if (IS_DIRECT(left_type[LTOP]))
872  direct_cache[-1 + 0 * 8] = MB_TYPE_DIRECT2 >> 1;
873  else if (IS_8X8(left_type[LTOP]))
874  direct_cache[-1 + 0 * 8] = direct_table[4 * left_xy[LTOP] + 1 + (left_block[0] & ~1)];
875  else
876  direct_cache[-1 + 0 * 8] = MB_TYPE_16x16 >> 1;
877 
878  if (IS_DIRECT(left_type[LBOT]))
879  direct_cache[-1 + 2 * 8] = MB_TYPE_DIRECT2 >> 1;
880  else if (IS_8X8(left_type[LBOT]))
881  direct_cache[-1 + 2 * 8] = direct_table[4 * left_xy[LBOT] + 1 + (left_block[2] & ~1)];
882  else
883  direct_cache[-1 + 2 * 8] = MB_TYPE_16x16 >> 1;
884  }
885  }
886  }
887 
888 #define MAP_MVS \
889  MAP_F2F(scan8[0] - 1 - 1 * 8, topleft_type) \
890  MAP_F2F(scan8[0] + 0 - 1 * 8, top_type) \
891  MAP_F2F(scan8[0] + 1 - 1 * 8, top_type) \
892  MAP_F2F(scan8[0] + 2 - 1 * 8, top_type) \
893  MAP_F2F(scan8[0] + 3 - 1 * 8, top_type) \
894  MAP_F2F(scan8[0] + 4 - 1 * 8, topright_type) \
895  MAP_F2F(scan8[0] - 1 + 0 * 8, left_type[LTOP]) \
896  MAP_F2F(scan8[0] - 1 + 1 * 8, left_type[LTOP]) \
897  MAP_F2F(scan8[0] - 1 + 2 * 8, left_type[LBOT]) \
898  MAP_F2F(scan8[0] - 1 + 3 * 8, left_type[LBOT])
899 
900  if (FRAME_MBAFF(h)) {
901  if (MB_FIELD(sl)) {
902 
903 #define MAP_F2F(idx, mb_type) \
904  if (!IS_INTERLACED(mb_type) && sl->ref_cache[list][idx] >= 0) { \
905  sl->ref_cache[list][idx] *= 2; \
906  sl->mv_cache[list][idx][1] /= 2; \
907  sl->mvd_cache[list][idx][1] >>= 1; \
908  }
909 
910  MAP_MVS
911  } else {
912 
913 #undef MAP_F2F
914 #define MAP_F2F(idx, mb_type) \
915  if (IS_INTERLACED(mb_type) && sl->ref_cache[list][idx] >= 0) { \
916  sl->ref_cache[list][idx] >>= 1; \
917  sl->mv_cache[list][idx][1] *= 2; \
918  sl->mvd_cache[list][idx][1] <<= 1; \
919  }
920 
921  MAP_MVS
922 #undef MAP_F2F
923  }
924  }
925  }
926  }
927 
928  sl->neighbor_transform_size = !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[LTOP]);
929 }
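/* Illustrative sketch (hypothetical helper, not part of the original file):
 * the 0x40404040 pattern written above for missing neighbours is four bytes
 * of 64, a sentinel "no information" value for the non_zero_count cache
 * (real per-block counts are 0..16), while CABAC inter macroblocks store 0
 * instead.  Building that fill word: */
static inline uint32_t example_nnz_fill_word(int cabac, int is_intra)
{
    const uint8_t v = (cabac && !is_intra) ? 0 : 64;
    return 0x01010101u * v; /* replicate the byte into all four lanes */
}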
930 
931 /**
932  * Decode a P_SKIP or B_SKIP macroblock.
933  */
934 static void av_unused decode_mb_skip(const H264Context *h, H264SliceContext *sl)
935 {
936  const int mb_xy = sl->mb_xy;
937  int mb_type = 0;
938 
939  memset(h->non_zero_count[mb_xy], 0, 48);
940 
941  if (MB_FIELD(sl))
942  mb_type |= MB_TYPE_INTERLACED;
943 
944  if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
945  // just for fill_caches. pred_direct_motion will set the real mb_type
946  mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 | MB_TYPE_SKIP;
947  if (sl->direct_spatial_mv_pred) {
948  fill_decode_neighbors(h, sl, mb_type);
949  fill_decode_caches(h, sl, mb_type); //FIXME check what is needed and what not ...
950  }
951  ff_h264_pred_direct_motion(h, sl, &mb_type);
952  mb_type |= MB_TYPE_SKIP;
953  } else {
954  mb_type |= MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P1L0 | MB_TYPE_SKIP;
955 
956  fill_decode_neighbors(h, sl, mb_type);
957  pred_pskip_motion(h, sl);
958  }
959 
960  write_back_motion(h, sl, mb_type);
961  h->cur_pic.mb_type[mb_xy] = mb_type;
962  h->cur_pic.qscale_table[mb_xy] = sl->qscale;
963  h->slice_table[mb_xy] = sl->slice_num;
964  sl->prev_mb_skipped = 1;
965 }
966 
967 #endif /* AVCODEC_H264_MVPRED_H */