h264_direct.c
/*
 * H.26L/H.264/AVC/JVT/14496-10/... direct mb/block decoding
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 direct mb/block decoding.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "internal.h"
#include "avcodec.h"
#include "h264dec.h"
#include "h264_ps.h"
#include "mpegutils.h"
#include "rectangle.h"
#include "thread.h"

#include <assert.h>

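/*
 * Temporal direct-mode distance scale factor for list-0 reference i:
 * tb and td are the clipped POC distances (cur - ref0, ref1 - ref0),
 * combined as (tb * tx + 32) >> 6 with tx = (16384 + |td| / 2) / td and
 * clipped to [-1024, 1023]. td == 0 and long-term references yield the
 * neutral factor 256 (1.0 in the 8-bit fixed-point scale used below).
 */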
static int get_scale_factor(H264SliceContext *sl,
                            int poc, int poc1, int i)
{
    int poc0 = sl->ref_list[0][i].poc;
    int64_t pocdiff = poc1 - (int64_t)poc0;
    int td = av_clip_int8(pocdiff);

    if (pocdiff != (int)pocdiff)
        avpriv_request_sample(sl->h264->avctx, "pocdiff overflow\n");

    if (td == 0 || sl->ref_list[0][i].parent->long_ref) {
        return 256;
    } else {
        int64_t pocdiff0 = poc - (int64_t)poc0;
        int tb = av_clip_int8(pocdiff0);
        int tx = (16384 + (FFABS(td) >> 1)) / td;

        if (pocdiff0 != (int)pocdiff0)
            av_log(sl->h264->avctx, AV_LOG_DEBUG, "pocdiff0 overflow\n");

        return av_clip_intp2((tb * tx + 32) >> 6, 10);
    }
}

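/*
 * Precompute one distance scale factor per list-0 reference of the
 * current picture (and per-field factors when decoding an MBAFF frame),
 * for later use by temporal direct-mode prediction.
 */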
void ff_h264_direct_dist_scale_factor(const H264Context *const h,
                                      H264SliceContext *sl)
{
    const int poc  = FIELD_PICTURE(h) ? h->cur_pic_ptr->field_poc[h->picture_structure == PICT_BOTTOM_FIELD]
                                      : h->cur_pic_ptr->poc;
    const int poc1 = sl->ref_list[1][0].poc;
    int i, field;

    if (FRAME_MBAFF(h))
        for (field = 0; field < 2; field++) {
            const int poc  = h->cur_pic_ptr->field_poc[field];
            const int poc1 = sl->ref_list[1][0].parent->field_poc[field];
            for (i = 0; i < 2 * sl->ref_count[0]; i++)
                sl->dist_scale_factor_field[field][i ^ field] =
                    get_scale_factor(sl, poc, poc1, i + 16);
        }

    for (i = 0; i < sl->ref_count[0]; i++)
        sl->dist_scale_factor[i] = get_scale_factor(sl, poc, poc1, i);
}

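/*
 * Build a mapping from the reference indices used by the co-located
 * picture (ref_list[1][0], list 'list') to the current slice's list-0
 * indices, by matching the packed 4 * frame_num + parity values.
 * Entries 16..47 of the map cover MBAFF field references.
 */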
static void fill_colmap(const H264Context *h, H264SliceContext *sl,
                        int map[2][16 + 32], int list,
                        int field, int colfield, int mbafi)
{
    H264Picture *const ref1 = sl->ref_list[1][0].parent;
    int j, old_ref, rfield;
    int start  = mbafi ? 16                        : 0;
    int end    = mbafi ? 16 + 2 * sl->ref_count[0] : sl->ref_count[0];
    int interl = mbafi || h->picture_structure != PICT_FRAME;

    /* bogus; fills in for missing frames */
    memset(map[list], 0, sizeof(map[list]));

    for (rfield = 0; rfield < 2; rfield++) {
        for (old_ref = 0; old_ref < ref1->ref_count[colfield][list]; old_ref++) {
            int poc = ref1->ref_poc[colfield][list][old_ref];

            if (!interl)
                poc |= 3;
            // FIXME: store all MBAFF references so this is not needed
            else if (interl && (poc & 3) == 3)
                poc = (poc & ~3) + rfield + 1;

            for (j = start; j < end; j++) {
                if (4 * sl->ref_list[0][j].parent->frame_num +
                    (sl->ref_list[0][j].reference & 3) == poc) {
                    int cur_ref = mbafi ? (j - 16) ^ field : j;
                    if (ref1->mbaff)
                        map[list][2 * old_ref + (rfield ^ field) + 16] = cur_ref;
                    if (rfield == field || !interl)
                        map[list][old_ref] = cur_ref;
                    break;
                }
            }
        }
    }
}

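/*
 * Per-slice setup for direct-mode prediction: record the current
 * picture's reference counts and packed frame_num/parity values,
 * derive col_parity / col_fieldoff for locating the co-located
 * macroblock, and, for temporal direct B slices, build the
 * colocated-to-list0 reference maps.
 */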
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
{
    H264Ref *const ref1 = &sl->ref_list[1][0];
    H264Picture *const cur = h->cur_pic_ptr;
    int list, j, field;
    int sidx     = (h->picture_structure & 1) ^ 1;
    int ref1sidx = (ref1->reference      & 1) ^ 1;

    for (list = 0; list < sl->list_count; list++) {
        cur->ref_count[sidx][list] = sl->ref_count[list];
        for (j = 0; j < sl->ref_count[list]; j++)
            cur->ref_poc[sidx][list][j] = 4 * sl->ref_list[list][j].parent->frame_num +
                                          (sl->ref_list[list][j].reference & 3);
    }

    if (h->picture_structure == PICT_FRAME) {
        memcpy(cur->ref_count[1], cur->ref_count[0], sizeof(cur->ref_count[0]));
        memcpy(cur->ref_poc[1],   cur->ref_poc[0],   sizeof(cur->ref_poc[0]));
    }

    if (h->current_slice == 0) {
        cur->mbaff = FRAME_MBAFF(h);
    } else {
        av_assert0(cur->mbaff == FRAME_MBAFF(h));
    }

    sl->col_fieldoff = 0;

    if (sl->list_count != 2 || !sl->ref_count[1])
        return;

    if (h->picture_structure == PICT_FRAME) {
        int cur_poc  = h->cur_pic_ptr->poc;
        int *col_poc = sl->ref_list[1][0].parent->field_poc;
        if (col_poc[0] == INT_MAX && col_poc[1] == INT_MAX) {
            av_log(h->avctx, AV_LOG_ERROR, "co located POCs unavailable\n");
            sl->col_parity = 1;
        } else
            sl->col_parity = (FFABS(col_poc[0] - (int64_t)cur_poc) >=
                              FFABS(col_poc[1] - (int64_t)cur_poc));
        ref1sidx =
        sidx     = sl->col_parity;
    // FL -> FL & differ parity
    } else if (!(h->picture_structure & sl->ref_list[1][0].reference) &&
               !sl->ref_list[1][0].parent->mbaff) {
        sl->col_fieldoff = 2 * sl->ref_list[1][0].reference - 3;
    }

    if (sl->slice_type_nos != AV_PICTURE_TYPE_B || sl->direct_spatial_mv_pred)
        return;

    for (list = 0; list < 2; list++) {
        fill_colmap(h, sl, sl->map_col_to_list0, list, sidx, ref1sidx, 0);
        if (FRAME_MBAFF(h))
            for (field = 0; field < 2; field++)
                fill_colmap(h, sl, sl->map_col_to_list0_field[field], list, field,
                            field, 1);
    }
}

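/*
 * With frame-threaded decoding, wait until the referenced picture has
 * been decoded at least up to the macroblock row that is about to be
 * read from.
 */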
static void await_reference_mb_row(const H264Context *const h, H264Ref *ref,
                                   int mb_y)
{
    int ref_field         = ref->reference - 1;
    int ref_field_picture = ref->parent->field_picture;
    int ref_height        = 16 * h->mb_height >> ref_field_picture;

    if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_FRAME))
        return;

    /* FIXME: It can be safe to access mb stuff
     * even if pixels aren't deblocked yet. */

    ff_thread_await_progress(&ref->parent->tf,
                             FFMIN(16 * mb_y >> ref_field_picture,
                                   ref_height - 1),
                             ref_field_picture && ref_field);
}

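/*
 * Spatial direct-mode prediction: derive one reference index and motion
 * vector per list from the left, top and top-right neighbours (minimum
 * reference index, median motion vector), then zero the partitions for
 * which the co-located list-1 block is effectively static (colZeroFlag),
 * handling all frame/field combinations of the current and co-located
 * macroblocks.
 */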
static void pred_spatial_direct_motion(const H264Context *const h, H264SliceContext *sl,
                                       int *mb_type)
{
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = sl->mb_xy, mb_y = sl->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type = MB_TYPE_L0L1;
    int i8, i4;
    int ref[2];
    int mv[2];
    int list;

    assert(sl->ref_list[1][0].reference & 3);

    await_reference_mb_row(h, &sl->ref_list[1][0],
                           sl->mb_y + !!IS_INTERLACED(*mb_type));

#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16 | MB_TYPE_INTRA4x4 | \
                                MB_TYPE_INTRA16x16 | MB_TYPE_INTRA_PCM)

    /* ref = min(neighbors) */
    for (list = 0; list < 2; list++) {
        int left_ref     = sl->ref_cache[list][scan8[0] - 1];
        int top_ref      = sl->ref_cache[list][scan8[0] - 8];
        int refc         = sl->ref_cache[list][scan8[0] - 8 + 4];
        const int16_t *C = sl->mv_cache[list][scan8[0] - 8 + 4];
        if (refc == PART_NOT_AVAILABLE) {
            refc = sl->ref_cache[list][scan8[0] - 8 - 1];
            C    = sl->mv_cache[list][scan8[0] - 8 - 1];
        }
        ref[list] = FFMIN3((unsigned)left_ref,
                           (unsigned)top_ref,
                           (unsigned)refc);
        if (ref[list] >= 0) {
            /* This is just pred_motion() but with the cases removed that
             * cannot happen for direct blocks. */
            const int16_t *const A = sl->mv_cache[list][scan8[0] - 1];
            const int16_t *const B = sl->mv_cache[list][scan8[0] - 8];

            int match_count = (left_ref == ref[list]) +
                              (top_ref  == ref[list]) +
                              (refc     == ref[list]);

            if (match_count > 1) { // most common
                mv[list] = pack16to32(mid_pred(A[0], B[0], C[0]),
                                      mid_pred(A[1], B[1], C[1]));
            } else {
                assert(match_count == 1);
                if (left_ref == ref[list])
                    mv[list] = AV_RN32A(A);
                else if (top_ref == ref[list])
                    mv[list] = AV_RN32A(B);
                else
                    mv[list] = AV_RN32A(C);
            }
            av_assert2(ref[list] < (sl->ref_count[list] << !!FRAME_MBAFF(h)));
        } else {
            int mask = ~(MB_TYPE_L0 << (2 * list));
            mv[list]  = 0;
            ref[list] = -1;
            if (!is_b8x8)
                *mb_type &= mask;
            sub_mb_type &= mask;
        }
    }
    if (ref[0] < 0 && ref[1] < 0) {
        ref[0] = ref[1] = 0;
        if (!is_b8x8)
            *mb_type |= MB_TYPE_L0L1;
        sub_mb_type |= MB_TYPE_L0L1;
    }

    if (!(is_b8x8 | mv[0] | mv[1])) {
        fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
        fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
        fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
        fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
        *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 |
                                 MB_TYPE_P1L0 | MB_TYPE_P1L1)) |
                   MB_TYPE_16x16 | MB_TYPE_DIRECT2;
        return;
    }

    if (IS_INTERLACED(sl->ref_list[1][0].parent->mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) {                             //     AFR/FR    -> AFL/FL
            mb_y  = (sl->mb_y & ~1) + sl->col_parity;
            mb_xy = sl->mb_x +
                    ((sl->mb_y & ~1) + sl->col_parity) * h->mb_stride;
            b8_stride = 0;
        } else {
            mb_y  += sl->col_fieldoff;
            mb_xy += h->mb_stride * sl->col_fieldoff; // non-zero for FL -> FL & differ parity
        }
        goto single_col;
    } else {                                          // AFL/AFR/FR/FL -> AFR/FR
        if (IS_INTERLACED(*mb_type)) {                // AFL       /FL -> AFR/FR
            mb_y           = sl->mb_y & ~1;
            mb_xy          = (sl->mb_y & ~1) * h->mb_stride + sl->mb_x;
            mb_type_col[0] = sl->ref_list[1][0].parent->mb_type[mb_xy];
            mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy + h->mb_stride];
            b8_stride      = 2 + 4 * h->mb_stride;
            b4_stride     *= 6;
            if (IS_INTERLACED(mb_type_col[0]) !=
                IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if ((mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) &&
                (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) &&
                !is_b8x8) {
                *mb_type |= MB_TYPE_16x8 | MB_TYPE_DIRECT2; /* B_16x8 */
            } else {
                *mb_type |= MB_TYPE_8x8;
            }
        } else {                                      //     AFR/FR    -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy];

            sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if (!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)) {
                *mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_16x16 */
            } else if (!is_b8x8 &&
                       (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) {
                *mb_type |= MB_TYPE_DIRECT2 |
                            (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
            } else {
                if (!h->ps.sps->direct_8x8_inference_flag) {
                    /* FIXME: Save sub mb types from previous frames (or derive
                     * from MVs) so we know exactly what block size to use. */
                    sub_mb_type += (MB_TYPE_8x8 - MB_TYPE_16x16); /* B_SUB_4x4 */
                }
                *mb_type |= MB_TYPE_8x8;
            }
        }
    }

    await_reference_mb_row(h, &sl->ref_list[1][0], mb_y);

    l1mv0  = (void*)&sl->ref_list[1][0].parent->motion_val[0][h->mb2b_xy[mb_xy]];
    l1mv1  = (void*)&sl->ref_list[1][0].parent->motion_val[1][h->mb2b_xy[mb_xy]];
    l1ref0 = &sl->ref_list[1][0].parent->ref_index[0][4 * mb_xy];
    l1ref1 = &sl->ref_list[1][0].parent->ref_index[1][4 * mb_xy];
    if (!b8_stride) {
        if (sl->mb_y & 1) {
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0  += 2 * b4_stride;
            l1mv1  += 2 * b4_stride;
        }
    }

    if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {
        int n = 0;
        for (i8 = 0; i8 < 4; i8++) {
            int x8  = i8 & 1;
            int y8  = i8 >> 1;
            int xy8 = x8     + y8 * b8_stride;
            int xy4 = x8 * 3 + y8 * b4_stride;
            int a, b;

            if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
                continue;
            sl->sub_mb_type[i8] = sub_mb_type;

            fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                           (uint8_t)ref[0], 1);
            fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8,
                           (uint8_t)ref[1], 1);
            if (!IS_INTRA(mb_type_col[y8]) && !sl->ref_list[1][0].parent->long_ref &&
                ((l1ref0[xy8] == 0 &&
                  FFABS(l1mv0[xy4][0]) <= 1 &&
                  FFABS(l1mv0[xy4][1]) <= 1) ||
                 (l1ref0[xy8] < 0 &&
                  l1ref1[xy8] == 0 &&
                  FFABS(l1mv1[xy4][0]) <= 1 &&
                  FFABS(l1mv1[xy4][1]) <= 1))) {
                a =
                b = 0;
                if (ref[0] > 0)
                    a = mv[0];
                if (ref[1] > 0)
                    b = mv[1];
                n++;
            } else {
                a = mv[0];
                b = mv[1];
            }
            fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, a, 4);
            fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, b, 4);
        }
        if (!is_b8x8 && !(n & 3))
            *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 |
                                     MB_TYPE_P1L0 | MB_TYPE_P1L1)) |
                       MB_TYPE_16x16 | MB_TYPE_DIRECT2;
    } else if (IS_16X16(*mb_type)) {
        int a, b;

        fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
        fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
        if (!IS_INTRA(mb_type_col[0]) && !sl->ref_list[1][0].parent->long_ref &&
            ((l1ref0[0] == 0 &&
              FFABS(l1mv0[0][0]) <= 1 &&
              FFABS(l1mv0[0][1]) <= 1) ||
             (l1ref0[0] < 0 && !l1ref1[0] &&
              FFABS(l1mv1[0][0]) <= 1 &&
              FFABS(l1mv1[0][1]) <= 1 &&
              h->x264_build > 33U))) {
            a = b = 0;
            if (ref[0] > 0)
                a = mv[0];
            if (ref[1] > 0)
                b = mv[1];
        } else {
            a = mv[0];
            b = mv[1];
        }
        fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, a, 4);
        fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, b, 4);
    } else {
        int n = 0;
        for (i8 = 0; i8 < 4; i8++) {
            const int x8 = i8 & 1;
            const int y8 = i8 >> 1;

            if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
                continue;
            sl->sub_mb_type[i8] = sub_mb_type;

            fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, mv[0], 4);
            fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, mv[1], 4);
            fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                           (uint8_t)ref[0], 1);
            fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8,
                           (uint8_t)ref[1], 1);

            assert(b8_stride == 2);
            /* col_zero_flag */
            if (!IS_INTRA(mb_type_col[0]) && !sl->ref_list[1][0].parent->long_ref &&
                (l1ref0[i8] == 0 ||
                 (l1ref0[i8] < 0 &&
                  l1ref1[i8] == 0 &&
                  h->x264_build > 33U))) {
                const int16_t (*l1mv)[2] = l1ref0[i8] == 0 ? l1mv0 : l1mv1;
                if (IS_SUB_8X8(sub_mb_type)) {
                    const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride];
                    if (FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1) {
                        if (ref[0] == 0)
                            fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2,
                                           8, 0, 4);
                        if (ref[1] == 0)
                            fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2,
                                           8, 0, 4);
                        n += 4;
                    }
                } else {
                    int m = 0;
                    for (i4 = 0; i4 < 4; i4++) {
                        const int16_t *mv_col = l1mv[x8 * 2 + (i4 & 1) +
                                                     (y8 * 2 + (i4 >> 1)) * b4_stride];
                        if (FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1) {
                            if (ref[0] == 0)
                                AV_ZERO32(sl->mv_cache[0][scan8[i8 * 4 + i4]]);
                            if (ref[1] == 0)
                                AV_ZERO32(sl->mv_cache[1][scan8[i8 * 4 + i4]]);
                            m++;
                        }
                    }
                    if (!(m & 3))
                        sl->sub_mb_type[i8] += MB_TYPE_16x16 - MB_TYPE_8x8;
                    n += m;
                }
            }
        }
        if (!is_b8x8 && !(n & 15))
            *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 |
                                     MB_TYPE_P1L0 | MB_TYPE_P1L1)) |
                       MB_TYPE_16x16 | MB_TYPE_DIRECT2;
    }
}

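/*
 * Temporal direct-mode prediction: map the co-located block's reference
 * to the current list 0 via map_col_to_list0 and scale its motion vector
 * with the precomputed distance scale factor:
 *   mvL0 = (dist_scale_factor * mvCol + 128) >> 8,  mvL1 = mvL0 - mvCol.
 * When current and co-located macroblocks differ in frame/field coding,
 * the vertical component is additionally halved or doubled (y_shift).
 */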
static void pred_temp_direct_motion(const H264Context *const h, H264SliceContext *sl,
                                    int *mb_type)
{
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = sl->mb_xy, mb_y = sl->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type;
    int i8, i4;

    assert(sl->ref_list[1][0].reference & 3);

    await_reference_mb_row(h, &sl->ref_list[1][0],
                           sl->mb_y + !!IS_INTERLACED(*mb_type));

    if (IS_INTERLACED(sl->ref_list[1][0].parent->mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) {                             //     AFR/FR    -> AFL/FL
            mb_y  = (sl->mb_y & ~1) + sl->col_parity;
            mb_xy = sl->mb_x +
                    ((sl->mb_y & ~1) + sl->col_parity) * h->mb_stride;
            b8_stride = 0;
        } else {
            mb_y  += sl->col_fieldoff;
            mb_xy += h->mb_stride * sl->col_fieldoff; // non-zero for FL -> FL & differ parity
        }
        goto single_col;
    } else {                                          // AFL/AFR/FR/FL -> AFR/FR
        if (IS_INTERLACED(*mb_type)) {                // AFL       /FL -> AFR/FR
            mb_y           = sl->mb_y & ~1;
            mb_xy          = sl->mb_x + (sl->mb_y & ~1) * h->mb_stride;
            mb_type_col[0] = sl->ref_list[1][0].parent->mb_type[mb_xy];
            mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy + h->mb_stride];
            b8_stride      = 2 + 4 * h->mb_stride;
            b4_stride     *= 6;
            if (IS_INTERLACED(mb_type_col[0]) !=
                IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                          MB_TYPE_DIRECT2;                /* B_SUB_8x8 */

            if ((mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) &&
                (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) &&
                !is_b8x8) {
                *mb_type |= MB_TYPE_16x8 | MB_TYPE_L0L1 |
                            MB_TYPE_DIRECT2;              /* B_16x8 */
            } else {
                *mb_type |= MB_TYPE_8x8 | MB_TYPE_L0L1;
            }
        } else {                                      //     AFR/FR    -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy];

            sub_mb_type = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                          MB_TYPE_DIRECT2;                /* B_SUB_8x8 */
            if (!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)) {
                *mb_type |= MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                            MB_TYPE_DIRECT2;              /* B_16x16 */
            } else if (!is_b8x8 &&
                       (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) {
                *mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 |
                            (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
            } else {
                if (!h->ps.sps->direct_8x8_inference_flag) {
                    /* FIXME: save sub mb types from previous frames (or derive
                     * from MVs) so we know exactly what block size to use */
                    sub_mb_type = MB_TYPE_8x8 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                                  MB_TYPE_DIRECT2;        /* B_SUB_4x4 */
                }
                *mb_type |= MB_TYPE_8x8 | MB_TYPE_L0L1;
            }
        }
    }

    await_reference_mb_row(h, &sl->ref_list[1][0], mb_y);

    l1mv0  = (void*)&sl->ref_list[1][0].parent->motion_val[0][h->mb2b_xy[mb_xy]];
    l1mv1  = (void*)&sl->ref_list[1][0].parent->motion_val[1][h->mb2b_xy[mb_xy]];
    l1ref0 = &sl->ref_list[1][0].parent->ref_index[0][4 * mb_xy];
    l1ref1 = &sl->ref_list[1][0].parent->ref_index[1][4 * mb_xy];
    if (!b8_stride) {
        if (sl->mb_y & 1) {
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0  += 2 * b4_stride;
            l1mv1  += 2 * b4_stride;
        }
    }

    {
        const int *map_col_to_list0[2] = { sl->map_col_to_list0[0],
                                           sl->map_col_to_list0[1] };
        const int *dist_scale_factor = sl->dist_scale_factor;
        int ref_offset;

        if (FRAME_MBAFF(h) && IS_INTERLACED(*mb_type)) {
            map_col_to_list0[0] = sl->map_col_to_list0_field[sl->mb_y & 1][0];
            map_col_to_list0[1] = sl->map_col_to_list0_field[sl->mb_y & 1][1];
            dist_scale_factor   = sl->dist_scale_factor_field[sl->mb_y & 1];
        }
        ref_offset = (sl->ref_list[1][0].parent->mbaff << 4) & (mb_type_col[0] >> 3);

        if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {
            int y_shift = 2 * !IS_INTERLACED(*mb_type);
            assert(h->ps.sps->direct_8x8_inference_flag);

            for (i8 = 0; i8 < 4; i8++) {
                const int x8 = i8 & 1;
                const int y8 = i8 >> 1;
                int ref0, scale;
                const int16_t (*l1mv)[2] = l1mv0;

                if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
                    continue;
                sl->sub_mb_type[i8] = sub_mb_type;

                fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                if (IS_INTRA(mb_type_col[y8])) {
                    fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    continue;
                }

                ref0 = l1ref0[x8 + y8 * b8_stride];
                if (ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else {
                    ref0 = map_col_to_list0[1][l1ref1[x8 + y8 * b8_stride] +
                                               ref_offset];
                    l1mv = l1mv1;
                }
                scale = dist_scale_factor[ref0];
                fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                               ref0, 1);

                {
                    const int16_t *mv_col = l1mv[x8 * 3 + y8 * b4_stride];
                    int my_col            = (mv_col[1] * (1 << y_shift)) / 2;
                    int mx                = (scale * mv_col[0] + 128) >> 8;
                    int my                = (scale * my_col    + 128) >> 8;
                    fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx, my), 4);
                    fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx - mv_col[0], my - my_col), 4);
                }
            }
            return;
        }

        /* one-to-one mv scaling */

        if (IS_16X16(*mb_type)) {
            int ref, mv0, mv1;

            fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
            if (IS_INTRA(mb_type_col[0])) {
                ref = mv0 = mv1 = 0;
            } else {
                const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0] + ref_offset]
                                                : map_col_to_list0[1][l1ref1[0] + ref_offset];
                const int scale = dist_scale_factor[ref0];
                const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
                int mv_l0[2];
                mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                ref      = ref0;
                mv0      = pack16to32(mv_l0[0], mv_l0[1]);
                mv1      = pack16to32(mv_l0[0] - mv_col[0], mv_l0[1] - mv_col[1]);
            }
            fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
            fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
            fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
        } else {
            for (i8 = 0; i8 < 4; i8++) {
                const int x8 = i8 & 1;
                const int y8 = i8 >> 1;
                int ref0, scale;
                const int16_t (*l1mv)[2] = l1mv0;

                if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
                    continue;
                sl->sub_mb_type[i8] = sub_mb_type;
                fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                if (IS_INTRA(mb_type_col[0])) {
                    fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    continue;
                }

                assert(b8_stride == 2);
                ref0 = l1ref0[i8];
                if (ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else {
                    ref0 = map_col_to_list0[1][l1ref1[i8] + ref_offset];
                    l1mv = l1mv1;
                }
                scale = dist_scale_factor[ref0];

                fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                               ref0, 1);
                if (IS_SUB_8X8(sub_mb_type)) {
                    const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride];
                    int mx                = (scale * mv_col[0] + 128) >> 8;
                    int my                = (scale * mv_col[1] + 128) >> 8;
                    fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx, my), 4);
                    fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx - mv_col[0], my - mv_col[1]), 4);
                } else {
                    for (i4 = 0; i4 < 4; i4++) {
                        const int16_t *mv_col = l1mv[x8 * 2 + (i4 & 1) +
                                                     (y8 * 2 + (i4 >> 1)) * b4_stride];
                        int16_t *mv_l0 = sl->mv_cache[0][scan8[i8 * 4 + i4]];
                        mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                        mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                        AV_WN32A(sl->mv_cache[1][scan8[i8 * 4 + i4]],
                                 pack16to32(mv_l0[0] - mv_col[0],
                                            mv_l0[1] - mv_col[1]));
                    }
                }
            }
        }
    }
}

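/*
 * Entry point: dispatch to spatial or temporal direct prediction
 * according to the slice's direct_spatial_mv_pred flag.
 */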
void ff_h264_pred_direct_motion(const H264Context *const h, H264SliceContext *sl,
                                int *mb_type)
{
    if (sl->direct_spatial_mv_pred)
        pred_spatial_direct_motion(h, sl, mb_type);
    else
        pred_temp_direct_motion(h, sl, mb_type);
}