h264_mb.c
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 macroblock decoding
25  */
26 
27 #include <stdint.h>
28 
29 #include "config.h"
30 
31 #include "libavutil/common.h"
32 #include "libavutil/intreadwrite.h"
33 #include "avcodec.h"
34 #include "h264dec.h"
35 #include "h264_ps.h"
36 #include "qpeldsp.h"
37 #include "rectangle.h"
38 #include "threadframe.h"
39 
40 static int get_lowest_part_list_y(H264SliceContext *sl,
41  int n, int height, int y_offset, int list)
42 {
43  int raw_my = sl->mv_cache[list][scan8[n]][1];
44  int filter_height_down = (raw_my & 3) ? 3 : 0;
45  int full_my = (raw_my >> 2) + y_offset;
46  int bottom = full_my + filter_height_down + height;
47 
48  av_assert2(height >= 0);
49 
50  return FFMAX(0, bottom);
51 }
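
The arithmetic above works in quarter-pel units: the low two bits of the cached vertical MV are the fractional phase, and any non-zero phase means the 6-tap luma filter reads three extra rows below the block. The following standalone sketch repeats the same computation with illustrative names that are not part of FFmpeg:

#include <stdio.h>

/* Lowest source row touched by a block of given height at a
 * quarter-pel vertical motion vector (illustrative sketch). */
static int lowest_row(int raw_my, int y_offset, int height)
{
    int extra   = (raw_my & 3) ? 3 : 0;      /* fractional phase -> 3 filter rows below */
    int full_my = (raw_my >> 2) + y_offset;  /* integer part of the MV */
    int bottom  = full_my + extra + height;
    return bottom < 0 ? 0 : bottom;
}

int main(void)
{
    /* my = +5 quarter-pels: integer 1, phase 1, so 3 extra rows are needed */
    printf("%d\n", lowest_row(5, 32, 16));   /* prints 52 */
    return 0;
}
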
52 
53 static inline void get_lowest_part_y(const H264Context *h, H264SliceContext *sl,
54  int16_t refs[2][48], int n,
55  int height, int y_offset, int list0,
56  int list1, int *nrefs)
57 {
58  int my;
59 
60  y_offset += 16 * (sl->mb_y >> MB_FIELD(sl));
61 
62  if (list0) {
63  int ref_n = sl->ref_cache[0][scan8[n]];
64  H264Ref *ref = &sl->ref_list[0][ref_n];
65 
66  // Error resilience puts the current picture in the ref list.
67  // Don't try to wait on these as it will cause a deadlock.
68  // Fields can wait on each other, though.
69  if (ref->parent->tf.progress != h->cur_pic.tf.progress ||
70  (ref->reference & 3) != h->picture_structure) {
71  my = get_lowest_part_list_y(sl, n, height, y_offset, 0);
72  if (refs[0][ref_n] < 0)
73  nrefs[0] += 1;
74  refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
75  }
76  }
77 
78  if (list1) {
79  int ref_n = sl->ref_cache[1][scan8[n]];
80  H264Ref *ref = &sl->ref_list[1][ref_n];
81 
82  if (ref->parent->tf.progress != h->cur_pic.tf.progress ||
83  (ref->reference & 3) != h->picture_structure) {
84  my = get_lowest_part_list_y(sl, n, height, y_offset, 1);
85  if (refs[1][ref_n] < 0)
86  nrefs[1] += 1;
87  refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
88  }
89  }
90 }
91 
92 /**
93  * Wait until all reference frames are available for MC operations.
94  *
95  * @param h the H.264 context
96  */
97 static void await_references(const H264Context *h, H264SliceContext *sl)
98 {
99  const int mb_xy = sl->mb_xy;
100  const int mb_type = h->cur_pic.mb_type[mb_xy];
101  int16_t refs[2][48];
102  int nrefs[2] = { 0 };
103  int ref, list;
104 
105  memset(refs, -1, sizeof(refs));
106 
107  if (IS_16X16(mb_type)) {
108  get_lowest_part_y(h, sl, refs, 0, 16, 0,
109  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
110  } else if (IS_16X8(mb_type)) {
111  get_lowest_part_y(h, sl, refs, 0, 8, 0,
112  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
113  get_lowest_part_y(h, sl, refs, 8, 8, 8,
114  IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
115  } else if (IS_8X16(mb_type)) {
116  get_lowest_part_y(h, sl, refs, 0, 16, 0,
117  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
118  get_lowest_part_y(h, sl, refs, 4, 16, 0,
119  IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
120  } else {
121  int i;
122 
123  av_assert2(IS_8X8(mb_type));
124 
125  for (i = 0; i < 4; i++) {
126  const int sub_mb_type = sl->sub_mb_type[i];
127  const int n = 4 * i;
128  int y_offset = (i & 2) << 2;
129 
130  if (IS_SUB_8X8(sub_mb_type)) {
131  get_lowest_part_y(h, sl, refs, n, 8, y_offset,
132  IS_DIR(sub_mb_type, 0, 0),
133  IS_DIR(sub_mb_type, 0, 1),
134  nrefs);
135  } else if (IS_SUB_8X4(sub_mb_type)) {
136  get_lowest_part_y(h, sl, refs, n, 4, y_offset,
137  IS_DIR(sub_mb_type, 0, 0),
138  IS_DIR(sub_mb_type, 0, 1),
139  nrefs);
140  get_lowest_part_y(h, sl, refs, n + 2, 4, y_offset + 4,
141  IS_DIR(sub_mb_type, 0, 0),
142  IS_DIR(sub_mb_type, 0, 1),
143  nrefs);
144  } else if (IS_SUB_4X8(sub_mb_type)) {
145  get_lowest_part_y(h, sl, refs, n, 8, y_offset,
146  IS_DIR(sub_mb_type, 0, 0),
147  IS_DIR(sub_mb_type, 0, 1),
148  nrefs);
149  get_lowest_part_y(h, sl, refs, n + 1, 8, y_offset,
150  IS_DIR(sub_mb_type, 0, 0),
151  IS_DIR(sub_mb_type, 0, 1),
152  nrefs);
153  } else {
154  int j;
155  av_assert2(IS_SUB_4X4(sub_mb_type));
156  for (j = 0; j < 4; j++) {
157  int sub_y_offset = y_offset + 2 * (j & 2);
158  get_lowest_part_y(h, sl, refs, n + j, 4, sub_y_offset,
159  IS_DIR(sub_mb_type, 0, 0),
160  IS_DIR(sub_mb_type, 0, 1),
161  nrefs);
162  }
163  }
164  }
165  }
166 
167  for (list = sl->list_count - 1; list >= 0; list--)
168  for (ref = 0; ref < 48 && nrefs[list]; ref++) {
169  int row = refs[list][ref];
170  if (row >= 0) {
171  H264Ref *ref_pic = &sl->ref_list[list][ref];
172  int ref_field = ref_pic->reference - 1;
173  int ref_field_picture = ref_pic->parent->field_picture;
174  int pic_height = 16 * h->mb_height >> ref_field_picture;
175 
176  row <<= MB_MBAFF(sl);
177  nrefs[list]--;
178 
179  if (!FIELD_PICTURE(h) && ref_field_picture) { // frame referencing two fields
180  av_assert2((ref_pic->parent->reference & 3) == 3);
181  ff_thread_await_progress(&ref_pic->parent->tf,
182  FFMIN((row >> 1) - !(row & 1),
183  pic_height - 1),
184  1);
185  ff_thread_await_progress(&ref_pic->parent->tf,
186  FFMIN((row >> 1), pic_height - 1),
187  0);
188  } else if (FIELD_PICTURE(h) && !ref_field_picture) { // field referencing one field of a frame
189  ff_thread_await_progress(&ref_pic->parent->tf,
190  FFMIN(row * 2 + ref_field,
191  pic_height - 1),
192  0);
193  } else if (FIELD_PICTURE(h)) {
194  ff_thread_await_progress(&ref_pic->parent->tf,
195  FFMIN(row, pic_height - 1),
196  ref_field);
197  } else {
198  ff_thread_await_progress(&ref_pic->parent->tf,
199  FFMIN(row, pic_height - 1),
200  0);
201  }
202  }
203  }
204 }
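
await_references() only decides which row of each reference frame must already be reconstructed; the blocking itself is done by ff_thread_await_progress() on the reference's ThreadFrame. Below is a minimal sketch of that producer/consumer progress pattern using plain pthreads and hypothetical names; it is not FFmpeg's frame-threading implementation:

#include <pthread.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int progress;                 /* highest row reported so far */
} FrameProgress;

/* Producer side: the thread decoding a reference reports finished rows. */
static void report_progress(FrameProgress *p, int row)
{
    pthread_mutex_lock(&p->lock);
    if (row > p->progress)
        p->progress = row;
    pthread_cond_broadcast(&p->cond);
    pthread_mutex_unlock(&p->lock);
}

/* Consumer side: block until at least `row` rows of the reference exist. */
static void await_progress(FrameProgress *p, int row)
{
    pthread_mutex_lock(&p->lock);
    while (p->progress < row)
        pthread_cond_wait(&p->cond, &p->lock);
    pthread_mutex_unlock(&p->lock);
}
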
205 
206 static av_always_inline void mc_dir_part(const H264Context *h, H264SliceContext *sl,
207  H264Ref *pic,
208  int n, int square, int height,
209  int delta, int list,
210  uint8_t *dest_y, uint8_t *dest_cb,
211  uint8_t *dest_cr,
212  int src_x_offset, int src_y_offset,
213  const qpel_mc_func *qpix_op,
214  h264_chroma_mc_func chroma_op,
215  int pixel_shift, int chroma_idc)
216 {
217  const int mx = sl->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
218  int my = sl->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
219  const int luma_xy = (mx & 3) + ((my & 3) << 2);
220  ptrdiff_t offset = (mx >> 2) * (1 << pixel_shift) + (my >> 2) * sl->mb_linesize;
221  uint8_t *src_y = pic->data[0] + offset;
222  uint8_t *src_cb, *src_cr;
223  int extra_width = 0;
224  int extra_height = 0;
225  int emu = 0;
226  const int full_mx = mx >> 2;
227  const int full_my = my >> 2;
228  const int pic_width = 16 * h->mb_width;
229  const int pic_height = 16 * h->mb_height >> MB_FIELD(sl);
230  int ysh;
231 
232  if (mx & 7)
233  extra_width -= 3;
234  if (my & 7)
235  extra_height -= 3;
236 
237  if (full_mx < 0 - extra_width ||
238  full_my < 0 - extra_height ||
239  full_mx + 16 /*FIXME*/ > pic_width + extra_width ||
240  full_my + 16 /*FIXME*/ > pic_height + extra_height) {
241  h->vdsp.emulated_edge_mc(sl->edge_emu_buffer,
242  src_y - (2 << pixel_shift) - 2 * sl->mb_linesize,
243  sl->mb_linesize, sl->mb_linesize,
244  16 + 5, 16 + 5 /*FIXME*/, full_mx - 2,
245  full_my - 2, pic_width, pic_height);
246  src_y = sl->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
247  emu = 1;
248  }
249 
250  qpix_op[luma_xy](dest_y, src_y, sl->mb_linesize); // FIXME try variable height perhaps?
251  if (!square)
252  qpix_op[luma_xy](dest_y + delta, src_y + delta, sl->mb_linesize);
253 
254  if (CONFIG_GRAY && h->flags & AV_CODEC_FLAG_GRAY)
255  return;
256 
257  if (chroma_idc == 3 /* yuv444 */) {
258  src_cb = pic->data[1] + offset;
259  if (emu) {
260  h->vdsp.emulated_edge_mc(sl->edge_emu_buffer,
261  src_cb - (2 << pixel_shift) - 2 * sl->mb_linesize,
262  sl->mb_linesize, sl->mb_linesize,
263  16 + 5, 16 + 5 /*FIXME*/,
264  full_mx - 2, full_my - 2,
265  pic_width, pic_height);
266  src_cb = sl->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
267  }
268  qpix_op[luma_xy](dest_cb, src_cb, sl->mb_linesize); // FIXME try variable height perhaps?
269  if (!square)
270  qpix_op[luma_xy](dest_cb + delta, src_cb + delta, sl->mb_linesize);
271 
272  src_cr = pic->data[2] + offset;
273  if (emu) {
274  h->vdsp.emulated_edge_mc(sl->edge_emu_buffer,
275  src_cr - (2 << pixel_shift) - 2 * sl->mb_linesize,
276  sl->mb_linesize, sl->mb_linesize,
277  16 + 5, 16 + 5 /*FIXME*/,
278  full_mx - 2, full_my - 2,
279  pic_width, pic_height);
280  src_cr = sl->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
281  }
282  qpix_op[luma_xy](dest_cr, src_cr, sl->mb_linesize); // FIXME try variable height perhaps?
283  if (!square)
284  qpix_op[luma_xy](dest_cr + delta, src_cr + delta, sl->mb_linesize);
285  return;
286  }
287 
288  ysh = 3 - (chroma_idc == 2 /* yuv422 */);
289  if (chroma_idc == 1 /* yuv420 */ && MB_FIELD(sl)) {
290  // chroma offset when predicting from a field of opposite parity
291  my += 2 * ((sl->mb_y & 1) - (pic->reference - 1));
292  emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
293  }
294 
295  src_cb = pic->data[1] + ((mx >> 3) * (1 << pixel_shift)) +
296  (my >> ysh) * sl->mb_uvlinesize;
297  src_cr = pic->data[2] + ((mx >> 3) * (1 << pixel_shift)) +
298  (my >> ysh) * sl->mb_uvlinesize;
299 
300  if (emu) {
301  h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src_cb,
302  sl->mb_uvlinesize, sl->mb_uvlinesize,
303  9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
304  pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
305  src_cb = sl->edge_emu_buffer;
306  }
307  chroma_op(dest_cb, src_cb, sl->mb_uvlinesize,
308  height >> (chroma_idc == 1 /* yuv420 */),
309  mx & 7, ((unsigned)my << (chroma_idc == 2 /* yuv422 */)) & 7);
310 
311  if (emu) {
312  h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src_cr,
313  sl->mb_uvlinesize, sl->mb_uvlinesize,
314  9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
315  pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
316  src_cr = sl->edge_emu_buffer;
317  }
318  chroma_op(dest_cr, src_cr, sl->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */),
319  mx & 7, ((unsigned)my << (chroma_idc == 2 /* yuv422 */)) & 7);
320 }
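
mc_dir_part() splits each quarter-pel MV into an integer pixel offset and a 4-bit fractional index (luma_xy) that selects one of the 16 qpel interpolation functions; reads that would fall outside the picture go through vdsp.emulated_edge_mc() into edge_emu_buffer first. A self-contained example of the index/offset split, assuming the usual arithmetic right shift for negative values (which the decoder relies on):

#include <stdio.h>

int main(void)
{
    int mx = -9, my = 14;                     /* quarter-pel MV components */
    int luma_xy = (mx & 3) + ((my & 3) << 2); /* fractional phases packed into 0..15 */
    int full_mx = mx >> 2, full_my = my >> 2; /* integer pixel offsets */
    printf("qpel index %d, integer offset (%d,%d)\n", luma_xy, full_mx, full_my);
    /* prints: qpel index 11, integer offset (-3,3) */
    return 0;
}
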
321 
322 static av_always_inline void mc_part_std(const H264Context *h, H264SliceContext *sl,
323  int n, int square,
324  int height, int delta,
325  uint8_t *dest_y, uint8_t *dest_cb,
326  uint8_t *dest_cr,
327  int x_offset, int y_offset,
328  const qpel_mc_func *qpix_put,
329  h264_chroma_mc_func chroma_put,
330  const qpel_mc_func *qpix_avg,
331  h264_chroma_mc_func chroma_avg,
332  int list0, int list1,
333  int pixel_shift, int chroma_idc)
334 {
335  const qpel_mc_func *qpix_op = qpix_put;
336  h264_chroma_mc_func chroma_op = chroma_put;
337 
338  dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
339  if (chroma_idc == 3 /* yuv444 */) {
340  dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
341  dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
342  } else if (chroma_idc == 2 /* yuv422 */) {
343  dest_cb += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
344  dest_cr += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
345  } else { /* yuv420 */
346  dest_cb += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
347  dest_cr += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
348  }
349  x_offset += 8 * sl->mb_x;
350  y_offset += 8 * (sl->mb_y >> MB_FIELD(sl));
351 
352  if (list0) {
353  H264Ref *ref = &sl->ref_list[0][sl->ref_cache[0][scan8[n]]];
354  mc_dir_part(h, sl, ref, n, square, height, delta, 0,
355  dest_y, dest_cb, dest_cr, x_offset, y_offset,
356  qpix_op, chroma_op, pixel_shift, chroma_idc);
357 
358  qpix_op = qpix_avg;
359  chroma_op = chroma_avg;
360  }
361 
362  if (list1) {
363  H264Ref *ref = &sl->ref_list[1][sl->ref_cache[1][scan8[n]]];
364  mc_dir_part(h, sl, ref, n, square, height, delta, 1,
365  dest_y, dest_cb, dest_cr, x_offset, y_offset,
366  qpix_op, chroma_op, pixel_shift, chroma_idc);
367  }
368 }
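
For bi-predicted partitions, mc_part_std() runs list 0 through the "put" functions and then swaps in the "avg" functions for list 1, so the second pass rounds-and-averages into the pixels already written. A minimal sketch of that put/avg convention in generic C (not the optimized qpel/chroma DSP routines):

#include <stdint.h>
#include <stddef.h>

/* First prediction: overwrite the destination block. */
static void put_block(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
                      int w, int h)
{
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
            dst[y * stride + x] = src[y * stride + x];
}

/* Second prediction: rounded average with what is already there. */
static void avg_block(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
                      int w, int h)
{
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
            dst[y * stride + x] = (dst[y * stride + x] + src[y * stride + x] + 1) >> 1;
}
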
369 
370 static av_always_inline void mc_part_weighted(const H264Context *h, H264SliceContext *sl,
371  int n, int square,
372  int height, int delta,
373  uint8_t *dest_y, uint8_t *dest_cb,
374  uint8_t *dest_cr,
375  int x_offset, int y_offset,
376  const qpel_mc_func *qpix_put,
377  h264_chroma_mc_func chroma_put,
378  h264_weight_func luma_weight_op,
379  h264_weight_func chroma_weight_op,
380  h264_biweight_func luma_weight_avg,
381  h264_biweight_func chroma_weight_avg,
382  int list0, int list1,
383  int pixel_shift, int chroma_idc)
384 {
385  int chroma_height;
386 
387  dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
388  if (chroma_idc == 3 /* yuv444 */) {
389  chroma_height = height;
390  chroma_weight_avg = luma_weight_avg;
391  chroma_weight_op = luma_weight_op;
392  dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
393  dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
394  } else if (chroma_idc == 2 /* yuv422 */) {
395  chroma_height = height;
396  dest_cb += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
397  dest_cr += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
398  } else { /* yuv420 */
399  chroma_height = height >> 1;
400  dest_cb += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
401  dest_cr += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
402  }
403  x_offset += 8 * sl->mb_x;
404  y_offset += 8 * (sl->mb_y >> MB_FIELD(sl));
405 
406  if (list0 && list1) {
407  /* don't optimize for luma-only case, since B-frames usually
408  * use implicit weights => chroma too. */
409  uint8_t *tmp_cb = sl->bipred_scratchpad;
410  uint8_t *tmp_cr = sl->bipred_scratchpad + (16 << pixel_shift);
411  uint8_t *tmp_y = sl->bipred_scratchpad + 16 * sl->mb_uvlinesize;
412  int refn0 = sl->ref_cache[0][scan8[n]];
413  int refn1 = sl->ref_cache[1][scan8[n]];
414 
415  mc_dir_part(h, sl, &sl->ref_list[0][refn0], n, square, height, delta, 0,
416  dest_y, dest_cb, dest_cr,
417  x_offset, y_offset, qpix_put, chroma_put,
418  pixel_shift, chroma_idc);
419  mc_dir_part(h, sl, &sl->ref_list[1][refn1], n, square, height, delta, 1,
420  tmp_y, tmp_cb, tmp_cr,
421  x_offset, y_offset, qpix_put, chroma_put,
422  pixel_shift, chroma_idc);
423 
424  if (sl->pwt.use_weight == 2) {
425  int weight0 = sl->pwt.implicit_weight[refn0][refn1][sl->mb_y & 1];
426  int weight1 = 64 - weight0;
427  luma_weight_avg(dest_y, tmp_y, sl->mb_linesize,
428  height, 5, weight0, weight1, 0);
429  if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
430  chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize,
431  chroma_height, 5, weight0, weight1, 0);
432  chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize,
433  chroma_height, 5, weight0, weight1, 0);
434  }
435  } else {
436  luma_weight_avg(dest_y, tmp_y, sl->mb_linesize, height,
437  sl->pwt.luma_log2_weight_denom,
438  sl->pwt.luma_weight[refn0][0][0],
439  sl->pwt.luma_weight[refn1][1][0],
440  sl->pwt.luma_weight[refn0][0][1] +
441  sl->pwt.luma_weight[refn1][1][1]);
442  if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
443  chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize, chroma_height,
444  sl->pwt.chroma_log2_weight_denom,
445  sl->pwt.chroma_weight[refn0][0][0][0],
446  sl->pwt.chroma_weight[refn1][1][0][0],
447  sl->pwt.chroma_weight[refn0][0][0][1] +
448  sl->pwt.chroma_weight[refn1][1][0][1]);
449  chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize, chroma_height,
450  sl->pwt.chroma_log2_weight_denom,
451  sl->pwt.chroma_weight[refn0][0][1][0],
452  sl->pwt.chroma_weight[refn1][1][1][0],
453  sl->pwt.chroma_weight[refn0][0][1][1] +
454  sl->pwt.chroma_weight[refn1][1][1][1]);
455  }
456  }
457  } else {
458  int list = list1 ? 1 : 0;
459  int refn = sl->ref_cache[list][scan8[n]];
460  H264Ref *ref = &sl->ref_list[list][refn];
461  mc_dir_part(h, sl, ref, n, square, height, delta, list,
462  dest_y, dest_cb, dest_cr, x_offset, y_offset,
463  qpix_put, chroma_put, pixel_shift, chroma_idc);
464 
465  luma_weight_op(dest_y, sl->mb_linesize, height,
466  sl->pwt.luma_log2_weight_denom,
467  sl->pwt.luma_weight[refn][list][0],
468  sl->pwt.luma_weight[refn][list][1]);
469  if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
470  if (sl->pwt.use_weight_chroma) {
471  chroma_weight_op(dest_cb, sl->mb_uvlinesize, chroma_height,
472  sl->pwt.chroma_log2_weight_denom,
473  sl->pwt.chroma_weight[refn][list][0][0],
474  sl->pwt.chroma_weight[refn][list][0][1]);
475  chroma_weight_op(dest_cr, sl->mb_uvlinesize, chroma_height,
476  sl->pwt.chroma_log2_weight_denom,
477  sl->pwt.chroma_weight[refn][list][1][0],
478  sl->pwt.chroma_weight[refn][list][1][1]);
479  }
480  }
481  }
482 }
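
In the implicit-weight path above (use_weight == 2) the two weights always sum to 64 and the log2 denominator passed to the biweight function is 5, so with zero offsets each output sample reduces to (w0*p0 + w1*p1 + 32) >> 6. A worked example of that average; the sample values are illustrative only:

#include <stdio.h>

static int clip_uint8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

int main(void)
{
    int w0 = 48, w1 = 64 - w0;   /* implicit weights, e.g. list 1 much closer in time */
    int p0 = 200, p1 = 100;      /* co-located samples from the two references */
    int pred = clip_uint8((w0 * p0 + w1 * p1 + 32) >> 6);
    printf("%d\n", pred);        /* (9600 + 1600 + 32) >> 6 = 175 */
    return 0;
}
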
483 
484 static av_always_inline void prefetch_motion(const H264Context *h, H264SliceContext *sl,
485  int list, int pixel_shift,
486  int chroma_idc)
487 {
488  /* fetch pixels for estimated mv 4 macroblocks ahead
489  * optimized for 64-byte cache lines */
490  const int refn = sl->ref_cache[list][scan8[0]];
491  if (refn >= 0) {
492  const int mx = (sl->mv_cache[list][scan8[0]][0] >> 2) + 16 * sl->mb_x + 8;
493  const int my = (sl->mv_cache[list][scan8[0]][1] >> 2) + 16 * sl->mb_y;
494  uint8_t **src = sl->ref_list[list][refn].data;
495  int off = mx * (1<< pixel_shift) +
496  (my + (sl->mb_x & 3) * 4) * sl->mb_linesize +
497  (64 << pixel_shift);
498  h->vdsp.prefetch(src[0] + off, sl->linesize, 4);
499  if (chroma_idc == 3 /* yuv444 */) {
500  h->vdsp.prefetch(src[1] + off, sl->linesize, 4);
501  h->vdsp.prefetch(src[2] + off, sl->linesize, 4);
502  } else {
503  off= ((mx>>1)+64) * (1<<pixel_shift) + ((my>>1) + (sl->mb_x&7))*sl->uvlinesize;
504  h->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
505  }
506  }
507 }
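
prefetch_motion() issues software prefetches for the rows the motion compensation of an upcoming macroblock is expected to read, estimated from the MV of the first partition and biased 64 bytes ahead so whole cache lines are pulled in. A sketch of the underlying primitive, assuming GCC/Clang's __builtin_prefetch rather than the vdsp.prefetch callback:

#include <stdint.h>
#include <stddef.h>

/* Touch the cache lines of `rows` consecutive picture rows starting at src.
 * rw=0 means read, locality=1 means "keep around a little while". */
static void prefetch_rows(const uint8_t *src, ptrdiff_t stride, int rows)
{
    for (int i = 0; i < rows; i++)
        __builtin_prefetch(src + i * stride, 0, 1);
}
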
508 
509 static av_always_inline void xchg_mb_border(const H264Context *h, H264SliceContext *sl,
510  uint8_t *src_y,
511  uint8_t *src_cb, uint8_t *src_cr,
512  int linesize, int uvlinesize,
513  int xchg, int chroma444,
514  int simple, int pixel_shift)
515 {
516  int deblock_topleft;
517  int deblock_top;
518  int top_idx = 1;
519  uint8_t *top_border_m1;
520  uint8_t *top_border;
521 
522  if (!simple && FRAME_MBAFF(h)) {
523  if (sl->mb_y & 1) {
524  if (!MB_MBAFF(sl))
525  return;
526  } else {
527  top_idx = MB_MBAFF(sl) ? 0 : 1;
528  }
529  }
530 
531  if (sl->deblocking_filter == 2) {
532  deblock_topleft = h->slice_table[sl->mb_xy - 1 - h->mb_stride] == sl->slice_num;
533  deblock_top = sl->top_type;
534  } else {
535  deblock_topleft = (sl->mb_x > 0);
536  deblock_top = (sl->mb_y > !!MB_FIELD(sl));
537  }
538 
539  src_y -= linesize + 1 + pixel_shift;
540  src_cb -= uvlinesize + 1 + pixel_shift;
541  src_cr -= uvlinesize + 1 + pixel_shift;
542 
543  top_border_m1 = sl->top_borders[top_idx][sl->mb_x - 1];
544  top_border = sl->top_borders[top_idx][sl->mb_x];
545 
546 #define XCHG(a, b, xchg) \
547  if (pixel_shift) { \
548  if (xchg) { \
549  AV_SWAP64(b + 0, a + 0); \
550  AV_SWAP64(b + 8, a + 8); \
551  } else { \
552  AV_COPY128(b, a); \
553  } \
554  } else if (xchg) \
555  AV_SWAP64(b, a); \
556  else \
557  AV_COPY64(b, a);
558 
559  if (deblock_top) {
560  if (deblock_topleft) {
561  XCHG(top_border_m1 + (8 << pixel_shift),
562  src_y - (7 << pixel_shift), 1);
563  }
564  XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
565  XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
566  if (sl->mb_x + 1 < h->mb_width) {
567  XCHG(sl->top_borders[top_idx][sl->mb_x + 1],
568  src_y + (17 << pixel_shift), 1);
569  }
570  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
571  if (chroma444) {
572  if (deblock_topleft) {
573  XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
574  XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
575  }
576  XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
577  XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
578  XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
579  XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
580  if (sl->mb_x + 1 < h->mb_width) {
581  XCHG(sl->top_borders[top_idx][sl->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
582  XCHG(sl->top_borders[top_idx][sl->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
583  }
584  } else {
585  if (deblock_topleft) {
586  XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
587  XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
588  }
589  XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1);
590  XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1);
591  }
592  }
593  }
594 }
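
The XCHG macro above either copies or swaps 8 bytes (16 at high bit depth) between the row just above the macroblock and the per-row top_borders scratch buffer, so the deblocking filter can later see the unfiltered top edge. The real primitives are the AV_COPY64/AV_SWAP64 macros from intreadwrite.h; a plain-C stand-in looks like this:

#include <string.h>
#include <stdint.h>

static void copy64(uint8_t *dst, const uint8_t *src)
{
    memcpy(dst, src, 8);
}

static void swap64(uint8_t *a, uint8_t *b)
{
    uint8_t tmp[8];
    memcpy(tmp, a, 8);
    memcpy(a, b, 8);
    memcpy(b, tmp, 8);
}
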
595 
596 static av_always_inline int dctcoef_get(int16_t *mb, int high_bit_depth,
597  int index)
598 {
599  if (high_bit_depth) {
600  return AV_RN32A(((int32_t *)mb) + index);
601  } else
602  return AV_RN16A(mb + index);
603 }
604 
605 static av_always_inline void dctcoef_set(int16_t *mb, int high_bit_depth,
606  int index, int value)
607 {
608  if (high_bit_depth) {
609  AV_WN32A(((int32_t *)mb) + index, value);
610  } else
611  AV_WN16A(mb + index, value);
612 }
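
dctcoef_get()/dctcoef_set() exist because the same sl->mb scratch buffer holds int16_t coefficients at 8-bit depth but is addressed as int32_t at higher bit depths (the decoder goes through the AV_RN32A/AV_WN32A macros for those accesses). A standalone illustration of the two views of one buffer, using memcpy for the type-punned access:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
    int16_t coefs[32] = { 0 };     /* one scratch buffer, two interpretations */
    int32_t v = 100000, r;

    /* high-bit-depth view: element 3 occupies bytes 12..15 */
    memcpy((uint8_t *)coefs + 3 * sizeof(int32_t), &v, sizeof(v));
    memcpy(&r, (uint8_t *)coefs + 3 * sizeof(int32_t), sizeof(r));
    printf("%d\n", (int)r);        /* 100000 */

    coefs[3] = 1234;               /* 8-bit-depth view: element 3 is bytes 6..7 */
    printf("%d\n", coefs[3]);      /* 1234 */
    return 0;
}
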
613 
614 static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
615  H264SliceContext *sl,
616  int mb_type, int simple,
617  int transform_bypass,
618  int pixel_shift,
619  const int *block_offset,
620  int linesize,
621  uint8_t *dest_y, int p)
622 {
623  void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
624  void (*idct_dc_add)(uint8_t *dst, int16_t *block, int stride);
625  int i;
626  int qscale = p == 0 ? sl->qscale : sl->chroma_qp[p - 1];
627  block_offset += 16 * p;
628  if (IS_INTRA4x4(mb_type)) {
629  if (IS_8x8DCT(mb_type)) {
630  if (transform_bypass) {
631  idct_dc_add =
632  idct_add = h->h264dsp.h264_add_pixels8_clear;
633  } else {
634  idct_dc_add = h->h264dsp.h264_idct8_dc_add;
635  idct_add = h->h264dsp.h264_idct8_add;
636  }
637  for (i = 0; i < 16; i += 4) {
638  uint8_t *const ptr = dest_y + block_offset[i];
639  const int dir = sl->intra4x4_pred_mode_cache[scan8[i]];
640  if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
641  if (h->x264_build < 151U) {
642  h->hpc.pred8x8l_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
643  } else
644  h->hpc.pred8x8l_filter_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift),
645  (sl->topleft_samples_available << i) & 0x8000,
646  (sl->topright_samples_available << i) & 0x4000, linesize);
647  } else {
648  const int nnz = sl->non_zero_count_cache[scan8[i + p * 16]];
649  h->hpc.pred8x8l[dir](ptr, (sl->topleft_samples_available << i) & 0x8000,
650  (sl->topright_samples_available << i) & 0x4000, linesize);
651  if (nnz) {
652  if (nnz == 1 && dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256))
653  idct_dc_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
654  else
655  idct_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
656  }
657  }
658  }
659  } else {
660  if (transform_bypass) {
661  idct_dc_add =
662  idct_add = h->h264dsp.h264_add_pixels4_clear;
663  } else {
664  idct_dc_add = h->h264dsp.h264_idct_dc_add;
665  idct_add = h->h264dsp.h264_idct_add;
666  }
667  for (i = 0; i < 16; i++) {
668  uint8_t *const ptr = dest_y + block_offset[i];
669  const int dir = sl->intra4x4_pred_mode_cache[scan8[i]];
670 
671  if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
672  h->hpc.pred4x4_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
673  } else {
674  uint8_t *topright;
675  int nnz, tr;
676  uint64_t tr_high;
677  if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
678  const int topright_avail = (sl->topright_samples_available << i) & 0x8000;
679  av_assert2(sl->mb_y || linesize <= block_offset[i]);
680  if (!topright_avail) {
681  if (pixel_shift) {
682  tr_high = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
683  topright = (uint8_t *)&tr_high;
684  } else {
685  tr = ptr[3 - linesize] * 0x01010101u;
686  topright = (uint8_t *)&tr;
687  }
688  } else
689  topright = ptr + (4 << pixel_shift) - linesize;
690  } else
691  topright = NULL;
692 
693  h->hpc.pred4x4[dir](ptr, topright, linesize);
694  nnz = sl->non_zero_count_cache[scan8[i + p * 16]];
695  if (nnz) {
696  if (nnz == 1 && dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256))
697  idct_dc_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
698  else
699  idct_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
700  }
701  }
702  }
703  }
704  } else {
705  h->hpc.pred16x16[sl->intra16x16_pred_mode](dest_y, linesize);
706  if (sl->non_zero_count_cache[scan8[LUMA_DC_BLOCK_INDEX + p]]) {
707  if (!transform_bypass)
708  h->h264dsp.h264_luma_dc_dequant_idct(sl->mb + (p * 256 << pixel_shift),
709  sl->mb_luma_dc[p],
710  h->ps.pps->dequant4_coeff[p][qscale][0]);
711  else {
712  static const uint8_t dc_mapping[16] = {
713  0 * 16, 1 * 16, 4 * 16, 5 * 16,
714  2 * 16, 3 * 16, 6 * 16, 7 * 16,
715  8 * 16, 9 * 16, 12 * 16, 13 * 16,
716  10 * 16, 11 * 16, 14 * 16, 15 * 16
717  };
718  for (i = 0; i < 16; i++)
719  dctcoef_set(sl->mb + (p * 256 << pixel_shift),
720  pixel_shift, dc_mapping[i],
721  dctcoef_get(sl->mb_luma_dc[p],
722  pixel_shift, i));
723  }
724  }
725  }
726 }
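
When a 4x4 block carries exactly one non-zero coefficient and it is the DC one, the code above calls the cheaper idct_dc_add variant: the inverse transform collapses to adding one rounded value to every pixel. A sketch of that shortcut for the 8-bit case; the (dc + 32) >> 6 scaling matches the usual final rounding of the H.264 4x4 transform, but treat this as illustrative rather than the exact DSP routine:

#include <stdint.h>

static int clip_uint8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

static void idct4x4_dc_add(uint8_t *dst, int dc_coef, int stride)
{
    int dc = (dc_coef + 32) >> 6;            /* final rounding of the 4x4 IDCT */
    for (int y = 0; y < 4; y++)
        for (int x = 0; x < 4; x++)
            dst[y * stride + x] = clip_uint8(dst[y * stride + x] + dc);
}
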
727 
728 static av_always_inline void hl_decode_mb_idct_luma(const H264Context *h, H264SliceContext *sl,
729  int mb_type, int simple,
730  int transform_bypass,
731  int pixel_shift,
732  const int *block_offset,
733  int linesize,
734  uint8_t *dest_y, int p)
735 {
736  void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
737  int i;
738  block_offset += 16 * p;
739  if (!IS_INTRA4x4(mb_type)) {
740  if (IS_INTRA16x16(mb_type)) {
741  if (transform_bypass) {
742  if (h->ps.sps->profile_idc == 244 &&
743  (sl->intra16x16_pred_mode == VERT_PRED8x8 ||
744  sl->intra16x16_pred_mode == HOR_PRED8x8)) {
745  h->hpc.pred16x16_add[sl->intra16x16_pred_mode](dest_y, block_offset,
746  sl->mb + (p * 256 << pixel_shift),
747  linesize);
748  } else {
749  for (i = 0; i < 16; i++)
750  if (sl->non_zero_count_cache[scan8[i + p * 16]] ||
751  dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256))
752  h->h264dsp.h264_add_pixels4_clear(dest_y + block_offset[i],
753  sl->mb + (i * 16 + p * 256 << pixel_shift),
754  linesize);
755  }
756  } else {
757  h->h264dsp.h264_idct_add16intra(dest_y, block_offset,
758  sl->mb + (p * 256 << pixel_shift),
759  linesize,
760  sl->non_zero_count_cache + p * 5 * 8);
761  }
762  } else if (sl->cbp & 15) {
763  if (transform_bypass) {
764  const int di = IS_8x8DCT(mb_type) ? 4 : 1;
765  idct_add = IS_8x8DCT(mb_type) ? h->h264dsp.h264_add_pixels8_clear
766  : h->h264dsp.h264_add_pixels4_clear;
767  for (i = 0; i < 16; i += di)
768  if (sl->non_zero_count_cache[scan8[i + p * 16]])
769  idct_add(dest_y + block_offset[i],
770  sl->mb + (i * 16 + p * 256 << pixel_shift),
771  linesize);
772  } else {
773  if (IS_8x8DCT(mb_type))
774  h->h264dsp.h264_idct8_add4(dest_y, block_offset,
775  sl->mb + (p * 256 << pixel_shift),
776  linesize,
777  sl->non_zero_count_cache + p * 5 * 8);
778  else
779  h->h264dsp.h264_idct_add16(dest_y, block_offset,
780  sl->mb + (p * 256 << pixel_shift),
781  linesize,
782  sl->non_zero_count_cache + p * 5 * 8);
783  }
784  }
785  }
786 }
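
The inter path above adds residuals only for blocks whose non_zero_count_cache entry says coefficients were coded, stepping by 4 blocks when the macroblock uses the 8x8 transform. A compact, purely hypothetical sketch of that gating loop (names and the flat nnz array are illustrative, not FFmpeg's cache layout):

#include <stdint.h>

typedef void (*add_residual_fn)(uint8_t *dst, int16_t *coefs, int stride);

static void add_coded_blocks(uint8_t *dst, int16_t *coefs, int stride,
                             const uint8_t *nnz, const int *block_offset,
                             int step, add_residual_fn add)
{
    for (int i = 0; i < 16; i += step)       /* step 1 for 4x4, 4 for 8x8 DCT */
        if (nnz[i])
            add(dst + block_offset[i], coefs + i * 16, stride);
}
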
787 
788 #define BITS 8
789 #define SIMPLE 1
790 #include "h264_mb_template.c"
791 
792 #undef BITS
793 #define BITS 16
794 #include "h264_mb_template.c"
795 
796 #undef SIMPLE
797 #define SIMPLE 0
798 #include "h264_mb_template.c"
799 
800 void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
801 {
802  const int mb_xy = sl->mb_xy;
803  const int mb_type = h->cur_pic.mb_type[mb_xy];
804  int is_complex = CONFIG_SMALL || sl->is_complex ||
805  IS_INTRA_PCM(mb_type) || sl->qscale == 0;
806 
807  if (CHROMA444(h)) {
808  if (is_complex || h->pixel_shift)
809  hl_decode_mb_444_complex(h, sl);
810  else
811  hl_decode_mb_444_simple_8(h, sl);
812  } else if (is_complex) {
813  hl_decode_mb_complex(h, sl);
814  } else if (h->pixel_shift) {
815  hl_decode_mb_simple_16(h, sl);
816  } else
817  hl_decode_mb_simple_8(h, sl);
818 }
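
The three includes of h264_mb_template.c compile the same reconstruction body as 8-bit-simple, 16-bit-simple and generic "complex" variants, and ff_h264_hl_decode_mb() picks one per macroblock. A single-file sketch of the idea, shown with a macro instead of a separate template file; all names here are hypothetical:

#include <stdio.h>

/* One body, stamped out per bit depth. */
#define DEFINE_DECODE(bits)                                   \
static void decode_mb_##bits(void)                            \
{                                                             \
    printf("reconstructing with %d-bit pixels\n", bits);      \
}

DEFINE_DECODE(8)
DEFINE_DECODE(16)

/* Run-time dispatch, mirroring the pixel_shift test in the wrapper above. */
static void decode_mb(int bit_depth)
{
    if (bit_depth > 8)
        decode_mb_16();
    else
        decode_mb_8();
}

int main(void)
{
    decode_mb(8);
    decode_mb(10);   /* 10-bit content uses the 16-bit path */
    return 0;
}
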