cavs.c
1 /*
2  * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
3  * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Chinese AVS video (AVS1-P2, JiZhun profile) decoder
25  * @author Stefan Gehrer <stefan.gehrer@gmx.de>
26  */
27 
28 #include "libavutil/mem_internal.h"
29 
30 #include "avcodec.h"
31 #include "get_bits.h"
32 #include "golomb.h"
33 #include "h264chroma.h"
34 #include "idctdsp.h"
35 #include "internal.h"
36 #include "mathops.h"
37 #include "qpeldsp.h"
38 #include "cavs.h"
39 
40 static const uint8_t alpha_tab[64] = {
41  0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3,
42  4, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 18, 20,
43  22, 24, 26, 28, 30, 33, 33, 35, 35, 36, 37, 37, 39, 39, 42, 44,
44  46, 48, 50, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
45 };
46 
47 static const uint8_t beta_tab[64] = {
48  0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
49  2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6,
50  6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 12, 13, 14,
51  15, 16, 17, 18, 19, 20, 21, 22, 23, 23, 24, 24, 25, 25, 26, 27
52 };
53 
54 static const uint8_t tc_tab[64] = {
55  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
56  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
57  2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4,
58  5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9
59 };
60 
61 /** mark block as unavailable, i.e. out of picture
62  * or not yet decoded */
63 static const cavs_vector un_mv = { 0, 0, 1, NOT_AVAIL };
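/* note: the initializer follows the cavs_vector field order { x, y, dist, ref }
 * (see cavs.h), so un_mv is a zero vector with a temporal distance of 1 and
 * its reference index set to NOT_AVAIL, which is what the prediction code
 * below tests for. */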
64 
65 static const int8_t left_modifier_l[8] = { 0, -1, 6, -1, -1, 7, 6, 7 };
66 static const int8_t top_modifier_l[8] = { -1, 1, 5, -1, -1, 5, 7, 7 };
67 static const int8_t left_modifier_c[7] = { 5, -1, 2, -1, 6, 5, 6 };
68 static const int8_t top_modifier_c[7] = { 4, 1, -1, -1, 4, 6, 6 };
69 
70 /*****************************************************************************
71  *
72  * in-loop deblocking filter
73  *
74  ****************************************************************************/
75 
76 static inline int get_bs(cavs_vector *mvP, cavs_vector *mvQ, int b)
77 {
78  if ((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA))
79  return 2;
80  if((abs(mvP->x - mvQ->x) >= 4) ||
81  (abs(mvP->y - mvQ->y) >= 4) ||
82  (mvP->ref != mvQ->ref))
83  return 1;
84  if (b) {
85  mvP += MV_BWD_OFFS;
86  mvQ += MV_BWD_OFFS;
87  if((abs(mvP->x - mvQ->x) >= 4) ||
88  (abs(mvP->y - mvQ->y) >= 4) ||
89  (mvP->ref != mvQ->ref))
90  return 1;
91  }
92  return 0;
93 }
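/* Worked example (illustrative values, not part of the original source):
 * two forward vectors with the same reference that differ by one integer-pel
 * step in x (4 quarter-pel units) still trigger filtering with medium
 * strength:
 *
 *     cavs_vector p = { 0, 0, 2, 0 };   // { x, y, dist, ref }
 *     cavs_vector q = { 4, 0, 2, 0 };
 *     get_bs(&p, &q, 0);                // |dx| >= 4  ->  returns 1
 *
 * bs is 2 whenever either side is intra-coded, 1 for diverging motion or
 * differing references, and 0 (edge left untouched) otherwise. */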
94 
95 #define SET_PARAMS \
96  alpha = alpha_tab[av_clip_uintp2(qp_avg + h->alpha_offset, 6)]; \
97  beta = beta_tab[av_clip_uintp2(qp_avg + h->beta_offset, 6)]; \
98  tc = tc_tab[av_clip_uintp2(qp_avg + h->alpha_offset, 6)];
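/* note: av_clip_uintp2(v, 6) clamps v to the 6-bit range 0..63, so the
 * offset-adjusted average QP always lands inside the 64-entry tables above;
 * e.g. qp_avg + alpha_offset == 70 selects entry 63 and a negative sum
 * selects entry 0. tc is looked up with the same alpha-adjusted index. */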
99 
100 /**
101  * in-loop deblocking filter for a single macroblock
102  *
103  * boundary strength (bs) mapping:
104  *
105  * --4---5--
106  * 0   2   |
107  * | 6 | 7 |
108  * 1   3   |
109  * ---------
110  */
111 void ff_cavs_filter(AVSContext *h, enum cavs_mb mb_type)
112 {
113  uint8_t bs[8];
114  int qp_avg, alpha, beta, tc;
115  int i;
116 
117  /* save un-deblocked lines */
118  h->topleft_border_y = h->top_border_y[h->mbx * 16 + 15];
119  h->topleft_border_u = h->top_border_u[h->mbx * 10 + 8];
120  h->topleft_border_v = h->top_border_v[h->mbx * 10 + 8];
121  memcpy(&h->top_border_y[h->mbx * 16], h->cy + 15 * h->l_stride, 16);
122  memcpy(&h->top_border_u[h->mbx * 10 + 1], h->cu + 7 * h->c_stride, 8);
123  memcpy(&h->top_border_v[h->mbx * 10 + 1], h->cv + 7 * h->c_stride, 8);
124  for (i = 0; i < 8; i++) {
125  h->left_border_y[i * 2 + 1] = *(h->cy + 15 + (i * 2 + 0) * h->l_stride);
126  h->left_border_y[i * 2 + 2] = *(h->cy + 15 + (i * 2 + 1) * h->l_stride);
127  h->left_border_u[i + 1] = *(h->cu + 7 + i * h->c_stride);
128  h->left_border_v[i + 1] = *(h->cv + 7 + i * h->c_stride);
129  }
130  if (!h->loop_filter_disable) {
131  /* determine bs */
132  if (mb_type == I_8X8)
133  memset(bs, 2, 8);
134  else {
135  memset(bs, 0, 8);
136  if (ff_cavs_partition_flags[mb_type] & SPLITV) {
137  bs[2] = get_bs(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1], mb_type > P_8X8);
138  bs[3] = get_bs(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3], mb_type > P_8X8);
139  }
140  if (ff_cavs_partition_flags[mb_type] & SPLITH) {
141  bs[6] = get_bs(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2], mb_type > P_8X8);
142  bs[7] = get_bs(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3], mb_type > P_8X8);
143  }
144  bs[0] = get_bs(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0], mb_type > P_8X8);
145  bs[1] = get_bs(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2], mb_type > P_8X8);
146  bs[4] = get_bs(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0], mb_type > P_8X8);
147  bs[5] = get_bs(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1], mb_type > P_8X8);
148  }
149  if (AV_RN64(bs)) {
150  if (h->flags & A_AVAIL) {
151  qp_avg = (h->qp + h->left_qp + 1) >> 1;
152  SET_PARAMS;
153  h->cdsp.cavs_filter_lv(h->cy, h->l_stride, alpha, beta, tc, bs[0], bs[1]);
154  qp_avg = (ff_cavs_chroma_qp[h->qp] + ff_cavs_chroma_qp[h->left_qp] + 1) >> 1;
155  SET_PARAMS;
156  h->cdsp.cavs_filter_cv(h->cu, h->c_stride, alpha, beta, tc, bs[0], bs[1]);
157  h->cdsp.cavs_filter_cv(h->cv, h->c_stride, alpha, beta, tc, bs[0], bs[1]);
158  }
159  qp_avg = h->qp;
160  SET_PARAMS;
161  h->cdsp.cavs_filter_lv(h->cy + 8, h->l_stride, alpha, beta, tc, bs[2], bs[3]);
162  h->cdsp.cavs_filter_lh(h->cy + 8 * h->l_stride, h->l_stride, alpha, beta, tc, bs[6], bs[7]);
163 
164  if (h->flags & B_AVAIL) {
165  qp_avg = (h->qp + h->top_qp[h->mbx] + 1) >> 1;
166  SET_PARAMS;
167  h->cdsp.cavs_filter_lh(h->cy, h->l_stride, alpha, beta, tc, bs[4], bs[5]);
168  qp_avg = (ff_cavs_chroma_qp[h->qp] + ff_cavs_chroma_qp[h->top_qp[h->mbx]] + 1) >> 1;
169  SET_PARAMS;
170  h->cdsp.cavs_filter_ch(h->cu, h->c_stride, alpha, beta, tc, bs[4], bs[5]);
171  h->cdsp.cavs_filter_ch(h->cv, h->c_stride, alpha, beta, tc, bs[4], bs[5]);
172  }
173  }
174  }
175  h->left_qp = h->qp;
176  h->top_qp[h->mbx] = h->qp;
177 }
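/* note on the bs[] layout used above (cf. the diagram in the comment):
 * bs[0]/bs[1] are the two halves of the macroblock's left vertical edge,
 * bs[2]/bs[3] the internal vertical edge at x = 8, bs[4]/bs[5] the top
 * horizontal edge and bs[6]/bs[7] the internal horizontal edge at y = 8.
 * The un-deblocked top/left samples are saved first because the intra
 * predictors in ff_cavs_load_intra_pred_luma/_chroma work on unfiltered
 * neighbours. */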
178 
179 #undef SET_PARAMS
180 
181 /*****************************************************************************
182  *
183  * spatial intra prediction
184  *
185  ****************************************************************************/
186 
187 void ff_cavs_load_intra_pred_luma(AVSContext *h, uint8_t *top,
188  uint8_t **left, int block)
189 {
190  int i;
191 
192  switch (block) {
193  case 0:
194  *left = h->left_border_y;
195  h->left_border_y[0] = h->left_border_y[1];
196  memset(&h->left_border_y[17], h->left_border_y[16], 9);
197  memcpy(&top[1], &h->top_border_y[h->mbx * 16], 16);
198  top[17] = top[16];
199  top[0] = top[1];
200  if ((h->flags & A_AVAIL) && (h->flags & B_AVAIL))
201  h->left_border_y[0] = top[0] = h->topleft_border_y;
202  break;
203  case 1:
204  *left = h->intern_border_y;
205  for (i = 0; i < 8; i++)
206  h->intern_border_y[i + 1] = *(h->cy + 7 + i * h->l_stride);
207  memset(&h->intern_border_y[9], h->intern_border_y[8], 9);
208  h->intern_border_y[0] = h->intern_border_y[1];
209  memcpy(&top[1], &h->top_border_y[h->mbx * 16 + 8], 8);
210  if (h->flags & C_AVAIL)
211  memcpy(&top[9], &h->top_border_y[(h->mbx + 1) * 16], 8);
212  else
213  memset(&top[9], top[8], 9);
214  top[17] = top[16];
215  top[0] = top[1];
216  if (h->flags & B_AVAIL)
217  h->intern_border_y[0] = top[0] = h->top_border_y[h->mbx * 16 + 7];
218  break;
219  case 2:
220  *left = &h->left_border_y[8];
221  memcpy(&top[1], h->cy + 7 * h->l_stride, 16);
222  top[17] = top[16];
223  top[0] = top[1];
224  if (h->flags & A_AVAIL)
225  top[0] = h->left_border_y[8];
226  break;
227  case 3:
228  *left = &h->intern_border_y[8];
229  for (i = 0; i < 8; i++)
230  h->intern_border_y[i + 9] = *(h->cy + 7 + (i + 8) * h->l_stride);
231  memset(&h->intern_border_y[17], h->intern_border_y[16], 9);
232  memcpy(&top[0], h->cy + 7 + 7 * h->l_stride, 9);
233  memset(&top[9], top[8], 9);
234  break;
235  }
236 }
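/* note: top[] is filled as 1 + 16 + 1 samples: top[0] is the top-left corner,
 * top[1..8] the row above the 8x8 block, top[9..16] its top-right neighbours
 * and top[17] a padding copy of top[16]; *left is set up the same way with
 * left[0] as the corner sample. The diagonal predictors below rely on this
 * padding so that LOWPASS() always has a defined neighbour to read. */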
237 
238 void ff_cavs_load_intra_pred_chroma(AVSContext *h)
239 {
240  /* extend borders by one pixel */
241  h->left_border_u[9] = h->left_border_u[8];
242  h->left_border_v[9] = h->left_border_v[8];
243  if(h->flags & C_AVAIL) {
244  h->top_border_u[h->mbx*10 + 9] = h->top_border_u[h->mbx*10 + 11];
245  h->top_border_v[h->mbx*10 + 9] = h->top_border_v[h->mbx*10 + 11];
246  } else {
247  h->top_border_u[h->mbx * 10 + 9] = h->top_border_u[h->mbx * 10 + 8];
248  h->top_border_v[h->mbx * 10 + 9] = h->top_border_v[h->mbx * 10 + 8];
249  }
250  if((h->flags & A_AVAIL) && (h->flags & B_AVAIL)) {
251  h->top_border_u[h->mbx * 10] = h->left_border_u[0] = h->topleft_border_u;
252  h->top_border_v[h->mbx * 10] = h->left_border_v[0] = h->topleft_border_v;
253  } else {
254  h->left_border_u[0] = h->left_border_u[1];
255  h->left_border_v[0] = h->left_border_v[1];
256  h->top_border_u[h->mbx * 10] = h->top_border_u[h->mbx * 10 + 1];
257  h->top_border_v[h->mbx * 10] = h->top_border_v[h->mbx * 10 + 1];
258  }
259 }
260 
261 static void intra_pred_vert(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
262 {
263  int y;
264  uint64_t a = AV_RN64(&top[1]);
265  for (y = 0; y < 8; y++)
266  *((uint64_t *)(d + y * stride)) = a;
267 }
268 
269 static void intra_pred_horiz(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
270 {
271  int y;
272  uint64_t a;
273  for (y = 0; y < 8; y++) {
274  a = left[y + 1] * 0x0101010101010101ULL;
275  *((uint64_t *)(d + y * stride)) = a;
276  }
277 }
278 
279 static void intra_pred_dc_128(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
280 {
281  int y;
282  uint64_t a = 0x8080808080808080ULL;
283  for (y = 0; y < 8; y++)
284  *((uint64_t *)(d + y * stride)) = a;
285 }
286 
287 static void intra_pred_plane(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
288 {
289  int x, y, ia;
290  int ih = 0;
291  int iv = 0;
292  const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
293 
294  for (x = 0; x < 4; x++) {
295  ih += (x + 1) * (top[5 + x] - top[3 - x]);
296  iv += (x + 1) * (left[5 + x] - left[3 - x]);
297  }
298  ia = (top[8] + left[8]) << 4;
299  ih = (17 * ih + 16) >> 5;
300  iv = (17 * iv + 16) >> 5;
301  for (y = 0; y < 8; y++)
302  for (x = 0; x < 8; x++)
303  d[y * stride + x] = cm[(ia + (x - 3) * ih + (y - 3) * iv + 16) >> 5];
304 }
305 
306 #define LOWPASS(ARRAY, INDEX) \
307  ((ARRAY[(INDEX) - 1] + 2 * ARRAY[(INDEX)] + ARRAY[(INDEX) + 1] + 2) >> 2)
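/* LOWPASS() is the (1, 2, 1)/4 smoothing kernel with rounding, e.g.
 *
 *     LOWPASS(top, 3) == (top[2] + 2 * top[3] + top[4] + 2) >> 2
 *
 * The directional predictors below run it along the top row and/or the left
 * column before projecting those samples into the 8x8 block. */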
308 
309 static void intra_pred_lp(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
310 {
311  int x, y;
312  for (y = 0; y < 8; y++)
313  for (x = 0; x < 8; x++)
314  d[y * stride + x] = (LOWPASS(top, x + 1) + LOWPASS(left, y + 1)) >> 1;
315 }
316 
317 static void intra_pred_down_left(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
318 {
319  int x, y;
320  for (y = 0; y < 8; y++)
321  for (x = 0; x < 8; x++)
322  d[y * stride + x] = (LOWPASS(top, x + y + 2) + LOWPASS(left, x + y + 2)) >> 1;
323 }
324 
325 static void intra_pred_down_right(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
326 {
327  int x, y;
328  for (y = 0; y < 8; y++)
329  for (x = 0; x < 8; x++)
330  if (x == y)
331  d[y * stride + x] = (left[1] + 2 * top[0] + top[1] + 2) >> 2;
332  else if (x > y)
333  d[y * stride + x] = LOWPASS(top, x - y);
334  else
335  d[y * stride + x] = LOWPASS(left, y - x);
336 }
337 
338 static void intra_pred_lp_left(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
339 {
340  int x, y;
341  for (y = 0; y < 8; y++)
342  for (x = 0; x < 8; x++)
343  d[y * stride + x] = LOWPASS(left, y + 1);
344 }
345 
346 static void intra_pred_lp_top(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
347 {
348  int x, y;
349  for (y = 0; y < 8; y++)
350  for (x = 0; x < 8; x++)
351  d[y * stride + x] = LOWPASS(top, x + 1);
352 }
353 
354 #undef LOWPASS
355 
356 static inline void modify_pred(const int8_t *mod_table, int *mode)
357 {
358  *mode = mod_table[*mode];
359  if (*mode < 0) {
360  av_log(NULL, AV_LOG_ERROR, "Illegal intra prediction mode\n");
361  *mode = 0;
362  }
363 }
364 
365 void ff_cavs_modify_mb_i(AVSContext *h, int *pred_mode_uv)
366 {
367  /* save pred modes before they get modified */
368  h->pred_mode_Y[3] = h->pred_mode_Y[5];
369  h->pred_mode_Y[6] = h->pred_mode_Y[8];
370  h->top_pred_Y[h->mbx * 2 + 0] = h->pred_mode_Y[7];
371  h->top_pred_Y[h->mbx * 2 + 1] = h->pred_mode_Y[8];
372 
373  /* modify pred modes according to availability of neighbour samples */
374  if (!(h->flags & A_AVAIL)) {
375  modify_pred(left_modifier_l, &h->pred_mode_Y[4]);
376  modify_pred(left_modifier_l, &h->pred_mode_Y[7]);
377  modify_pred(left_modifier_c, pred_mode_uv);
378  }
379  if (!(h->flags & B_AVAIL)) {
380  modify_pred(top_modifier_l, &h->pred_mode_Y[4]);
381  modify_pred(top_modifier_l, &h->pred_mode_Y[5]);
382  modify_pred(top_modifier_c, pred_mode_uv);
383  }
384 }
385 
386 /*****************************************************************************
387  *
388  * motion compensation
389  *
390  ****************************************************************************/
391 
392 static inline void mc_dir_part(AVSContext *h, AVFrame *pic, int chroma_height,
393  int delta, int list, uint8_t *dest_y,
394  uint8_t *dest_cb, uint8_t *dest_cr,
395  int src_x_offset, int src_y_offset,
396  qpel_mc_func *qpix_op,
397  h264_chroma_mc_func chroma_op, cavs_vector *mv)
398 {
399  const int mx = mv->x + src_x_offset * 8;
400  const int my = mv->y + src_y_offset * 8;
401  const int luma_xy = (mx & 3) + ((my & 3) << 2);
402  uint8_t *src_y = pic->data[0] + (mx >> 2) + (my >> 2) * h->l_stride;
403  uint8_t *src_cb = pic->data[1] + (mx >> 3) + (my >> 3) * h->c_stride;
404  uint8_t *src_cr = pic->data[2] + (mx >> 3) + (my >> 3) * h->c_stride;
405  int extra_width = 0;
406  int extra_height = extra_width;
407  const int full_mx = mx >> 2;
408  const int full_my = my >> 2;
409  const int pic_width = 16 * h->mb_width;
410  const int pic_height = 16 * h->mb_height;
411  int emu = 0;
412 
413  if (!pic->data[0])
414  return;
415  if (mx & 7)
416  extra_width -= 3;
417  if (my & 7)
418  extra_height -= 3;
419 
420  if (full_mx < 0 - extra_width ||
421  full_my < 0 - extra_height ||
422  full_mx + 16 /* FIXME */ > pic_width + extra_width ||
423  full_my + 16 /* FIXME */ > pic_height + extra_height) {
424  h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
425  src_y - 2 - 2 * h->l_stride,
426  h->l_stride, h->l_stride,
427  16 + 5, 16 + 5 /* FIXME */,
428  full_mx - 2, full_my - 2,
429  pic_width, pic_height);
430  src_y = h->edge_emu_buffer + 2 + 2 * h->l_stride;
431  emu = 1;
432  }
433 
434  // FIXME try variable height perhaps?
435  qpix_op[luma_xy](dest_y, src_y, h->l_stride);
436 
437  if (emu) {
438  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb,
439  h->c_stride, h->c_stride,
440  9, 9 /* FIXME */,
441  mx >> 3, my >> 3,
442  pic_width >> 1, pic_height >> 1);
443  src_cb = h->edge_emu_buffer;
444  }
445  chroma_op(dest_cb, src_cb, h->c_stride, chroma_height, mx & 7, my & 7);
446 
447  if (emu) {
448  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr,
449  h->c_stride, h->c_stride,
450  9, 9 /* FIXME */,
451  mx >> 3, my >> 3,
452  pic_width >> 1, pic_height >> 1);
453  src_cr = h->edge_emu_buffer;
454  }
455  chroma_op(dest_cr, src_cr, h->c_stride, chroma_height, mx & 7, my & 7);
456 }
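/* note: mx/my are luma positions in quarter-pel units: the integer part
 * (mx >> 2, my >> 2) addresses the reference plane and luma_xy =
 * (mx & 3) + ((my & 3) << 2) selects one of the 16 quarter-pel interpolation
 * functions. Chroma is subsampled 2:1, so the same vector is an eighth-pel
 * offset there: integer part mx >> 3, fraction mx & 7 handed to the
 * H.264-style chroma MC routine. Edge emulation is used whenever the
 * interpolation window would reach outside the picture. */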
457 
458 static inline void mc_part_std(AVSContext *h, int chroma_height, int delta,
459  uint8_t *dest_y,
460  uint8_t *dest_cb,
461  uint8_t *dest_cr,
462  int x_offset, int y_offset,
463  qpel_mc_func *qpix_put,
464  h264_chroma_mc_func chroma_put,
465  qpel_mc_func *qpix_avg,
466  h264_chroma_mc_func chroma_avg,
467  cavs_vector *mv)
468 {
469  qpel_mc_func *qpix_op = qpix_put;
470  h264_chroma_mc_func chroma_op = chroma_put;
471 
472  dest_y += x_offset * 2 + y_offset * h->l_stride * 2;
473  dest_cb += x_offset + y_offset * h->c_stride;
474  dest_cr += x_offset + y_offset * h->c_stride;
475  x_offset += 8 * h->mbx;
476  y_offset += 8 * h->mby;
477 
478  if (mv->ref >= 0) {
479  AVFrame *ref = h->DPB[mv->ref].f;
480  mc_dir_part(h, ref, chroma_height, delta, 0,
481  dest_y, dest_cb, dest_cr, x_offset, y_offset,
482  qpix_op, chroma_op, mv);
483 
484  qpix_op = qpix_avg;
485  chroma_op = chroma_avg;
486  }
487 
488  if ((mv + MV_BWD_OFFS)->ref >= 0) {
489  AVFrame *ref = h->DPB[0].f;
490  mc_dir_part(h, ref, chroma_height, delta, 1,
491  dest_y, dest_cb, dest_cr, x_offset, y_offset,
492  qpix_op, chroma_op, mv + MV_BWD_OFFS);
493  }
494 }
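/* note: the forward prediction is written with the put_* functions first;
 * qpix_op/chroma_op are then switched to the avg_* variants so that, for
 * bidirectionally predicted blocks, the backward prediction (taken from
 * DPB[0]) is averaged into the same destination. */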
495 
496 void ff_cavs_inter(AVSContext *h, enum cavs_mb mb_type)
497 {
498  if (ff_cavs_partition_flags[mb_type] == 0) { // 16x16
499  mc_part_std(h, 8, 0, h->cy, h->cu, h->cv, 0, 0,
500  h->cdsp.put_cavs_qpel_pixels_tab[0],
501  h->h264chroma.put_h264_chroma_pixels_tab[0],
502  h->cdsp.avg_cavs_qpel_pixels_tab[0],
503  h->h264chroma.avg_h264_chroma_pixels_tab[0],
504  &h->mv[MV_FWD_X0]);
505  } else {
506  mc_part_std(h, 4, 0, h->cy, h->cu, h->cv, 0, 0,
507  h->cdsp.put_cavs_qpel_pixels_tab[1],
508  h->h264chroma.put_h264_chroma_pixels_tab[1],
509  h->cdsp.avg_cavs_qpel_pixels_tab[1],
510  h->h264chroma.avg_h264_chroma_pixels_tab[1],
511  &h->mv[MV_FWD_X0]);
512  mc_part_std(h, 4, 0, h->cy, h->cu, h->cv, 4, 0,
513  h->cdsp.put_cavs_qpel_pixels_tab[1],
514  h->h264chroma.put_h264_chroma_pixels_tab[1],
515  h->cdsp.avg_cavs_qpel_pixels_tab[1],
516  h->h264chroma.avg_h264_chroma_pixels_tab[1],
517  &h->mv[MV_FWD_X1]);
518  mc_part_std(h, 4, 0, h->cy, h->cu, h->cv, 0, 4,
519  h->cdsp.put_cavs_qpel_pixels_tab[1],
520  h->h264chroma.put_h264_chroma_pixels_tab[1],
521  h->cdsp.avg_cavs_qpel_pixels_tab[1],
522  h->h264chroma.avg_h264_chroma_pixels_tab[1],
523  &h->mv[MV_FWD_X2]);
524  mc_part_std(h, 4, 0, h->cy, h->cu, h->cv, 4, 4,
525  h->cdsp.put_cavs_qpel_pixels_tab[1],
526  h->h264chroma.put_h264_chroma_pixels_tab[1],
527  h->cdsp.avg_cavs_qpel_pixels_tab[1],
528  h->h264chroma.avg_h264_chroma_pixels_tab[1],
529  &h->mv[MV_FWD_X3]);
530  }
531 }
532 
533 /*****************************************************************************
534  *
535  * motion vector prediction
536  *
537  ****************************************************************************/
538 
539 static inline void scale_mv(AVSContext *h, int *d_x, int *d_y,
540  cavs_vector *src, int distp)
541 {
542  int64_t den = h->scale_den[FFMAX(src->ref, 0)];
543  *d_x = (src->x * distp * den + 256 + FF_SIGNBIT(src->x)) >> 9;
544  *d_y = (src->y * distp * den + 256 + FF_SIGNBIT(src->y)) >> 9;
545 }
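/* Sketch of the scaling math, assuming scale_den[] holds 512 divided by the
 * candidate's reference distance (set up elsewhere in the decoder):
 *
 *     d_x ~ src->x * distp / dist(src->ref)
 *
 * computed fixed-point as (x * distp * den + 256 + signbit) >> 9, with
 * FF_SIGNBIT() adjusting the rounding bias for negative components. */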
546 
547 static inline void mv_pred_median(AVSContext *h,
548  cavs_vector *mvP,
549  cavs_vector *mvA,
550  cavs_vector *mvB,
551  cavs_vector *mvC)
552 {
553  int ax, ay, bx, by, cx, cy;
554  int len_ab, len_bc, len_ca, len_mid;
555 
556  /* scale candidates according to their temporal span */
557  scale_mv(h, &ax, &ay, mvA, mvP->dist);
558  scale_mv(h, &bx, &by, mvB, mvP->dist);
559  scale_mv(h, &cx, &cy, mvC, mvP->dist);
560  /* find the geometrical median of the three candidates */
561  len_ab = abs(ax - bx) + abs(ay - by);
562  len_bc = abs(bx - cx) + abs(by - cy);
563  len_ca = abs(cx - ax) + abs(cy - ay);
564  len_mid = mid_pred(len_ab, len_bc, len_ca);
565  if (len_mid == len_ab) {
566  mvP->x = cx;
567  mvP->y = cy;
568  } else if (len_mid == len_bc) {
569  mvP->x = ax;
570  mvP->y = ay;
571  } else {
572  mvP->x = bx;
573  mvP->y = by;
574  }
575 }
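/* Worked example (illustrative, already-scaled candidates): A = (0,0),
 * B = (10,0), C = (2,1) give the pairwise L1 distances len_ab = 10,
 * len_bc = 9, len_ca = 3. The median of the three lengths is 9 (= len_bc),
 * so the candidate opposite that pair, A = (0,0), becomes the predictor. */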
576 
577 void ff_cavs_mv(AVSContext *h, enum cavs_mv_loc nP, enum cavs_mv_loc nC,
578  enum cavs_mv_pred mode, enum cavs_block size, int ref)
579 {
580  cavs_vector *mvP = &h->mv[nP];
581  cavs_vector *mvA = &h->mv[nP-1];
582  cavs_vector *mvB = &h->mv[nP-4];
583  cavs_vector *mvC = &h->mv[nC];
584  const cavs_vector *mvP2 = NULL;
585 
586  mvP->ref = ref;
587  mvP->dist = h->dist[mvP->ref];
588  if (mvC->ref == NOT_AVAIL || (nP == MV_FWD_X3) || (nP == MV_BWD_X3 ))
589  mvC = &h->mv[nP - 5]; // set to top-left (mvD)
590  if (mode == MV_PRED_PSKIP &&
591  (mvA->ref == NOT_AVAIL ||
592  mvB->ref == NOT_AVAIL ||
593  (mvA->x | mvA->y | mvA->ref) == 0 ||
594  (mvB->x | mvB->y | mvB->ref) == 0)) {
595  mvP2 = &un_mv;
596  /* if there is only one suitable candidate, take it */
597  } else if (mvA->ref >= 0 && mvB->ref < 0 && mvC->ref < 0) {
598  mvP2 = mvA;
599  } else if (mvA->ref < 0 && mvB->ref >= 0 && mvC->ref < 0) {
600  mvP2 = mvB;
601  } else if (mvA->ref < 0 && mvB->ref < 0 && mvC->ref >= 0) {
602  mvP2 = mvC;
603  } else if (mode == MV_PRED_LEFT && mvA->ref == ref) {
604  mvP2 = mvA;
605  } else if (mode == MV_PRED_TOP && mvB->ref == ref) {
606  mvP2 = mvB;
607  } else if (mode == MV_PRED_TOPRIGHT && mvC->ref == ref) {
608  mvP2 = mvC;
609  }
610  if (mvP2) {
611  mvP->x = mvP2->x;
612  mvP->y = mvP2->y;
613  } else
614  mv_pred_median(h, mvP, mvA, mvB, mvC);
615 
616  if (mode < MV_PRED_PSKIP) {
617  int mx = get_se_golomb(&h->gb) + (unsigned)mvP->x;
618  int my = get_se_golomb(&h->gb) + (unsigned)mvP->y;
619 
620  if (mx != (int16_t)mx || my != (int16_t)my) {
621  av_log(h->avctx, AV_LOG_ERROR, "MV %d %d out of supported range\n", mx, my);
622  } else {
623  mvP->x = mx;
624  mvP->y = my;
625  }
626  }
627  set_mvs(mvP, size);
628 }
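/* note: mvA/mvB/mvC are the left, top and top-right neighbours in the mv
 * cache; C falls back to the top-left entry (mvD) when it is unavailable or
 * the block is the bottom-right 8x8 partition, whose top-right neighbour is
 * not decoded yet. P-skip collapses to the zero vector if a needed neighbour
 * is missing or zero, a single usable candidate (or a directional mode whose
 * candidate matches ref) is copied directly, and everything else goes through
 * the scaled median above. For explicitly coded modes a Golomb-coded delta is
 * then added; values outside the int16_t range are reported and the
 * unmodified prediction is kept. */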
629 
630 /*****************************************************************************
631  *
632  * macroblock level
633  *
634  ****************************************************************************/
635 
636 /**
637  * initialise predictors for motion vectors and intra prediction
638  */
639 void ff_cavs_init_mb(AVSContext *h)
640 {
641  int i;
642 
643  /* copy predictors from top line (MB B and C) into cache */
644  for (i = 0; i < 3; i++) {
645  h->mv[MV_FWD_B2 + i] = h->top_mv[0][h->mbx * 2 + i];
646  h->mv[MV_BWD_B2 + i] = h->top_mv[1][h->mbx * 2 + i];
647  }
648  h->pred_mode_Y[1] = h->top_pred_Y[h->mbx * 2 + 0];
649  h->pred_mode_Y[2] = h->top_pred_Y[h->mbx * 2 + 1];
650  /* clear top predictors if MB B is not available */
651  if (!(h->flags & B_AVAIL)) {
652  h->mv[MV_FWD_B2] = un_mv;
653  h->mv[MV_FWD_B3] = un_mv;
654  h->mv[MV_BWD_B2] = un_mv;
655  h->mv[MV_BWD_B3] = un_mv;
656  h->pred_mode_Y[1] = h->pred_mode_Y[2] = NOT_AVAIL;
657  h->flags &= ~(C_AVAIL | D_AVAIL);
658  } else if (h->mbx) {
659  h->flags |= D_AVAIL;
660  }
661  if (h->mbx == h->mb_width - 1) // MB C not available
662  h->flags &= ~C_AVAIL;
663  /* clear top-right predictors if MB C is not available */
664  if (!(h->flags & C_AVAIL)) {
665  h->mv[MV_FWD_C2] = un_mv;
666  h->mv[MV_BWD_C2] = un_mv;
667  }
668  /* clear top-left predictors if MB D is not available */
669  if (!(h->flags & D_AVAIL)) {
670  h->mv[MV_FWD_D3] = un_mv;
671  h->mv[MV_BWD_D3] = un_mv;
672  }
673 }
674 
675 /**
676  * save predictors for later macroblocks and increase
677  * macroblock address
678  * @return 0 if end of frame is reached, 1 otherwise
679  */
680 int ff_cavs_next_mb(AVSContext *h)
681 {
682  int i;
683 
684  h->flags |= A_AVAIL;
685  h->cy += 16;
686  h->cu += 8;
687  h->cv += 8;
688  /* copy mvs as predictors to the left */
689  for (i = 0; i <= 20; i += 4)
690  h->mv[i] = h->mv[i + 2];
691  /* copy bottom mvs from cache to top line */
692  h->top_mv[0][h->mbx * 2 + 0] = h->mv[MV_FWD_X2];
693  h->top_mv[0][h->mbx * 2 + 1] = h->mv[MV_FWD_X3];
694  h->top_mv[1][h->mbx * 2 + 0] = h->mv[MV_BWD_X2];
695  h->top_mv[1][h->mbx * 2 + 1] = h->mv[MV_BWD_X3];
696  /* next MB address */
697  h->mbidx++;
698  h->mbx++;
699  if (h->mbx == h->mb_width) { // New mb line
700  h->flags = B_AVAIL | C_AVAIL;
701  /* clear left pred_modes */
702  h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
703  /* clear left mv predictors */
704  for (i = 0; i <= 20; i += 4)
705  h->mv[i] = un_mv;
706  h->mbx = 0;
707  h->mby++;
708  /* re-calculate sample pointers */
709  h->cy = h->cur.f->data[0] + h->mby * 16 * h->l_stride;
710  h->cu = h->cur.f->data[1] + h->mby * 8 * h->c_stride;
711  h->cv = h->cur.f->data[2] + h->mby * 8 * h->c_stride;
712  if (h->mby == h->mb_height) { // Frame end
713  return 0;
714  }
715  }
716  return 1;
717 }
718 
719 /*****************************************************************************
720  *
721  * frame level
722  *
723  ****************************************************************************/
724 
725 int ff_cavs_init_pic(AVSContext *h)
726 {
727  int i;
728 
729  /* clear some predictors */
730  for (i = 0; i <= 20; i += 4)
731  h->mv[i] = un_mv;
732  h->mv[MV_BWD_X0] = ff_cavs_dir_mv;
733  set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
734  h->mv[MV_FWD_X0] = ff_cavs_dir_mv;
735  set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
736  h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
737  h->cy = h->cur.f->data[0];
738  h->cu = h->cur.f->data[1];
739  h->cv = h->cur.f->data[2];
740  h->l_stride = h->cur.f->linesize[0];
741  h->c_stride = h->cur.f->linesize[1];
742  h->luma_scan[2] = 8 * h->l_stride;
743  h->luma_scan[3] = 8 * h->l_stride + 8;
744  h->mbx = h->mby = h->mbidx = 0;
745  h->flags = 0;
746 
747  return 0;
748 }
749 
750 /*****************************************************************************
751  *
752  * headers and interface
753  *
754  ****************************************************************************/
755 
756 /**
757  * some predictions require data from the top-neighbouring macroblock.
758  * this data has to be stored for one complete row of macroblocks
759  * and this storage space is allocated here
760  */
761 int ff_cavs_init_top_lines(AVSContext *h)
762 {
763  /* alloc top line of predictors */
764  h->top_qp = av_mallocz(h->mb_width);
765  h->top_mv[0] = av_mallocz_array(h->mb_width * 2 + 1, sizeof(cavs_vector));
766  h->top_mv[1] = av_mallocz_array(h->mb_width * 2 + 1, sizeof(cavs_vector));
767  h->top_pred_Y = av_mallocz_array(h->mb_width * 2, sizeof(*h->top_pred_Y));
768  h->top_border_y = av_mallocz_array(h->mb_width + 1, 16);
769  h->top_border_u = av_mallocz_array(h->mb_width, 10);
770  h->top_border_v = av_mallocz_array(h->mb_width, 10);
771 
772  /* alloc space for co-located MVs and types */
773  h->col_mv = av_mallocz_array(h->mb_width * h->mb_height,
774  4 * sizeof(cavs_vector));
775  h->col_type_base = av_mallocz(h->mb_width * h->mb_height);
776  h->block = av_mallocz(64 * sizeof(int16_t));
777 
778  if (!h->top_qp || !h->top_mv[0] || !h->top_mv[1] || !h->top_pred_Y ||
779  !h->top_border_y || !h->top_border_u || !h->top_border_v ||
780  !h->col_mv || !h->col_type_base || !h->block) {
781  av_freep(&h->top_qp);
782  av_freep(&h->top_mv[0]);
783  av_freep(&h->top_mv[1]);
784  av_freep(&h->top_pred_Y);
785  av_freep(&h->top_border_y);
786  av_freep(&h->top_border_u);
787  av_freep(&h->top_border_v);
788  av_freep(&h->col_mv);
789  av_freep(&h->col_type_base);
790  av_freep(&h->block);
791  return AVERROR(ENOMEM);
792  }
793  return 0;
794 }
795 
796 av_cold int ff_cavs_init(AVCodecContext *avctx)
797 {
798  AVSContext *h = avctx->priv_data;
799 
800  ff_blockdsp_init(&h->bdsp, avctx);
801  ff_h264chroma_init(&h->h264chroma, 8);
802  ff_idctdsp_init(&h->idsp, avctx);
803  ff_videodsp_init(&h->vdsp, 8);
804  ff_cavsdsp_init(&h->cdsp, avctx);
805  ff_init_scantable_permutation(h->idsp.idct_permutation,
806  h->cdsp.idct_perm);
807  ff_init_scantable(h->idsp.idct_permutation, &h->scantable, ff_zigzag_direct);
808 
809  h->avctx = avctx;
810  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
811 
812  h->cur.f = av_frame_alloc();
813  h->DPB[0].f = av_frame_alloc();
814  h->DPB[1].f = av_frame_alloc();
815  if (!h->cur.f || !h->DPB[0].f || !h->DPB[1].f) {
816  ff_cavs_end(avctx);
817  return AVERROR(ENOMEM);
818  }
819 
820  h->luma_scan[0] = 0;
821  h->luma_scan[1] = 8;
822  h->intra_pred_l[INTRA_L_VERT] = intra_pred_vert;
823  h->intra_pred_l[INTRA_L_HORIZ] = intra_pred_horiz;
824  h->intra_pred_l[INTRA_L_LP] = intra_pred_lp;
825  h->intra_pred_l[INTRA_L_DOWN_LEFT] = intra_pred_down_left;
826  h->intra_pred_l[INTRA_L_DOWN_RIGHT] = intra_pred_down_right;
827  h->intra_pred_l[INTRA_L_LP_LEFT] = intra_pred_lp_left;
828  h->intra_pred_l[INTRA_L_LP_TOP] = intra_pred_lp_top;
829  h->intra_pred_l[INTRA_L_DC_128] = intra_pred_dc_128;
830  h->intra_pred_c[INTRA_C_LP] = intra_pred_lp;
831  h->intra_pred_c[INTRA_C_HORIZ] = intra_pred_horiz;
832  h->intra_pred_c[INTRA_C_VERT] = intra_pred_vert;
833  h->intra_pred_c[INTRA_C_PLANE] = intra_pred_plane;
834  h->intra_pred_c[INTRA_C_LP_LEFT] = intra_pred_lp_left;
835  h->intra_pred_c[INTRA_C_LP_TOP] = intra_pred_lp_top;
836  h->intra_pred_c[INTRA_C_DC_128] = intra_pred_dc_128;
837  h->mv[7] = un_mv;
838  h->mv[19] = un_mv;
839  return 0;
840 }
841 
842 av_cold int ff_cavs_end(AVCodecContext *avctx)
843 {
844  AVSContext *h = avctx->priv_data;
845 
846  av_frame_free(&h->cur.f);
847  av_frame_free(&h->DPB[0].f);
848  av_frame_free(&h->DPB[1].f);
849 
850  av_freep(&h->top_qp);
851  av_freep(&h->top_mv[0]);
852  av_freep(&h->top_mv[1]);
853  av_freep(&h->top_pred_Y);
854  av_freep(&h->top_border_y);
855  av_freep(&h->top_border_u);
856  av_freep(&h->top_border_v);
857  av_freep(&h->col_mv);
858  av_freep(&h->col_type_base);
859  av_freep(&h->block);
860  av_freep(&h->edge_emu_buffer);
861  return 0;
862 }