FFmpeg
error_resilience.c
1 /*
2  * Error resilience / concealment
3  *
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * Error resilience / concealment.
26  */
27 
28 #include <limits.h>
29 
30 #include "libavutil/atomic.h"
31 #include "libavutil/internal.h"
32 #include "avcodec.h"
33 #include "error_resilience.h"
34 #include "me_cmp.h"
35 #include "mpegutils.h"
36 #include "mpegvideo.h"
37 #include "rectangle.h"
38 #include "thread.h"
39 #include "version.h"
40 
41 /**
42  * @param stride the number of MVs to get to the next row
43  * @param mv_step the number of MVs per row or column in a macroblock
44  */
45 static void set_mv_strides(ERContext *s, ptrdiff_t *mv_step, ptrdiff_t *stride)
46 {
47  if (s->avctx->codec_id == AV_CODEC_ID_H264) {
48  av_assert0(s->quarter_sample);
49  *mv_step = 4;
50  *stride = s->mb_width * 4;
51  } else {
52  *mv_step = 2;
53  *stride = s->b8_stride;
54  }
55 }
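/* Motion vectors are stored on a 4x4 luma grid for H.264 (four MVs per
 * macroblock in each dimension, so *stride is mb_width * 4) and on an 8x8
 * grid otherwise (two per macroblock, *stride is b8_stride). With
 * mb_width = 40, for example, the H.264 case gives an MV row stride of
 * 160 entries. */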
56 
57 /**
58  * Replace the current MB with a flat dc-only version.
59  */
60 static void put_dc(ERContext *s, uint8_t *dest_y, uint8_t *dest_cb,
61  uint8_t *dest_cr, int mb_x, int mb_y)
62 {
63  int *linesize = s->cur_pic.f->linesize;
64  int dc, dcu, dcv, y, i;
65  for (i = 0; i < 4; i++) {
66  dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride];
67  if (dc < 0)
68  dc = 0;
69  else if (dc > 2040)
70  dc = 2040;
71  for (y = 0; y < 8; y++) {
72  int x;
73  for (x = 0; x < 8; x++)
74  dest_y[x + (i & 1) * 8 + (y + (i >> 1) * 8) * linesize[0]] = dc / 8;
75  }
76  }
77  dcu = s->dc_val[1][mb_x + mb_y * s->mb_stride];
78  dcv = s->dc_val[2][mb_x + mb_y * s->mb_stride];
79  if (dcu < 0)
80  dcu = 0;
81  else if (dcu > 2040)
82  dcu = 2040;
83  if (dcv < 0)
84  dcv = 0;
85  else if (dcv > 2040)
86  dcv = 2040;
87 
88  if (dest_cr)
89  for (y = 0; y < 8; y++) {
90  int x;
91  for (x = 0; x < 8; x++) {
92  dest_cb[x + y * linesize[1]] = dcu / 8;
93  dest_cr[x + y * linesize[2]] = dcv / 8;
94  }
95  }
96 }
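/* The stored DC values are 8x the block's mean sample value (they are
 * written as (sum_of_64_samples + 4) >> 3 further below in
 * ff_er_frame_end()), so the clamp to [0, 2040] guarantees that dc / 8
 * stays within the valid 8-bit range [0, 255]. */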
97 
98 static void filter181(int16_t *data, int width, int height, ptrdiff_t stride)
99 {
100  int x, y;
101 
102  /* horizontal filter */
103  for (y = 1; y < height - 1; y++) {
104  int prev_dc = data[0 + y * stride];
105 
106  for (x = 1; x < width - 1; x++) {
107  int dc;
108  dc = -prev_dc +
109  data[x + y * stride] * 8 -
110  data[x + 1 + y * stride];
111  dc = (dc * 10923 + 32768) >> 16;
112  prev_dc = data[x + y * stride];
113  data[x + y * stride] = dc;
114  }
115  }
116 
117  /* vertical filter */
118  for (x = 1; x < width - 1; x++) {
119  int prev_dc = data[x];
120 
121  for (y = 1; y < height - 1; y++) {
122  int dc;
123 
124  dc = -prev_dc +
125  data[x + y * stride] * 8 -
126  data[x + (y + 1) * stride];
127  dc = (dc * 10923 + 32768) >> 16;
128  prev_dc = data[x + y * stride];
129  data[x + y * stride] = dc;
130  }
131  }
132 }
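/* filter181() runs a 1-D kernel over the DC plane, first along rows and
 * then along columns.  Since 10923 is approximately 65536 / 6, the update is
 *     dc_new = (8 * cur - prev - next) / 6
 * i.e. coefficients (-1, 8, -1) / 6: a flat region is left unchanged while
 * differences between neighbouring block DCs are slightly amplified,
 * acting as a light high-boost on the guessed DC values. */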
133 
134 /**
135  * guess the dc of blocks which do not have an undamaged dc
136  * @param w width in 8 pixel blocks
137  * @param h height in 8 pixel blocks
138  */
139 static void guess_dc(ERContext *s, int16_t *dc, int w,
140  int h, ptrdiff_t stride, int is_luma)
141 {
142  int b_x, b_y;
143  int16_t (*col )[4] = av_malloc_array(stride, h*sizeof( int16_t)*4);
144  uint32_t (*dist)[4] = av_malloc_array(stride, h*sizeof(uint32_t)*4);
145 
146  if(!col || !dist) {
147  av_log(s->avctx, AV_LOG_ERROR, "guess_dc() is out of memory\n");
148  goto fail;
149  }
150 
151  for(b_y=0; b_y<h; b_y++){
152  int color= 1024;
153  int distance= -1;
154  for(b_x=0; b_x<w; b_x++){
155  int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
156  int error_j= s->error_status_table[mb_index_j];
157  int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
158  if(intra_j==0 || !(error_j&ER_DC_ERROR)){
159  color= dc[b_x + b_y*stride];
160  distance= b_x;
161  }
162  col [b_x + b_y*stride][1]= color;
163  dist[b_x + b_y*stride][1]= distance >= 0 ? b_x-distance : 9999;
164  }
165  color= 1024;
166  distance= -1;
167  for(b_x=w-1; b_x>=0; b_x--){
168  int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
169  int error_j= s->error_status_table[mb_index_j];
170  int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
171  if(intra_j==0 || !(error_j&ER_DC_ERROR)){
172  color= dc[b_x + b_y*stride];
173  distance= b_x;
174  }
175  col [b_x + b_y*stride][0]= color;
176  dist[b_x + b_y*stride][0]= distance >= 0 ? distance-b_x : 9999;
177  }
178  }
179  for(b_x=0; b_x<w; b_x++){
180  int color= 1024;
181  int distance= -1;
182  for(b_y=0; b_y<h; b_y++){
183  int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
184  int error_j= s->error_status_table[mb_index_j];
185  int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
186  if(intra_j==0 || !(error_j&ER_DC_ERROR)){
187  color= dc[b_x + b_y*stride];
188  distance= b_y;
189  }
190  col [b_x + b_y*stride][3]= color;
191  dist[b_x + b_y*stride][3]= distance >= 0 ? b_y-distance : 9999;
192  }
193  color= 1024;
194  distance= -1;
195  for(b_y=h-1; b_y>=0; b_y--){
196  int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
197  int error_j= s->error_status_table[mb_index_j];
198  int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
199  if(intra_j==0 || !(error_j&ER_DC_ERROR)){
200  color= dc[b_x + b_y*stride];
201  distance= b_y;
202  }
203  col [b_x + b_y*stride][2]= color;
204  dist[b_x + b_y*stride][2]= distance >= 0 ? distance-b_y : 9999;
205  }
206  }
207 
208  for (b_y = 0; b_y < h; b_y++) {
209  for (b_x = 0; b_x < w; b_x++) {
210  int mb_index, error, j;
211  int64_t guess, weight_sum;
212  mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
213  error = s->error_status_table[mb_index];
214 
215  if (IS_INTER(s->cur_pic.mb_type[mb_index]))
216  continue; // inter
217  if (!(error & ER_DC_ERROR))
218  continue; // dc-ok
219 
220  weight_sum = 0;
221  guess = 0;
222  for (j = 0; j < 4; j++) {
223  int64_t weight = 256 * 256 * 256 * 16 / FFMAX(dist[b_x + b_y*stride][j], 1);
224  guess += weight*(int64_t)col[b_x + b_y*stride][j];
225  weight_sum += weight;
226  }
227  guess = (guess + weight_sum / 2) / weight_sum;
228  dc[b_x + b_y * stride] = guess;
229  }
230  }
231 
232 fail:
233  av_freep(&col);
234  av_freep(&dist);
235 }
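/* For every 8x8 block, guess_dc() scans each row and column in both
 * directions and records, per direction, the DC of the nearest block whose
 * DC is trusted (any inter block, or an intra block without ER_DC_ERROR)
 * together with its distance (9999 if none exists).  A damaged intra block
 * then gets its DC replaced by a weighted average of the four candidates,
 * with weights proportional to 1 / distance, so closer undamaged blocks
 * dominate the guess. */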
236 
237 /**
238  * simple horizontal deblocking filter used for error resilience
239  * @param w width in 8 pixel blocks
240  * @param h height in 8 pixel blocks
241  */
242 static void h_block_filter(ERContext *s, uint8_t *dst, int w,
243  int h, ptrdiff_t stride, int is_luma)
244 {
245  int b_x, b_y;
246  ptrdiff_t mvx_stride, mvy_stride;
247  const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
248  set_mv_strides(s, &mvx_stride, &mvy_stride);
249  mvx_stride >>= is_luma;
250  mvy_stride *= mvx_stride;
251 
252  for (b_y = 0; b_y < h; b_y++) {
253  for (b_x = 0; b_x < w - 1; b_x++) {
254  int y;
255  int left_status = s->error_status_table[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
256  int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
257  int left_intra = IS_INTRA(s->cur_pic.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
258  int right_intra = IS_INTRA(s->cur_pic.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
259  int left_damage = left_status & ER_MB_ERROR;
260  int right_damage = right_status & ER_MB_ERROR;
261  int offset = b_x * 8 + b_y * stride * 8;
262  int16_t *left_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
263  int16_t *right_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
264  if (!(left_damage || right_damage))
265  continue; // both undamaged
266  if ((!left_intra) && (!right_intra) &&
267  FFABS(left_mv[0] - right_mv[0]) +
268  FFABS(left_mv[1] + right_mv[1]) < 2)
269  continue;
270 
271  for (y = 0; y < 8; y++) {
272  int a, b, c, d;
273 
274  a = dst[offset + 7 + y * stride] - dst[offset + 6 + y * stride];
275  b = dst[offset + 8 + y * stride] - dst[offset + 7 + y * stride];
276  c = dst[offset + 9 + y * stride] - dst[offset + 8 + y * stride];
277 
278  d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
279  d = FFMAX(d, 0);
280  if (b < 0)
281  d = -d;
282 
283  if (d == 0)
284  continue;
285 
286  if (!(left_damage && right_damage))
287  d = d * 16 / 9;
288 
289  if (left_damage) {
290  dst[offset + 7 + y * stride] = cm[dst[offset + 7 + y * stride] + ((d * 7) >> 4)];
291  dst[offset + 6 + y * stride] = cm[dst[offset + 6 + y * stride] + ((d * 5) >> 4)];
292  dst[offset + 5 + y * stride] = cm[dst[offset + 5 + y * stride] + ((d * 3) >> 4)];
293  dst[offset + 4 + y * stride] = cm[dst[offset + 4 + y * stride] + ((d * 1) >> 4)];
294  }
295  if (right_damage) {
296  dst[offset + 8 + y * stride] = cm[dst[offset + 8 + y * stride] - ((d * 7) >> 4)];
297  dst[offset + 9 + y * stride] = cm[dst[offset + 9 + y * stride] - ((d * 5) >> 4)];
298  dst[offset + 10+ y * stride] = cm[dst[offset + 10 + y * stride] - ((d * 3) >> 4)];
299  dst[offset + 11+ y * stride] = cm[dst[offset + 11 + y * stride] - ((d * 1) >> 4)];
300  }
301  }
302  }
303  }
304 }
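/* The boundary filter visits each vertical edge between two 8x8 blocks
 * where at least one side is marked damaged (edges between two clean inter
 * blocks with near-identical MVs are skipped).  For every row it measures
 * the step b across the edge and the gradients a and c just inside each
 * block; the correction d = |b| - (|a| + |c| + 1) / 2, carrying the sign of
 * b and boosted by 16/9 when only one side is damaged, is then spread over
 * the four pixels on each damaged side with weights 7/16, 5/16, 3/16 and
 * 1/16.  v_block_filter() below is the same operation applied across
 * horizontal edges. */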
305 
306 /**
307  * simple vertical deblocking filter used for error resilience
308  * @param w width in 8 pixel blocks
309  * @param h height in 8 pixel blocks
310  */
311 static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h,
312  ptrdiff_t stride, int is_luma)
313 {
314  int b_x, b_y;
315  ptrdiff_t mvx_stride, mvy_stride;
316  const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
317  set_mv_strides(s, &mvx_stride, &mvy_stride);
318  mvx_stride >>= is_luma;
319  mvy_stride *= mvx_stride;
320 
321  for (b_y = 0; b_y < h - 1; b_y++) {
322  for (b_x = 0; b_x < w; b_x++) {
323  int x;
324  int top_status = s->error_status_table[(b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
325  int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
326  int top_intra = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
327  int bottom_intra = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
328  int top_damage = top_status & ER_MB_ERROR;
329  int bottom_damage = bottom_status & ER_MB_ERROR;
330  int offset = b_x * 8 + b_y * stride * 8;
331 
332  int16_t *top_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
333  int16_t *bottom_mv = s->cur_pic.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
334 
335  if (!(top_damage || bottom_damage))
336  continue; // both undamaged
337 
338  if ((!top_intra) && (!bottom_intra) &&
339  FFABS(top_mv[0] - bottom_mv[0]) +
340  FFABS(top_mv[1] + bottom_mv[1]) < 2)
341  continue;
342 
343  for (x = 0; x < 8; x++) {
344  int a, b, c, d;
345 
346  a = dst[offset + x + 7 * stride] - dst[offset + x + 6 * stride];
347  b = dst[offset + x + 8 * stride] - dst[offset + x + 7 * stride];
348  c = dst[offset + x + 9 * stride] - dst[offset + x + 8 * stride];
349 
350  d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
351  d = FFMAX(d, 0);
352  if (b < 0)
353  d = -d;
354 
355  if (d == 0)
356  continue;
357 
358  if (!(top_damage && bottom_damage))
359  d = d * 16 / 9;
360 
361  if (top_damage) {
362  dst[offset + x + 7 * stride] = cm[dst[offset + x + 7 * stride] + ((d * 7) >> 4)];
363  dst[offset + x + 6 * stride] = cm[dst[offset + x + 6 * stride] + ((d * 5) >> 4)];
364  dst[offset + x + 5 * stride] = cm[dst[offset + x + 5 * stride] + ((d * 3) >> 4)];
365  dst[offset + x + 4 * stride] = cm[dst[offset + x + 4 * stride] + ((d * 1) >> 4)];
366  }
367  if (bottom_damage) {
368  dst[offset + x + 8 * stride] = cm[dst[offset + x + 8 * stride] - ((d * 7) >> 4)];
369  dst[offset + x + 9 * stride] = cm[dst[offset + x + 9 * stride] - ((d * 5) >> 4)];
370  dst[offset + x + 10 * stride] = cm[dst[offset + x + 10 * stride] - ((d * 3) >> 4)];
371  dst[offset + x + 11 * stride] = cm[dst[offset + x + 11 * stride] - ((d * 1) >> 4)];
372  }
373  }
374  }
375  }
376 }
377 
378 #define MV_FROZEN 8
379 #define MV_CHANGED 4
380 #define MV_UNCHANGED 2
381 #define MV_LISTED 1
382 static av_always_inline void add_blocklist(int (*blocklist)[2], int *blocklist_length, uint8_t *fixed, int mb_x, int mb_y, int mb_xy)
383 {
384  if (fixed[mb_xy])
385  return;
386  fixed[mb_xy] = MV_LISTED;
387  blocklist[ *blocklist_length ][0] = mb_x;
388  blocklist[(*blocklist_length)++][1] = mb_y;
389 }
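/* guess_mv() below works outwards from macroblocks whose motion vector is
 * reliable.  fixed[] tracks the per-MB state: MV_FROZEN means the MV is
 * known-good (intra, inter without ER_MV_ERROR, or already concealed in an
 * earlier round), MV_CHANGED / MV_UNCHANGED mark MBs guessed in the current
 * round, and MV_LISTED means the MB is queued in the current block list.
 * add_blocklist() appends an MB to that queue exactly once. */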
390 
391 static void guess_mv(ERContext *s)
392 {
393  int (*blocklist)[2], (*next_blocklist)[2];
394  uint8_t *fixed;
395  const ptrdiff_t mb_stride = s->mb_stride;
396  const int mb_width = s->mb_width;
397  int mb_height = s->mb_height;
398  int i, depth, num_avail;
399  int mb_x, mb_y;
400  ptrdiff_t mot_step, mot_stride;
401  int blocklist_length, next_blocklist_length;
402 
403  if (s->last_pic.f && s->last_pic.f->data[0])
404  mb_height = FFMIN(mb_height, (s->last_pic.f->height+15)>>4);
405  if (s->next_pic.f && s->next_pic.f->data[0])
406  mb_height = FFMIN(mb_height, (s->next_pic.f->height+15)>>4);
407 
408  blocklist = (int (*)[2])s->er_temp_buffer;
409  next_blocklist = blocklist + s->mb_stride * s->mb_height;
410  fixed = (uint8_t *)(next_blocklist + s->mb_stride * s->mb_height);
411 
412  set_mv_strides(s, &mot_step, &mot_stride);
413 
414  num_avail = 0;
415  if (s->last_pic.motion_val[0])
416  ff_thread_await_progress(s->last_pic.tf, mb_height-1, 0);
417  for (i = 0; i < mb_width * mb_height; i++) {
418  const int mb_xy = s->mb_index2xy[i];
419  int f = 0;
420  int error = s->error_status_table[mb_xy];
421 
422  if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
423  f = MV_FROZEN; // intra // FIXME check
424  if (!(error & ER_MV_ERROR))
425  f = MV_FROZEN; // inter with undamaged MV
426 
427  fixed[mb_xy] = f;
428  if (f == MV_FROZEN)
429  num_avail++;
430  else if(s->last_pic.f->data[0] && s->last_pic.motion_val[0]){
431  const int mb_y= mb_xy / s->mb_stride;
432  const int mb_x= mb_xy % s->mb_stride;
433  const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
434  s->cur_pic.motion_val[0][mot_index][0]= s->last_pic.motion_val[0][mot_index][0];
435  s->cur_pic.motion_val[0][mot_index][1]= s->last_pic.motion_val[0][mot_index][1];
436  s->cur_pic.ref_index[0][4*mb_xy] = s->last_pic.ref_index[0][4*mb_xy];
437  }
438  }
439 
440  if ((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) ||
441  num_avail <= mb_width / 2) {
442  for (mb_y = 0; mb_y < mb_height; mb_y++) {
443  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
444  const int mb_xy = mb_x + mb_y * s->mb_stride;
445  int mv_dir = (s->last_pic.f && s->last_pic.f->data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
446 
447  if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
448  continue;
449  if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
450  continue;
451 
452  s->mv[0][0][0] = 0;
453  s->mv[0][0][1] = 0;
454  s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
455  mb_x, mb_y, 0, 0);
456  }
457  }
458  return;
459  }
460 
461  blocklist_length = 0;
462  for (mb_y = 0; mb_y < mb_height; mb_y++) {
463  for (mb_x = 0; mb_x < mb_width; mb_x++) {
464  const int mb_xy = mb_x + mb_y * mb_stride;
465  if (fixed[mb_xy] == MV_FROZEN) {
466  if (mb_x) add_blocklist(blocklist, &blocklist_length, fixed, mb_x - 1, mb_y, mb_xy - 1);
467  if (mb_y) add_blocklist(blocklist, &blocklist_length, fixed, mb_x, mb_y - 1, mb_xy - mb_stride);
468  if (mb_x+1 < mb_width) add_blocklist(blocklist, &blocklist_length, fixed, mb_x + 1, mb_y, mb_xy + 1);
469  if (mb_y+1 < mb_height) add_blocklist(blocklist, &blocklist_length, fixed, mb_x, mb_y + 1, mb_xy + mb_stride);
470  }
471  }
472  }
473 
474  for (depth = 0; ; depth++) {
475  int changed, pass, none_left;
476  int blocklist_index;
477 
478  none_left = 1;
479  changed = 1;
480  for (pass = 0; (changed || pass < 2) && pass < 10; pass++) {
481  int score_sum = 0;
482 
483  changed = 0;
484  for (blocklist_index = 0; blocklist_index < blocklist_length; blocklist_index++) {
485  const int mb_x = blocklist[blocklist_index][0];
486  const int mb_y = blocklist[blocklist_index][1];
487  const int mb_xy = mb_x + mb_y * mb_stride;
488  int mv_predictor[8][2];
489  int ref[8];
490  int pred_count;
491  int j;
492  int best_score;
493  int best_pred;
494  int mot_index;
495  int prev_x, prev_y, prev_ref;
496 
497  if ((mb_x ^ mb_y ^ pass) & 1)
498  continue;
499  av_assert2(fixed[mb_xy] != MV_FROZEN);
500 
501 
502  av_assert1(!IS_INTRA(s->cur_pic.mb_type[mb_xy]));
503  av_assert1(s->last_pic.f && s->last_pic.f->data[0]);
504 
505  j = 0;
506  if (mb_x > 0)
507  j |= fixed[mb_xy - 1];
508  if (mb_x + 1 < mb_width)
509  j |= fixed[mb_xy + 1];
510  if (mb_y > 0)
511  j |= fixed[mb_xy - mb_stride];
512  if (mb_y + 1 < mb_height)
513  j |= fixed[mb_xy + mb_stride];
514 
515  av_assert2(j & MV_FROZEN);
516 
517  if (!(j & MV_CHANGED) && pass > 1)
518  continue;
519 
520  none_left = 0;
521  pred_count = 0;
522  mot_index = (mb_x + mb_y * mot_stride) * mot_step;
523 
524  if (mb_x > 0 && fixed[mb_xy - 1] > 1) {
525  mv_predictor[pred_count][0] =
526  s->cur_pic.motion_val[0][mot_index - mot_step][0];
527  mv_predictor[pred_count][1] =
528  s->cur_pic.motion_val[0][mot_index - mot_step][1];
529  ref[pred_count] =
530  s->cur_pic.ref_index[0][4 * (mb_xy - 1)];
531  pred_count++;
532  }
533  if (mb_x + 1 < mb_width && fixed[mb_xy + 1] > 1) {
534  mv_predictor[pred_count][0] =
535  s->cur_pic.motion_val[0][mot_index + mot_step][0];
536  mv_predictor[pred_count][1] =
537  s->cur_pic.motion_val[0][mot_index + mot_step][1];
538  ref[pred_count] =
539  s->cur_pic.ref_index[0][4 * (mb_xy + 1)];
540  pred_count++;
541  }
542  if (mb_y > 0 && fixed[mb_xy - mb_stride] > 1) {
543  mv_predictor[pred_count][0] =
544  s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][0];
545  mv_predictor[pred_count][1] =
546  s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][1];
547  ref[pred_count] =
548  s->cur_pic.ref_index[0][4 * (mb_xy - s->mb_stride)];
549  pred_count++;
550  }
551  if (mb_y + 1<mb_height && fixed[mb_xy + mb_stride] > 1) {
552  mv_predictor[pred_count][0] =
553  s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][0];
554  mv_predictor[pred_count][1] =
555  s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][1];
556  ref[pred_count] =
557  s->cur_pic.ref_index[0][4 * (mb_xy + s->mb_stride)];
558  pred_count++;
559  }
560  if (pred_count == 0)
561  continue;
562 
563  if (pred_count > 1) {
564  int sum_x = 0, sum_y = 0, sum_r = 0;
565  int max_x, max_y, min_x, min_y, max_r, min_r;
566 
567  for (j = 0; j < pred_count; j++) {
568  sum_x += mv_predictor[j][0];
569  sum_y += mv_predictor[j][1];
570  sum_r += ref[j];
571  if (j && ref[j] != ref[j - 1])
572  goto skip_mean_and_median;
573  }
574 
575  /* mean */
576  mv_predictor[pred_count][0] = sum_x / j;
577  mv_predictor[pred_count][1] = sum_y / j;
578  ref[pred_count] = sum_r / j;
579 
580  /* median */
581  if (pred_count >= 3) {
582  min_y = min_x = min_r = 99999;
583  max_y = max_x = max_r = -99999;
584  } else {
585  min_x = min_y = max_x = max_y = min_r = max_r = 0;
586  }
587  for (j = 0; j < pred_count; j++) {
588  max_x = FFMAX(max_x, mv_predictor[j][0]);
589  max_y = FFMAX(max_y, mv_predictor[j][1]);
590  max_r = FFMAX(max_r, ref[j]);
591  min_x = FFMIN(min_x, mv_predictor[j][0]);
592  min_y = FFMIN(min_y, mv_predictor[j][1]);
593  min_r = FFMIN(min_r, ref[j]);
594  }
595  mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x;
596  mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y;
597  ref[pred_count + 1] = sum_r - max_r - min_r;
598 
599  if (pred_count == 4) {
600  mv_predictor[pred_count + 1][0] /= 2;
601  mv_predictor[pred_count + 1][1] /= 2;
602  ref[pred_count + 1] /= 2;
603  }
604  pred_count += 2;
605  }
606 
607 skip_mean_and_median:
608  /* zero MV */
609  mv_predictor[pred_count][0] =
610  mv_predictor[pred_count][1] =
611  ref[pred_count] = 0;
612  pred_count++;
613 
614  prev_x = s->cur_pic.motion_val[0][mot_index][0];
615  prev_y = s->cur_pic.motion_val[0][mot_index][1];
616  prev_ref = s->cur_pic.ref_index[0][4 * mb_xy];
617 
618  /* last MV */
619  mv_predictor[pred_count][0] = prev_x;
620  mv_predictor[pred_count][1] = prev_y;
621  ref[pred_count] = prev_ref;
622  pred_count++;
623 
624  best_pred = 0;
625  best_score = 256 * 256 * 256 * 64;
626  for (j = 0; j < pred_count; j++) {
627  int *linesize = s->cur_pic.f->linesize;
628  int score = 0;
629  uint8_t *src = s->cur_pic.f->data[0] +
630  mb_x * 16 + mb_y * 16 * linesize[0];
631 
632  s->cur_pic.motion_val[0][mot_index][0] =
633  s->mv[0][0][0] = mv_predictor[j][0];
634  s->cur_pic.motion_val[0][mot_index][1] =
635  s->mv[0][0][1] = mv_predictor[j][1];
636 
637  // predictor intra or otherwise not available
638  if (ref[j] < 0)
639  continue;
640 
641  s->decode_mb(s->opaque, ref[j], MV_DIR_FORWARD,
642  MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);
643 
644  if (mb_x > 0 && fixed[mb_xy - 1] > 1) {
645  int k;
646  for (k = 0; k < 16; k++)
647  score += FFABS(src[k * linesize[0] - 1] -
648  src[k * linesize[0]]);
649  }
650  if (mb_x + 1 < mb_width && fixed[mb_xy + 1] > 1) {
651  int k;
652  for (k = 0; k < 16; k++)
653  score += FFABS(src[k * linesize[0] + 15] -
654  src[k * linesize[0] + 16]);
655  }
656  if (mb_y > 0 && fixed[mb_xy - mb_stride] > 1) {
657  int k;
658  for (k = 0; k < 16; k++)
659  score += FFABS(src[k - linesize[0]] - src[k]);
660  }
661  if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] > 1) {
662  int k;
663  for (k = 0; k < 16; k++)
664  score += FFABS(src[k + linesize[0] * 15] -
665  src[k + linesize[0] * 16]);
666  }
667 
668  if (score <= best_score) { // <= will favor the last MV
669  best_score = score;
670  best_pred = j;
671  }
672  }
673  score_sum += best_score;
674  s->mv[0][0][0] = mv_predictor[best_pred][0];
675  s->mv[0][0][1] = mv_predictor[best_pred][1];
676 
677  for (i = 0; i < mot_step; i++)
678  for (j = 0; j < mot_step; j++) {
679  s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
680  s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
681  }
682 
683  s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD,
684  MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);
685 
686 
687  if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) {
688  fixed[mb_xy] = MV_CHANGED;
689  changed++;
690  } else
691  fixed[mb_xy] = MV_UNCHANGED;
692  }
693  }
694 
695  if (none_left)
696  return;
697 
698  next_blocklist_length = 0;
699 
700  for (blocklist_index = 0; blocklist_index < blocklist_length; blocklist_index++) {
701  const int mb_x = blocklist[blocklist_index][0];
702  const int mb_y = blocklist[blocklist_index][1];
703  const int mb_xy = mb_x + mb_y * mb_stride;
704 
705  if (fixed[mb_xy] & (MV_CHANGED|MV_UNCHANGED|MV_FROZEN)) {
706  fixed[mb_xy] = MV_FROZEN;
707  if (mb_x > 0)
708  add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x - 1, mb_y, mb_xy - 1);
709  if (mb_y > 0)
710  add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x, mb_y - 1, mb_xy - mb_stride);
711  if (mb_x + 1 < mb_width)
712  add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x + 1, mb_y, mb_xy + 1);
713  if (mb_y + 1 < mb_height)
714  add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x, mb_y + 1, mb_xy + mb_stride);
715  }
716  }
717  av_assert0(next_blocklist_length <= mb_height * mb_width);
718  FFSWAP(int , blocklist_length, next_blocklist_length);
719  FFSWAP(void*, blocklist, next_blocklist);
720  }
721 }
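/* For each damaged MB adjacent to at least one frozen neighbour, guess_mv()
 * tries a small set of candidate vectors: the MVs of the already-fixed
 * left, right, top and bottom neighbours, their mean and median (only when
 * all contributing reference indices agree), the zero MV, and the MB's
 * previous vector.  Each usable candidate is motion-compensated through
 * s->decode_mb() and scored by the absolute luma difference along the
 * borders shared with fixed neighbours; the lowest score wins ("<=" makes
 * ties favour the last candidate, i.e. the previous vector).  The set of
 * fixed MBs then grows by one ring per depth iteration until nothing is
 * left to conceal. */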
722 
723 static int is_intra_more_likely(ERContext *s)
724 {
725  int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
726 
727  if (!s->last_pic.f || !s->last_pic.f->data[0])
728  return 1; // no previous frame available -> use spatial prediction
729 
730  if (s->avctx->error_concealment & FF_EC_FAVOR_INTER)
731  return 0;
732 
733  undamaged_count = 0;
734  for (i = 0; i < s->mb_num; i++) {
735  const int mb_xy = s->mb_index2xy[i];
736  const int error = s->error_status_table[mb_xy];
737  if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
738  undamaged_count++;
739  }
740 
741  if (undamaged_count < 5)
742  return 0; // almost all MBs damaged -> use temporal prediction
743 
744  // prevent dsp.sad() check, that requires access to the image
745  if (CONFIG_XVMC &&
746  s->avctx->hwaccel && s->avctx->hwaccel->decode_mb &&
747  s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I)
748  return 1;
749 
750  skip_amount = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
751  is_intra_likely = 0;
752 
753  j = 0;
754  for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) {
755  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
756  int error;
757  const int mb_xy = mb_x + mb_y * s->mb_stride;
758 
759  error = s->error_status_table[mb_xy];
760  if ((error & ER_DC_ERROR) && (error & ER_MV_ERROR))
761  continue; // skip damaged
762 
763  j++;
764  // skip a few to speed things up
765  if ((j % skip_amount) != 0)
766  continue;
767 
768  if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I) {
769  int *linesize = s->cur_pic.f->linesize;
770  uint8_t *mb_ptr = s->cur_pic.f->data[0] +
771  mb_x * 16 + mb_y * 16 * linesize[0];
772  uint8_t *last_mb_ptr = s->last_pic.f->data[0] +
773  mb_x * 16 + mb_y * 16 * linesize[0];
774 
775  if (s->avctx->codec_id == AV_CODEC_ID_H264) {
776  // FIXME
777  } else {
778  ff_thread_await_progress(s->last_pic.tf, mb_y, 0);
779  }
780  is_intra_likely += s->mecc.sad[0](NULL, last_mb_ptr, mb_ptr,
781  linesize[0], 16);
782  // FIXME need await_progress() here
783  is_intra_likely -= s->mecc.sad[0](NULL, last_mb_ptr,
784  last_mb_ptr + linesize[0] * 16,
785  linesize[0], 16);
786  } else {
787  if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
788  is_intra_likely++;
789  else
790  is_intra_likely--;
791  }
792  }
793  }
794 // av_log(NULL, AV_LOG_ERROR, "is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type);
795  return is_intra_likely > 0;
796 }
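/* The heuristic above samples roughly 50 of the undamaged macroblocks.  For
 * an I-frame with a previous picture available it adds the SAD between each
 * sampled MB and the co-located MB of the previous frame, and subtracts the
 * previous frame's own SAD against the macroblock directly below it; for
 * other frame types it simply votes +1 for coded intra MBs and -1 for inter
 * MBs.  A positive total selects spatial (intra) concealment, otherwise
 * temporal concealment is used. */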
797 
798 void ff_er_frame_start(ERContext *s)
799 {
800  if (!s->avctx->error_concealment)
801  return;
802 
803  if (!s->mecc_inited) {
804  ff_me_cmp_init(&s->mecc, s->avctx);
805  s->mecc_inited = 1;
806  }
807 
808  memset(s->error_status_table, ER_MB_ERROR | VP_START | ER_MB_END,
809  s->mb_stride * s->mb_height * sizeof(uint8_t));
810  atomic_init(&s->error_count, 3 * s->mb_num);
811  s->error_occurred = 0;
812 }
813 
814 static int er_supported(ERContext *s)
815 {
816  if(s->avctx->hwaccel && s->avctx->hwaccel->decode_slice ||
817  !s->cur_pic.f ||
818  s->cur_pic.field_picture
819  )
820  return 0;
821  return 1;
822 }
823 
824 /**
825  * Add a slice.
826  * @param endx x component of the last macroblock, can be -1
827  * for the last of the previous line
828  * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is
829  * assumed that no earlier end or error of the same type occurred
830  */
831 void ff_er_add_slice(ERContext *s, int startx, int starty,
832  int endx, int endy, int status)
833 {
834  const int start_i = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1);
835  const int end_i = av_clip(endx + endy * s->mb_width, 0, s->mb_num);
836  const int start_xy = s->mb_index2xy[start_i];
837  const int end_xy = s->mb_index2xy[end_i];
838  int mask = -1;
839 
840  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_slice)
841  return;
842 
843  if (start_i > end_i || start_xy > end_xy) {
845  "internal error, slice end before start\n");
846  return;
847  }
848 
849  if (!s->avctx->error_concealment)
850  return;
851 
852  mask &= ~VP_START;
853  if (status & (ER_AC_ERROR | ER_AC_END)) {
854  mask &= ~(ER_AC_ERROR | ER_AC_END);
855  atomic_fetch_add(&s->error_count, start_i - end_i - 1);
856  }
857  if (status & (ER_DC_ERROR | ER_DC_END)) {
858  mask &= ~(ER_DC_ERROR | ER_DC_END);
859  atomic_fetch_add(&s->error_count, start_i - end_i - 1);
860  }
861  if (status & (ER_MV_ERROR | ER_MV_END)) {
862  mask &= ~(ER_MV_ERROR | ER_MV_END);
863  atomic_fetch_add(&s->error_count, start_i - end_i - 1);
864  }
865 
866  if (status & ER_MB_ERROR) {
867  s->error_occurred = 1;
868  atomic_store(&s->error_count, INT_MAX);
869  }
870 
871  if (mask == ~0x7F) {
872  memset(&s->error_status_table[start_xy], 0,
873  (end_xy - start_xy) * sizeof(uint8_t));
874  } else {
875  int i;
876  for (i = start_xy; i < end_xy; i++)
877  s->error_status_table[i] &= mask;
878  }
879 
880  if (end_i == s->mb_num)
881  atomic_store(&s->error_count, INT_MAX);
882  else {
883  s->error_status_table[end_xy] &= mask;
884  s->error_status_table[end_xy] |= status;
885  }
886 
887  s->error_status_table[start_xy] |= VP_START;
888 
889  if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) &&
890  er_supported(s) && s->avctx->skip_top * s->mb_width < start_i) {
891  int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];
892 
893  prev_status &= ~ VP_START;
894  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END)) {
895  s->error_occurred = 1;
896  atomic_store(&s->error_count, INT_MAX);
897  }
898  }
899 }
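/* Rough sketch of how an mpegvideo-style decoder drives this API per frame
 * (illustrative only; the exact context fields and status flags differ per
 * decoder):
 *
 *     ff_er_frame_start(&s->er);
 *     // decode slices ...
 *     //   on success:
 *     ff_er_add_slice(&s->er, resync_mb_x, resync_mb_y, mb_x, mb_y, ER_MB_END);
 *     //   on a corrupted slice:
 *     ff_er_add_slice(&s->er, resync_mb_x, resync_mb_y, mb_x, mb_y, ER_MB_ERROR);
 *     // after the whole frame:
 *     ff_er_frame_end(&s->er);
 *
 * ER_MB_END / ER_MB_ERROR are the combined AC+DC+MV end/error masks; decoders
 * may also report the partitions individually (ER_AC_END, ER_DC_ERROR, ...). */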
900 
901 void ff_er_frame_end(ERContext *s)
902 {
903  int *linesize = NULL;
904  int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
905  int distance;
906  int threshold_part[4] = { 100, 100, 100 };
907  int threshold = 50;
908  int is_intra_likely;
909  int size = s->b8_stride * 2 * s->mb_height;
910 
911  /* We do not support ER of field pictures yet,
912  * though it should not crash if enabled. */
913  if (!s->avctx->error_concealment || !atomic_load(&s->error_count) ||
914  s->avctx->lowres ||
915  !er_supported(s) ||
916  atomic_load(&s->error_count) == 3 * s->mb_width *
917  (s->avctx->skip_top + s->avctx->skip_bottom)) {
918  return;
919  }
920  linesize = s->cur_pic.f->linesize;
921  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
922  int status = s->error_status_table[mb_x + (s->mb_height - 1) * s->mb_stride];
923  if (status != 0x7F)
924  break;
925  }
926 
927  if ( mb_x == s->mb_width
928  && s->avctx->skip_bottom == 0
929  && (FFALIGN(s->avctx->height, 16)&16)
930  && atomic_load(&s->error_count) == 3 * s->mb_width * (s->avctx->skip_top + s->avctx->skip_bottom + 1)
931  ) {
932  av_log(s->avctx, AV_LOG_DEBUG, "ignoring last missing slice\n");
933  return;
934  }
935 
936  if (s->last_pic.f) {
937  if (s->last_pic.f->width != s->cur_pic.f->width ||
938  s->last_pic.f->height != s->cur_pic.f->height ||
939  s->last_pic.f->format != s->cur_pic.f->format) {
940  av_log(s->avctx, AV_LOG_WARNING, "Cannot use previous picture in error concealment\n");
941  memset(&s->last_pic, 0, sizeof(s->last_pic));
942  }
943  }
944  if (s->next_pic.f) {
945  if (s->next_pic.f->width != s->cur_pic.f->width ||
946  s->next_pic.f->height != s->cur_pic.f->height ||
947  s->next_pic.f->format != s->cur_pic.f->format) {
948  av_log(s->avctx, AV_LOG_WARNING, "Cannot use next picture in error concealment\n");
949  memset(&s->next_pic, 0, sizeof(s->next_pic));
950  }
951  }
952 
953  if (!s->cur_pic.motion_val[0] || !s->cur_pic.ref_index[0]) {
954  av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
955 
956  for (i = 0; i < 2; i++) {
957  s->ref_index_buf[i] = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
958  s->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t));
959  if (!s->ref_index_buf[i] || !s->motion_val_buf[i])
960  break;
961  s->cur_pic.ref_index[i] = s->ref_index_buf[i]->data;
962  s->cur_pic.motion_val[i] = (int16_t (*)[2])s->motion_val_buf[i]->data + 4;
963  }
964  if (i < 2) {
965  for (i = 0; i < 2; i++) {
966  av_buffer_unref(&s->ref_index_buf[i]);
967  av_buffer_unref(&s->motion_val_buf[i]);
968  s->cur_pic.ref_index[i] = NULL;
969  s->cur_pic.motion_val[i] = NULL;
970  }
971  return;
972  }
973  }
974 
975  if (s->avctx->debug & FF_DEBUG_ER) {
976  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
977  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
978  int status = s->error_status_table[mb_x + mb_y * s->mb_stride];
979 
980  av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
981  }
982  av_log(s->avctx, AV_LOG_DEBUG, "\n");
983  }
984  }
985 
986 #if 1
987  /* handle overlapping slices */
988  for (error_type = 1; error_type <= 3; error_type++) {
989  int end_ok = 0;
990 
991  for (i = s->mb_num - 1; i >= 0; i--) {
992  const int mb_xy = s->mb_index2xy[i];
993  int error = s->error_status_table[mb_xy];
994 
995  if (error & (1 << error_type))
996  end_ok = 1;
997  if (error & (8 << error_type))
998  end_ok = 1;
999 
1000  if (!end_ok)
1001  s->error_status_table[mb_xy] |= 1 << error_type;
1002 
1003  if (error & VP_START)
1004  end_ok = 0;
1005  }
1006  }
1007 #endif
1008 #if 1
1009  /* handle slices with partitions of different length */
1010  if (s->partitioned_frame) {
1011  int end_ok = 0;
1012 
1013  for (i = s->mb_num - 1; i >= 0; i--) {
1014  const int mb_xy = s->mb_index2xy[i];
1015  int error = s->error_status_table[mb_xy];
1016 
1017  if (error & ER_AC_END)
1018  end_ok = 0;
1019  if ((error & ER_MV_END) ||
1020  (error & ER_DC_END) ||
1021  (error & ER_AC_ERROR))
1022  end_ok = 1;
1023 
1024  if (!end_ok)
1025  s->error_status_table[mb_xy]|= ER_AC_ERROR;
1026 
1027  if (error & VP_START)
1028  end_ok = 0;
1029  }
1030  }
1031 #endif
1032  /* handle missing slices */
1033  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1034  int end_ok = 1;
1035 
1036  // FIXME + 100 hack
1037  for (i = s->mb_num - 2; i >= s->mb_width + 100; i--) {
1038  const int mb_xy = s->mb_index2xy[i];
1039  int error1 = s->error_status_table[mb_xy];
1040  int error2 = s->error_status_table[s->mb_index2xy[i + 1]];
1041 
1042  if (error1 & VP_START)
1043  end_ok = 1;
1044 
1045  if (error2 == (VP_START | ER_MB_ERROR | ER_MB_END) &&
1046  error1 != (VP_START | ER_MB_ERROR | ER_MB_END) &&
1047  ((error1 & ER_AC_END) || (error1 & ER_DC_END) ||
1048  (error1 & ER_MV_END))) {
1049  // end & uninit
1050  end_ok = 0;
1051  }
1052 
1053  if (!end_ok)
1054  s->error_status_table[mb_xy] |= ER_MB_ERROR;
1055  }
1056  }
1057 
1058 #if 1
1059  /* backward mark errors */
1060  distance = 9999999;
1061  for (error_type = 1; error_type <= 3; error_type++) {
1062  for (i = s->mb_num - 1; i >= 0; i--) {
1063  const int mb_xy = s->mb_index2xy[i];
1064  int error = s->error_status_table[mb_xy];
1065 
1066  if (!s->mbskip_table || !s->mbskip_table[mb_xy]) // FIXME partition specific
1067  distance++;
1068  if (error & (1 << error_type))
1069  distance = 0;
1070 
1071  if (s->partitioned_frame) {
1072  if (distance < threshold_part[error_type - 1])
1073  s->error_status_table[mb_xy] |= 1 << error_type;
1074  } else {
1075  if (distance < threshold)
1076  s->error_status_table[mb_xy] |= 1 << error_type;
1077  }
1078 
1079  if (error & VP_START)
1080  distance = 9999999;
1081  }
1082  }
1083 #endif
1084 
1085  /* forward mark errors */
1086  error = 0;
1087  for (i = 0; i < s->mb_num; i++) {
1088  const int mb_xy = s->mb_index2xy[i];
1089  int old_error = s->error_status_table[mb_xy];
1090 
1091  if (old_error & VP_START) {
1092  error = old_error & ER_MB_ERROR;
1093  } else {
1094  error |= old_error & ER_MB_ERROR;
1095  s->error_status_table[mb_xy] |= error;
1096  }
1097  }
1098 #if 1
1099  /* handle not partitioned case */
1100  if (!s->partitioned_frame) {
1101  for (i = 0; i < s->mb_num; i++) {
1102  const int mb_xy = s->mb_index2xy[i];
1103  int error = s->error_status_table[mb_xy];
1104  if (error & ER_MB_ERROR)
1105  error |= ER_MB_ERROR;
1106  s->error_status_table[mb_xy] = error;
1107  }
1108  }
1109 #endif
1110 
1111  dc_error = ac_error = mv_error = 0;
1112  for (i = 0; i < s->mb_num; i++) {
1113  const int mb_xy = s->mb_index2xy[i];
1114  int error = s->error_status_table[mb_xy];
1115  if (error & ER_DC_ERROR)
1116  dc_error++;
1117  if (error & ER_AC_ERROR)
1118  ac_error++;
1119  if (error & ER_MV_ERROR)
1120  mv_error++;
1121  }
1122  av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors in %c frame\n",
1123  dc_error, ac_error, mv_error, av_get_picture_type_char(s->cur_pic.f->pict_type));
1124 
1125  is_intra_likely = is_intra_more_likely(s);
1126 
1127  /* set unknown mb-type to most likely */
1128  for (i = 0; i < s->mb_num; i++) {
1129  const int mb_xy = s->mb_index2xy[i];
1130  int error = s->error_status_table[mb_xy];
1131  if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
1132  continue;
1133 
1134  if (is_intra_likely)
1135  s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
1136  else
1137  s->cur_pic.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
1138  }
1139 
1140  // change inter to intra blocks if no reference frames are available
1141  if (!(s->last_pic.f && s->last_pic.f->data[0]) &&
1142  !(s->next_pic.f && s->next_pic.f->data[0]))
1143  for (i = 0; i < s->mb_num; i++) {
1144  const int mb_xy = s->mb_index2xy[i];
1145  if (!IS_INTRA(s->cur_pic.mb_type[mb_xy]))
1146  s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
1147  }
1148 
1149  /* handle inter blocks with damaged AC */
1150  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1151  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1152  const int mb_xy = mb_x + mb_y * s->mb_stride;
1153  const int mb_type = s->cur_pic.mb_type[mb_xy];
1154  const int dir = !(s->last_pic.f && s->last_pic.f->data[0]);
1155  const int mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
1156  int mv_type;
1157 
1158  int error = s->error_status_table[mb_xy];
1159 
1160  if (IS_INTRA(mb_type))
1161  continue; // intra
1162  if (error & ER_MV_ERROR)
1163  continue; // inter with damaged MV
1164  if (!(error & ER_AC_ERROR))
1165  continue; // undamaged inter
1166 
1167  if (IS_8X8(mb_type)) {
1168  int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride;
1169  int j;
1170  mv_type = MV_TYPE_8X8;
1171  for (j = 0; j < 4; j++) {
1172  s->mv[0][j][0] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
1173  s->mv[0][j][1] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
1174  }
1175  } else {
1176  mv_type = MV_TYPE_16X16;
1177  s->mv[0][0][0] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
1178  s->mv[0][0][1] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
1179  }
1180 
1181  s->decode_mb(s->opaque, 0 /* FIXME H.264 partitioned slices need this set */,
1182  mv_dir, mv_type, &s->mv, mb_x, mb_y, 0, 0);
1183  }
1184  }
1185 
1186  /* guess MVs */
1187  if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_B) {
1188  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1189  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1190  int xy = mb_x * 2 + mb_y * 2 * s->b8_stride;
1191  const int mb_xy = mb_x + mb_y * s->mb_stride;
1192  const int mb_type = s->cur_pic.mb_type[mb_xy];
1193  int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
1194 
1195  int error = s->error_status_table[mb_xy];
1196 
1197  if (IS_INTRA(mb_type))
1198  continue;
1199  if (!(error & ER_MV_ERROR))
1200  continue; // inter with undamaged MV
1201  if (!(error & ER_AC_ERROR))
1202  continue; // undamaged inter
1203 
1204  if (!(s->last_pic.f && s->last_pic.f->data[0]))
1205  mv_dir &= ~MV_DIR_FORWARD;
1206  if (!(s->next_pic.f && s->next_pic.f->data[0]))
1207  mv_dir &= ~MV_DIR_BACKWARD;
1208 
1209  if (s->pp_time) {
1210  int time_pp = s->pp_time;
1211  int time_pb = s->pb_time;
1212 
1213  av_assert0(s->avctx->codec_id != AV_CODEC_ID_H264);
1214  ff_thread_await_progress(s->next_pic.tf, mb_y, 0);
1215 
1216  s->mv[0][0][0] = s->next_pic.motion_val[0][xy][0] * time_pb / time_pp;
1217  s->mv[0][0][1] = s->next_pic.motion_val[0][xy][1] * time_pb / time_pp;
1218  s->mv[1][0][0] = s->next_pic.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
1219  s->mv[1][0][1] = s->next_pic.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
1220  } else {
1221  s->mv[0][0][0] = 0;
1222  s->mv[0][0][1] = 0;
1223  s->mv[1][0][0] = 0;
1224  s->mv[1][0][1] = 0;
1225  }
1226 
1227  s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
1228  mb_x, mb_y, 0, 0);
1229  }
1230  }
1231  } else
1232  guess_mv(s);
1233 
1234  /* the filters below manipulate raw image, skip them */
1235  if (CONFIG_XVMC && s->avctx->hwaccel && s->avctx->hwaccel->decode_mb)
1236  goto ec_clean;
1237  /* fill DC for inter blocks */
1238  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1239  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1240  int dc, dcu, dcv, y, n;
1241  int16_t *dc_ptr;
1242  uint8_t *dest_y, *dest_cb, *dest_cr;
1243  const int mb_xy = mb_x + mb_y * s->mb_stride;
1244  const int mb_type = s->cur_pic.mb_type[mb_xy];
1245 
1246  // error = s->error_status_table[mb_xy];
1247 
1248  if (IS_INTRA(mb_type) && s->partitioned_frame)
1249  continue;
1250  // if (error & ER_MV_ERROR)
1251  // continue; // inter data damaged FIXME is this good?
1252 
1253  dest_y = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
1254  dest_cb = s->cur_pic.f->data[1] + mb_x * 8 + mb_y * 8 * linesize[1];
1255  dest_cr = s->cur_pic.f->data[2] + mb_x * 8 + mb_y * 8 * linesize[2];
1256 
1257  dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride];
1258  for (n = 0; n < 4; n++) {
1259  dc = 0;
1260  for (y = 0; y < 8; y++) {
1261  int x;
1262  for (x = 0; x < 8; x++)
1263  dc += dest_y[x + (n & 1) * 8 +
1264  (y + (n >> 1) * 8) * linesize[0]];
1265  }
1266  dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3;
1267  }
1268 
1269  if (!s->cur_pic.f->data[2])
1270  continue;
1271 
1272  dcu = dcv = 0;
1273  for (y = 0; y < 8; y++) {
1274  int x;
1275  for (x = 0; x < 8; x++) {
1276  dcu += dest_cb[x + y * linesize[1]];
1277  dcv += dest_cr[x + y * linesize[2]];
1278  }
1279  }
1280  s->dc_val[1][mb_x + mb_y * s->mb_stride] = (dcu + 4) >> 3;
1281  s->dc_val[2][mb_x + mb_y * s->mb_stride] = (dcv + 4) >> 3;
1282  }
1283  }
1284 #if 1
1285  /* guess DC for damaged blocks */
1286  guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1);
1287  guess_dc(s, s->dc_val[1], s->mb_width , s->mb_height , s->mb_stride, 0);
1288  guess_dc(s, s->dc_val[2], s->mb_width , s->mb_height , s->mb_stride, 0);
1289 #endif
1290 
1291  /* filter luma DC */
1292  filter181(s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride);
1293 
1294 #if 1
1295  /* render DC only intra */
1296  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1297  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1298  uint8_t *dest_y, *dest_cb, *dest_cr;
1299  const int mb_xy = mb_x + mb_y * s->mb_stride;
1300  const int mb_type = s->cur_pic.mb_type[mb_xy];
1301 
1302  int error = s->error_status_table[mb_xy];
1303 
1304  if (IS_INTER(mb_type))
1305  continue;
1306  if (!(error & ER_AC_ERROR))
1307  continue; // undamaged
1308 
1309  dest_y = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
1310  dest_cb = s->cur_pic.f->data[1] + mb_x * 8 + mb_y * 8 * linesize[1];
1311  dest_cr = s->cur_pic.f->data[2] + mb_x * 8 + mb_y * 8 * linesize[2];
1312  if (!s->cur_pic.f->data[2])
1313  dest_cb = dest_cr = NULL;
1314 
1315  put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
1316  }
1317  }
1318 #endif
1319 
1320  if (s->avctx->error_concealment & FF_EC_DEBLOCK) {
1321  /* filter horizontal block boundaries */
1322  h_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2,
1323  s->mb_height * 2, linesize[0], 1);
1324 
1325  /* filter vertical block boundaries */
1326  v_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2,
1327  s->mb_height * 2, linesize[0], 1);
1328 
1329  if (s->cur_pic.f->data[2]) {
1330  h_block_filter(s, s->cur_pic.f->data[1], s->mb_width,
1331  s->mb_height, linesize[1], 0);
1332  h_block_filter(s, s->cur_pic.f->data[2], s->mb_width,
1333  s->mb_height, linesize[2], 0);
1334  v_block_filter(s, s->cur_pic.f->data[1], s->mb_width,
1335  s->mb_height, linesize[1], 0);
1336  v_block_filter(s, s->cur_pic.f->data[2], s->mb_width,
1337  s->mb_height, linesize[2], 0);
1338  }
1339  }
1340 
1341 ec_clean:
1342  /* clean a few tables */
1343  for (i = 0; i < s->mb_num; i++) {
1344  const int mb_xy = s->mb_index2xy[i];
1345  int error = s->error_status_table[mb_xy];
1346 
1347  if (s->mbskip_table && s->cur_pic.f->pict_type != AV_PICTURE_TYPE_B &&
1348  (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) {
1349  s->mbskip_table[mb_xy] = 0;
1350  }
1351  if (s->mbintra_table)
1352  s->mbintra_table[mb_xy] = 1;
1353  }
1354 
1355  for (i = 0; i < 2; i++) {
1356  av_buffer_unref(&s->ref_index_buf[i]);
1357  av_buffer_unref(&s->motion_val_buf[i]);
1358  s->cur_pic.ref_index[i] = NULL;
1359  s->cur_pic.motion_val[i] = NULL;
1360  }
1361 
1362  memset(&s->cur_pic, 0, sizeof(ERPicture));
1363  memset(&s->last_pic, 0, sizeof(ERPicture));
1364  memset(&s->next_pic, 0, sizeof(ERPicture));
1365 }