[FFmpeg-cvslog] error_resilience: decouple ER from MpegEncContext

Anton Khirnov <git at videolan.org>
Sat Feb 16 18:59:03 CET 2013


ffmpeg | branch: master | Anton Khirnov <anton at khirnov.net> | Sat Feb  2 20:42:07 2013 +0100 | [54974c62982ae827becdbdb9b620b7ba75d079a0] | committer: Anton Khirnov

error_resilience: decouple ER from MpegEncContext

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=54974c62982ae827becdbdb9b620b7ba75d079a0
---
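
The heart of the change is the new public header libavcodec/error_resilience.h further down: the concealment code now operates on a self-contained ERContext that the owning decoder fills in, and it reconstructs concealed macroblocks through a decode_mb() callback instead of calling ff_MPV_decode_mb() / ff_h264_hl_decode_mb() directly (h264.c installs h264_er_decode_mb() for this, and the MPEG-family decoders now go through a new ff_mpeg_er_frame_start() helper). Below is a minimal usage sketch for a hypothetical decoder; only the ERContext fields and the ff_er_*() entry points are taken from the patch, everything named My*/my_* is illustrative.

/*
 * Sketch only: MyDecContext, my_er_decode_mb() and my_decode_frame() are
 * hypothetical names; the ERContext fields and ff_er_* calls come from the
 * error_resilience.h added by this commit.
 */
#include "avcodec.h"
#include "error_resilience.h"

typedef struct MyDecContext {
    AVCodecContext *avctx;
    ERContext er;
    struct Picture *cur_pic, *last_pic, *next_pic;  /* hypothetical fields */
} MyDecContext;

/* Callback the ER code uses to reconstruct each concealed macroblock. */
static void my_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                            int (*mv)[2][4][2],
                            int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    MyDecContext *ctx = opaque;
    (void)ctx;
    /* render the MB at (mb_x, mb_y) from (*mv) into er.cur_pic here */
}

static int my_decode_frame(MyDecContext *ctx)
{
    ERContext *er = &ctx->er;

    /* Geometry and tables (mb_width, mb_height, mb_stride, b8_stride,
     * mb_num, mb_index2xy, error_status_table, er_temp_buffer, ...) are
     * assumed to have been filled in once at init time. */
    er->avctx     = ctx->avctx;
    er->decode_mb = my_er_decode_mb;
    er->opaque    = ctx;

    er->cur_pic  = ctx->cur_pic;   /* per-frame picture pointers; */
    er->last_pic = ctx->last_pic;  /* ff_er_frame_end() resets them to NULL */
    er->next_pic = ctx->next_pic;

    ff_er_frame_start(er);

    /* ... decode the slices, then report how each of them ended ... */
    ff_er_add_slice(er, 0, 0, er->mb_width - 1, er->mb_height - 1, ER_MB_END);

    ff_er_frame_end(er);           /* conceal whatever was flagged damaged */
    return 0;
}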

 libavcodec/error_resilience.c |  407 +++++++++++++++++------------------------
 libavcodec/error_resilience.h |   74 ++++++++
 libavcodec/h261dec.c          |    2 +-
 libavcodec/h263dec.c          |   20 +-
 libavcodec/h264.c             |   65 +++++--
 libavcodec/intrax8.c          |    2 +-
 libavcodec/mpeg12.c           |   20 +-
 libavcodec/mpeg4videodec.c    |   10 +-
 libavcodec/mpegvideo.c        |   95 +++++++++-
 libavcodec/mpegvideo.h        |   23 +--
 libavcodec/mpegvideo_enc.c    |    2 +-
 libavcodec/mss2.c             |    4 +-
 libavcodec/rv10.c             |    8 +-
 libavcodec/rv34.c             |   12 +-
 libavcodec/vc1dec.c           |   22 +--
 15 files changed, 431 insertions(+), 335 deletions(-)

diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c
index 67552df..ff25c91 100644
--- a/libavcodec/error_resilience.c
+++ b/libavcodec/error_resilience.c
@@ -29,58 +29,21 @@
 
 #include "avcodec.h"
 #include "dsputil.h"
+#include "error_resilience.h"
 #include "mpegvideo.h"
-#include "h264.h"
 #include "rectangle.h"
 #include "thread.h"
 
-/*
- * H264 redefines mb_intra so it is not mistakely used (its uninitialized in h264)
- * but error concealment must support both h264 and h263 thus we must undo this
- */
-#undef mb_intra
-
-static void decode_mb(MpegEncContext *s, int ref)
-{
-    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y *  16                       * s->linesize)   + s->mb_x *  16;
-    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
-    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
-
-    if (CONFIG_H264_DECODER && s->codec_id == AV_CODEC_ID_H264) {
-        H264Context *h = (void*)s;
-        h->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
-        memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
-        assert(ref >= 0);
-        /* FIXME: It is possible albeit uncommon that slice references
-         * differ between slices. We take the easy approach and ignore
-         * it for now. If this turns out to have any relevance in
-         * practice then correct remapping should be added. */
-        if (ref >= h->ref_count[0])
-            ref = 0;
-        fill_rectangle(&s->current_picture.f.ref_index[0][4 * h->mb_xy],
-                       2, 2, 2, ref, 1);
-        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
-        fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
-                       pack16to32(s->mv[0][0][0], s->mv[0][0][1]), 4);
-        assert(!FRAME_MBAFF);
-        ff_h264_hl_decode_mb(h);
-    } else {
-        assert(ref == 0);
-        ff_MPV_decode_mb(s, s->block);
-    }
-}
-
 /**
  * @param stride the number of MVs to get to the next row
  * @param mv_step the number of MVs per row or column in a macroblock
  */
-static void set_mv_strides(MpegEncContext *s, int *mv_step, int *stride)
+static void set_mv_strides(ERContext *s, int *mv_step, int *stride)
 {
-    if (s->codec_id == AV_CODEC_ID_H264) {
-        H264Context *h = (void*)s;
+    if (s->avctx->codec_id == AV_CODEC_ID_H264) {
         assert(s->quarter_sample);
         *mv_step = 4;
-        *stride  = h->b_stride;
+        *stride  = s->mb_width * 4;
     } else {
         *mv_step = 2;
         *stride  = s->b8_stride;
@@ -90,9 +53,10 @@ static void set_mv_strides(MpegEncContext *s, int *mv_step, int *stride)
 /**
  * Replace the current MB with a flat dc-only version.
  */
-static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb,
+static void put_dc(ERContext *s, uint8_t *dest_y, uint8_t *dest_cb,
                    uint8_t *dest_cr, int mb_x, int mb_y)
 {
+    int *linesize = s->cur_pic->f.linesize;
     int dc, dcu, dcv, y, i;
     for (i = 0; i < 4; i++) {
         dc = s->dc_val[0][mb_x * 2 + (i &  1) + (mb_y * 2 + (i >> 1)) * s->b8_stride];
@@ -103,7 +67,7 @@ static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb,
         for (y = 0; y < 8; y++) {
             int x;
             for (x = 0; x < 8; x++)
-                dest_y[x + (i &  1) * 8 + (y + (i >> 1) * 8) * s->linesize] = dc / 8;
+                dest_y[x + (i &  1) * 8 + (y + (i >> 1) * 8) * linesize[0]] = dc / 8;
         }
     }
     dcu = s->dc_val[1][mb_x + mb_y * s->mb_stride];
@@ -119,8 +83,8 @@ static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb,
     for (y = 0; y < 8; y++) {
         int x;
         for (x = 0; x < 8; x++) {
-            dest_cb[x + y * s->uvlinesize] = dcu / 8;
-            dest_cr[x + y * s->uvlinesize] = dcv / 8;
+            dest_cb[x + y * linesize[1]] = dcu / 8;
+            dest_cr[x + y * linesize[2]] = dcv / 8;
         }
     }
 }
@@ -166,7 +130,7 @@ static void filter181(int16_t *data, int width, int height, int stride)
  * @param w     width in 8 pixel blocks
  * @param h     height in 8 pixel blocks
  */
-static void guess_dc(MpegEncContext *s, int16_t *dc, int w,
+static void guess_dc(ERContext *s, int16_t *dc, int w,
                      int h, int stride, int is_luma)
 {
     int b_x, b_y;
@@ -180,7 +144,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w,
             mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
             error    = s->error_status_table[mb_index];
 
-            if (IS_INTER(s->current_picture.f.mb_type[mb_index]))
+            if (IS_INTER(s->cur_pic->f.mb_type[mb_index]))
                 continue; // inter
             if (!(error & ER_DC_ERROR))
                 continue; // dc-ok
@@ -189,7 +153,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w,
             for (j = b_x + 1; j < w; j++) {
                 int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
                 int error_j    = s->error_status_table[mb_index_j];
-                int intra_j    = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
+                int intra_j    = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
                 if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                     color[0]    = dc[j + b_y * stride];
                     distance[0] = j - b_x;
@@ -201,7 +165,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w,
             for (j = b_x - 1; j >= 0; j--) {
                 int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
                 int error_j    = s->error_status_table[mb_index_j];
-                int intra_j    = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
+                int intra_j    = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
                 if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                     color[1]    = dc[j + b_y * stride];
                     distance[1] = b_x - j;
@@ -213,7 +177,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w,
             for (j = b_y + 1; j < h; j++) {
                 int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
                 int error_j    = s->error_status_table[mb_index_j];
-                int intra_j    = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
+                int intra_j    = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
 
                 if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                     color[2]    = dc[b_x + j * stride];
@@ -226,7 +190,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w,
             for (j = b_y - 1; j >= 0; j--) {
                 int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
                 int error_j    = s->error_status_table[mb_index_j];
-                int intra_j    = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
+                int intra_j    = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
                 if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                     color[3]    = dc[b_x + j * stride];
                     distance[3] = b_y - j;
@@ -252,7 +216,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w,
  * @param w     width in 8 pixel blocks
  * @param h     height in 8 pixel blocks
  */
-static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w,
+static void h_block_filter(ERContext *s, uint8_t *dst, int w,
                            int h, int stride, int is_luma)
 {
     int b_x, b_y, mvx_stride, mvy_stride;
@@ -266,13 +230,13 @@ static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w,
             int y;
             int left_status  = s->error_status_table[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride];
             int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
-            int left_intra   = IS_INTRA(s->current_picture.f.mb_type[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
-            int right_intra  = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+            int left_intra   = IS_INTRA(s->cur_pic->f.mb_type[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+            int right_intra  = IS_INTRA(s->cur_pic->f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
             int left_damage  = left_status & ER_MB_ERROR;
             int right_damage = right_status & ER_MB_ERROR;
             int offset       = b_x * 8 + b_y * stride * 8;
-            int16_t *left_mv  = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride *  b_x];
-            int16_t *right_mv = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
+            int16_t *left_mv  = s->cur_pic->f.motion_val[0][mvy_stride * b_y + mvx_stride *  b_x];
+            int16_t *right_mv = s->cur_pic->f.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
             if (!(left_damage || right_damage))
                 continue; // both undamaged
             if ((!left_intra) && (!right_intra) &&
@@ -320,7 +284,7 @@ static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w,
  * @param w     width in 8 pixel blocks
  * @param h     height in 8 pixel blocks
  */
-static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h,
+static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h,
                            int stride, int is_luma)
 {
     int b_x, b_y, mvx_stride, mvy_stride;
@@ -334,14 +298,14 @@ static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h,
             int x;
             int top_status    = s->error_status_table[(b_x >> is_luma) +  (b_y      >> is_luma) * s->mb_stride];
             int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
-            int top_intra     = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride]);
-            int bottom_intra  = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
+            int top_intra     = IS_INTRA(s->cur_pic->f.mb_type[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride]);
+            int bottom_intra  = IS_INTRA(s->cur_pic->f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
             int top_damage    = top_status & ER_MB_ERROR;
             int bottom_damage = bottom_status & ER_MB_ERROR;
             int offset        = b_x * 8 + b_y * stride * 8;
 
-            int16_t *top_mv    = s->current_picture.f.motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
-            int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
+            int16_t *top_mv    = s->cur_pic->f.motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
+            int16_t *bottom_mv = s->cur_pic->f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
 
             if (!(top_damage || bottom_damage))
                 continue; // both undamaged
@@ -386,7 +350,7 @@ static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h,
     }
 }
 
-static void guess_mv(MpegEncContext *s)
+static void guess_mv(ERContext *s)
 {
     uint8_t *fixed = s->er_temp_buffer;
 #define MV_FROZEN    3
@@ -406,7 +370,7 @@ static void guess_mv(MpegEncContext *s)
         int f = 0;
         int error = s->error_status_table[mb_xy];
 
-        if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
+        if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
             f = MV_FROZEN; // intra // FIXME check
         if (!(error & ER_MV_ERROR))
             f = MV_FROZEN; // inter with undamaged MV
@@ -419,32 +383,19 @@ static void guess_mv(MpegEncContext *s)
     if ((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) ||
         num_avail <= mb_width / 2) {
         for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
-            s->mb_x = 0;
-            s->mb_y = mb_y;
-            ff_init_block_index(s);
             for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                 const int mb_xy = mb_x + mb_y * s->mb_stride;
+                int mv_dir = (s->last_pic && s->last_pic->f.data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
 
-                ff_update_block_index(s);
-
-                if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
+                if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
                     continue;
                 if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
                     continue;
 
-                s->mv_dir     = s->last_picture.f.data[0] ? MV_DIR_FORWARD
-                                                          : MV_DIR_BACKWARD;
-                s->mb_intra   = 0;
-                s->mv_type    = MV_TYPE_16X16;
-                s->mb_skipped = 0;
-
-                s->dsp.clear_blocks(s->block[0]);
-
-                s->mb_x        = mb_x;
-                s->mb_y        = mb_y;
                 s->mv[0][0][0] = 0;
                 s->mv[0][0][1] = 0;
-                decode_mb(s, 0);
+                s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
+                             mb_x, mb_y, 0, 0);
             }
         }
         return;
@@ -461,9 +412,6 @@ static void guess_mv(MpegEncContext *s)
 
             changed = 0;
             for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
-                s->mb_x = 0;
-                s->mb_y = mb_y;
-                ff_init_block_index(s);
                 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                     const int mb_xy        = mb_x + mb_y * s->mb_stride;
                     int mv_predictor[8][2] = { { 0 } };
@@ -475,15 +423,13 @@ static void guess_mv(MpegEncContext *s)
                     const int mot_index    = (mb_x + mb_y * mot_stride) * mot_step;
                     int prev_x, prev_y, prev_ref;
 
-                    ff_update_block_index(s);
-
                     if ((mb_x ^ mb_y ^ pass) & 1)
                         continue;
 
                     if (fixed[mb_xy] == MV_FROZEN)
                         continue;
-                    assert(!IS_INTRA(s->current_picture.f.mb_type[mb_xy]));
-                    assert(s->last_picture_ptr && s->last_picture_ptr->f.data[0]);
+                    assert(!IS_INTRA(s->cur_pic->f.mb_type[mb_xy]));
+                    assert(s->last_pic && s->last_pic->f.data[0]);
 
                     j = 0;
                     if (mb_x > 0             && fixed[mb_xy - 1]         == MV_FROZEN)
@@ -513,38 +459,38 @@ static void guess_mv(MpegEncContext *s)
 
                     if (mb_x > 0 && fixed[mb_xy - 1]) {
                         mv_predictor[pred_count][0] =
-                            s->current_picture.f.motion_val[0][mot_index - mot_step][0];
+                            s->cur_pic->f.motion_val[0][mot_index - mot_step][0];
                         mv_predictor[pred_count][1] =
-                            s->current_picture.f.motion_val[0][mot_index - mot_step][1];
+                            s->cur_pic->f.motion_val[0][mot_index - mot_step][1];
                         ref[pred_count] =
-                            s->current_picture.f.ref_index[0][4 * (mb_xy - 1)];
+                            s->cur_pic->f.ref_index[0][4 * (mb_xy - 1)];
                         pred_count++;
                     }
                     if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                         mv_predictor[pred_count][0] =
-                            s->current_picture.f.motion_val[0][mot_index + mot_step][0];
+                            s->cur_pic->f.motion_val[0][mot_index + mot_step][0];
                         mv_predictor[pred_count][1] =
-                            s->current_picture.f.motion_val[0][mot_index + mot_step][1];
+                            s->cur_pic->f.motion_val[0][mot_index + mot_step][1];
                         ref[pred_count] =
-                            s->current_picture.f.ref_index[0][4 * (mb_xy + 1)];
+                            s->cur_pic->f.ref_index[0][4 * (mb_xy + 1)];
                         pred_count++;
                     }
                     if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                         mv_predictor[pred_count][0] =
-                            s->current_picture.f.motion_val[0][mot_index - mot_stride * mot_step][0];
+                            s->cur_pic->f.motion_val[0][mot_index - mot_stride * mot_step][0];
                         mv_predictor[pred_count][1] =
-                            s->current_picture.f.motion_val[0][mot_index - mot_stride * mot_step][1];
+                            s->cur_pic->f.motion_val[0][mot_index - mot_stride * mot_step][1];
                         ref[pred_count] =
-                            s->current_picture.f.ref_index[0][4 * (mb_xy - s->mb_stride)];
+                            s->cur_pic->f.ref_index[0][4 * (mb_xy - s->mb_stride)];
                         pred_count++;
                     }
                     if (mb_y + 1<mb_height && fixed[mb_xy + mb_stride]) {
                         mv_predictor[pred_count][0] =
-                            s->current_picture.f.motion_val[0][mot_index + mot_stride * mot_step][0];
+                            s->cur_pic->f.motion_val[0][mot_index + mot_stride * mot_step][0];
                         mv_predictor[pred_count][1] =
-                            s->current_picture.f.motion_val[0][mot_index + mot_stride * mot_step][1];
+                            s->cur_pic->f.motion_val[0][mot_index + mot_stride * mot_step][1];
                         ref[pred_count] =
-                            s->current_picture.f.ref_index[0][4 * (mb_xy + s->mb_stride)];
+                            s->cur_pic->f.ref_index[0][4 * (mb_xy + s->mb_stride)];
                         pred_count++;
                     }
                     if (pred_count == 0)
@@ -602,19 +548,19 @@ skip_mean_and_median:
                         if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                             // FIXME
                         } else {
-                            ff_thread_await_progress(&s->last_picture_ptr->f,
+                            ff_thread_await_progress(&s->last_pic->f,
                                                      mb_y, 0);
                         }
-                        if (!s->last_picture.f.motion_val[0] ||
-                            !s->last_picture.f.ref_index[0])
+                        if (!s->last_pic->f.motion_val[0] ||
+                            !s->last_pic->f.ref_index[0])
                             goto skip_last_mv;
-                        prev_x   = s->last_picture.f.motion_val[0][mot_index][0];
-                        prev_y   = s->last_picture.f.motion_val[0][mot_index][1];
-                        prev_ref = s->last_picture.f.ref_index[0][4 * mb_xy];
+                        prev_x   = s->last_pic->f.motion_val[0][mot_index][0];
+                        prev_y   = s->last_pic->f.motion_val[0][mot_index][1];
+                        prev_ref = s->last_pic->f.ref_index[0][4 * mb_xy];
                     } else {
-                        prev_x   = s->current_picture.f.motion_val[0][mot_index][0];
-                        prev_y   = s->current_picture.f.motion_val[0][mot_index][1];
-                        prev_ref = s->current_picture.f.ref_index[0][4 * mb_xy];
+                        prev_x   = s->cur_pic->f.motion_val[0][mot_index][0];
+                        prev_y   = s->cur_pic->f.motion_val[0][mot_index][1];
+                        prev_ref = s->cur_pic->f.ref_index[0][4 * mb_xy];
                     }
 
                     /* last MV */
@@ -624,54 +570,47 @@ skip_mean_and_median:
                     pred_count++;
 
 skip_last_mv:
-                    s->mv_dir     = MV_DIR_FORWARD;
-                    s->mb_intra   = 0;
-                    s->mv_type    = MV_TYPE_16X16;
-                    s->mb_skipped = 0;
-
-                    s->dsp.clear_blocks(s->block[0]);
-
-                    s->mb_x = mb_x;
-                    s->mb_y = mb_y;
 
                     for (j = 0; j < pred_count; j++) {
+                        int *linesize = s->cur_pic->f.linesize;
                         int score = 0;
-                        uint8_t *src = s->current_picture.f.data[0] +
-                                       mb_x * 16 + mb_y * 16 * s->linesize;
+                        uint8_t *src = s->cur_pic->f.data[0] +
+                                       mb_x * 16 + mb_y * 16 * linesize[0];
 
-                        s->current_picture.f.motion_val[0][mot_index][0] =
+                        s->cur_pic->f.motion_val[0][mot_index][0] =
                             s->mv[0][0][0] = mv_predictor[j][0];
-                        s->current_picture.f.motion_val[0][mot_index][1] =
+                        s->cur_pic->f.motion_val[0][mot_index][1] =
                             s->mv[0][0][1] = mv_predictor[j][1];
 
                         // predictor intra or otherwise not available
                         if (ref[j] < 0)
                             continue;
 
-                        decode_mb(s, ref[j]);
+                        s->decode_mb(s->opaque, ref[j], MV_DIR_FORWARD,
+                                     MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);
 
                         if (mb_x > 0 && fixed[mb_xy - 1]) {
                             int k;
                             for (k = 0; k < 16; k++)
-                                score += FFABS(src[k * s->linesize - 1] -
-                                               src[k * s->linesize]);
+                                score += FFABS(src[k * linesize[0] - 1] -
+                                               src[k * linesize[0]]);
                         }
                         if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                             int k;
                             for (k = 0; k < 16; k++)
-                                score += FFABS(src[k * s->linesize + 15] -
-                                               src[k * s->linesize + 16]);
+                                score += FFABS(src[k * linesize[0] + 15] -
+                                               src[k * linesize[0] + 16]);
                         }
                         if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                             int k;
                             for (k = 0; k < 16; k++)
-                                score += FFABS(src[k - s->linesize] - src[k]);
+                                score += FFABS(src[k - linesize[0]] - src[k]);
                         }
                         if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                             int k;
                             for (k = 0; k < 16; k++)
-                                score += FFABS(src[k + s->linesize * 15] -
-                                               src[k + s->linesize * 16]);
+                                score += FFABS(src[k + linesize[0] * 15] -
+                                               src[k + linesize[0] * 16]);
                         }
 
                         if (score <= best_score) { // <= will favor the last MV
@@ -685,11 +624,12 @@ skip_last_mv:
 
                     for (i = 0; i < mot_step; i++)
                         for (j = 0; j < mot_step; j++) {
-                            s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
-                            s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
+                            s->cur_pic->f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
+                            s->cur_pic->f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
                         }
 
-                    decode_mb(s, ref[best_pred]);
+                    s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD,
+                                 MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);
 
 
                     if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) {
@@ -712,11 +652,11 @@ skip_last_mv:
     }
 }
 
-static int is_intra_more_likely(MpegEncContext *s)
+static int is_intra_more_likely(ERContext *s)
 {
     int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
 
-    if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0])
+    if (!s->last_pic || !s->last_pic->f.data[0])
         return 1; // no previous frame available -> use spatial prediction
 
     undamaged_count = 0;
@@ -727,12 +667,8 @@ static int is_intra_more_likely(MpegEncContext *s)
             undamaged_count++;
     }
 
-    if (s->codec_id == AV_CODEC_ID_H264) {
-        H264Context *h = (void*) s;
-        if (h->list_count <= 0 || h->ref_count[0] <= 0 ||
-            !h->ref_list[0][0].f.data[0])
-            return 1;
-    }
+    if (s->avctx->codec_id == AV_CODEC_ID_H264 && s->ref_count <= 0)
+        return 1;
 
     if (undamaged_count < 5)
         return 0; // almost all MBs damaged -> use temporal prediction
@@ -740,7 +676,7 @@ static int is_intra_more_likely(MpegEncContext *s)
     // prevent dsp.sad() check, that requires access to the image
     if (CONFIG_MPEG_XVMC_DECODER    &&
         s->avctx->xvmc_acceleration &&
-        s->pict_type == AV_PICTURE_TYPE_I)
+        s->cur_pic->f.pict_type == AV_PICTURE_TYPE_I)
         return 1;
 
     skip_amount     = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
@@ -761,25 +697,25 @@ static int is_intra_more_likely(MpegEncContext *s)
             if ((j % skip_amount) != 0)
                 continue;
 
-            if (s->pict_type == AV_PICTURE_TYPE_I) {
-                uint8_t *mb_ptr      = s->current_picture.f.data[0] +
-                                       mb_x * 16 + mb_y * 16 * s->linesize;
-                uint8_t *last_mb_ptr = s->last_picture.f.data[0] +
-                                       mb_x * 16 + mb_y * 16 * s->linesize;
+            if (s->cur_pic->f.pict_type == AV_PICTURE_TYPE_I) {
+                int *linesize = s->cur_pic->f.linesize;
+                uint8_t *mb_ptr      = s->cur_pic->f.data[0] +
+                                       mb_x * 16 + mb_y * 16 * linesize[0];
+                uint8_t *last_mb_ptr = s->last_pic->f.data[0] +
+                                       mb_x * 16 + mb_y * 16 * linesize[0];
 
                 if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                     // FIXME
                 } else {
-                    ff_thread_await_progress(&s->last_picture_ptr->f,
-                                             mb_y, 0);
+                    ff_thread_await_progress(&s->last_pic->f, mb_y, 0);
                 }
-                is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr,
-                                                 s->linesize, 16);
-                is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr,
-                                                 last_mb_ptr + s->linesize * 16,
-                                                 s->linesize, 16);
+                is_intra_likely += s->dsp->sad[0](NULL, last_mb_ptr, mb_ptr,
+                                                 linesize[0], 16);
+                is_intra_likely -= s->dsp->sad[0](NULL, last_mb_ptr,
+                                                 last_mb_ptr + linesize[0] * 16,
+                                                 linesize[0], 16);
             } else {
-                if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
+                if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
                    is_intra_likely++;
                 else
                    is_intra_likely--;
@@ -789,9 +725,9 @@ static int is_intra_more_likely(MpegEncContext *s)
     return is_intra_likely > 0;
 }
 
-void ff_er_frame_start(MpegEncContext *s)
+void ff_er_frame_start(ERContext *s)
 {
-    if (!s->err_recognition)
+    if (!s->avctx->err_recognition)
         return;
 
     memset(s->error_status_table, ER_MB_ERROR | VP_START | ER_MB_END,
@@ -807,7 +743,7 @@ void ff_er_frame_start(MpegEncContext *s)
  * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is
  *               assumed that no earlier end or error of the same type occurred
  */
-void ff_er_add_slice(MpegEncContext *s, int startx, int starty,
+void ff_er_add_slice(ERContext *s, int startx, int starty,
                      int endx, int endy, int status)
 {
     const int start_i  = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1);
@@ -825,7 +761,7 @@ void ff_er_add_slice(MpegEncContext *s, int startx, int starty,
         return;
     }
 
-    if (!s->err_recognition)
+    if (!s->avctx->err_recognition)
         return;
 
     mask &= ~VP_START;
@@ -875,37 +811,36 @@ void ff_er_add_slice(MpegEncContext *s, int startx, int starty,
     }
 }
 
-void ff_er_frame_end(MpegEncContext *s)
+void ff_er_frame_end(ERContext *s)
 {
+    int *linesize = s->cur_pic->f.linesize;
     int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
     int distance;
     int threshold_part[4] = { 100, 100, 100 };
     int threshold = 50;
     int is_intra_likely;
     int size = s->b8_stride * 2 * s->mb_height;
-    Picture *pic = s->current_picture_ptr;
 
     /* We do not support ER of field pictures yet,
      * though it should not crash if enabled. */
-    if (!s->err_recognition || s->error_count == 0                     ||
+    if (!s->avctx->err_recognition || s->error_count == 0              ||
         s->avctx->hwaccel                                              ||
         s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU          ||
-        s->picture_structure != PICT_FRAME                             ||
+        !s->cur_pic || s->cur_pic->field_picture                               ||
         s->error_count == 3 * s->mb_width *
                           (s->avctx->skip_top + s->avctx->skip_bottom)) {
         return;
     };
 
-    if (s->current_picture.f.motion_val[0] == NULL) {
+    if (s->cur_pic->f.motion_val[0] == NULL) {
         av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
 
         for (i = 0; i < 2; i++) {
-            pic->f.ref_index[i]     = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
-            pic->motion_val_base[i] = av_mallocz((size + 4) * 2 * sizeof(uint16_t));
-            pic->f.motion_val[i]    = pic->motion_val_base[i] + 4;
+            s->cur_pic->f.ref_index[i]     = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
+            s->cur_pic->motion_val_base[i] = av_mallocz((size + 4) * 2 * sizeof(uint16_t));
+            s->cur_pic->f.motion_val[i]    = s->cur_pic->motion_val_base[i] + 4;
         }
-        pic->f.motion_subsample_log2 = 3;
-        s->current_picture = *s->current_picture_ptr;
+        s->cur_pic->f.motion_subsample_log2 = 3;
     }
 
     if (s->avctx->debug & FF_DEBUG_ER) {
@@ -964,7 +899,7 @@ void ff_er_frame_end(MpegEncContext *s)
     }
 
     /* handle missing slices */
-    if (s->err_recognition & AV_EF_EXPLODE) {
+    if (s->avctx->err_recognition & AV_EF_EXPLODE) {
         int end_ok = 1;
 
         // FIXME + 100 hack
@@ -1063,30 +998,28 @@ void ff_er_frame_end(MpegEncContext *s)
             continue;
 
         if (is_intra_likely)
-            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
+            s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
         else
-            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+            s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
     }
 
     // change inter to intra blocks if no reference frames are available
-    if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0])
+    if (!(s->last_pic && s->last_pic->f.data[0]) &&
+        !(s->next_pic && s->next_pic->f.data[0]))
         for (i = 0; i < s->mb_num; i++) {
             const int mb_xy = s->mb_index2xy[i];
-            if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
-                s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
+            if (!IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
+                s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
         }
 
     /* handle inter blocks with damaged AC */
     for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
-        s->mb_x = 0;
-        s->mb_y = mb_y;
-        ff_init_block_index(s);
         for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
             const int mb_xy   = mb_x + mb_y * s->mb_stride;
-            const int mb_type = s->current_picture.f.mb_type[mb_xy];
-            int dir           = !s->last_picture.f.data[0];
-
-            ff_update_block_index(s);
+            const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+            const int dir     = !(s->last_pic && s->last_pic->f.data[0]);
+            const int mv_dir  = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
+            int mv_type;
 
             error = s->error_status_table[mb_xy];
 
@@ -1097,43 +1030,33 @@ void ff_er_frame_end(MpegEncContext *s)
             if (!(error & ER_AC_ERROR))
                 continue; // undamaged inter
 
-            s->mv_dir     = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
-            s->mb_intra   = 0;
-            s->mb_skipped = 0;
             if (IS_8X8(mb_type)) {
                 int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride;
                 int j;
-                s->mv_type = MV_TYPE_8X8;
+                mv_type = MV_TYPE_8X8;
                 for (j = 0; j < 4; j++) {
-                    s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
-                    s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
+                    s->mv[0][j][0] = s->cur_pic->f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
+                    s->mv[0][j][1] = s->cur_pic->f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
                 }
             } else {
-                s->mv_type     = MV_TYPE_16X16;
-                s->mv[0][0][0] = s->current_picture.f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
-                s->mv[0][0][1] = s->current_picture.f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
+                mv_type     = MV_TYPE_16X16;
+                s->mv[0][0][0] = s->cur_pic->f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
+                s->mv[0][0][1] = s->cur_pic->f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
             }
 
-            s->dsp.clear_blocks(s->block[0]);
-
-            s->mb_x = mb_x;
-            s->mb_y = mb_y;
-            decode_mb(s, 0 /* FIXME h264 partitioned slices need this set */);
+            s->decode_mb(s->opaque, 0 /* FIXME h264 partitioned slices need this set */,
+                         mv_dir, mv_type, &s->mv, mb_x, mb_y, 0, 0);
         }
     }
 
     /* guess MVs */
-    if (s->pict_type == AV_PICTURE_TYPE_B) {
+    if (s->cur_pic->f.pict_type == AV_PICTURE_TYPE_B) {
         for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
-            s->mb_x = 0;
-            s->mb_y = mb_y;
-            ff_init_block_index(s);
             for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                 int       xy      = mb_x * 2 + mb_y * 2 * s->b8_stride;
                 const int mb_xy   = mb_x + mb_y * s->mb_stride;
-                const int mb_type = s->current_picture.f.mb_type[mb_xy];
-
-                ff_update_block_index(s);
+                const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+                int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
 
                 error = s->error_status_table[mb_xy];
 
@@ -1144,24 +1067,21 @@ void ff_er_frame_end(MpegEncContext *s)
                 if (!(error & ER_AC_ERROR))
                     continue; // undamaged inter
 
-                s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
-                if (!s->last_picture.f.data[0])
-                    s->mv_dir &= ~MV_DIR_FORWARD;
-                if (!s->next_picture.f.data[0])
-                    s->mv_dir &= ~MV_DIR_BACKWARD;
-                s->mb_intra   = 0;
-                s->mv_type    = MV_TYPE_16X16;
-                s->mb_skipped = 0;
+                if (!(s->last_pic && s->last_pic->f.data[0]))
+                    mv_dir &= ~MV_DIR_FORWARD;
+                if (!(s->next_pic && s->next_pic->f.data[0]))
+                    mv_dir &= ~MV_DIR_BACKWARD;
 
                 if (s->pp_time) {
                     int time_pp = s->pp_time;
                     int time_pb = s->pb_time;
 
-                    ff_thread_await_progress(&s->next_picture_ptr->f, mb_y, 0);
-                    s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] *  time_pb            / time_pp;
-                    s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] *  time_pb            / time_pp;
-                    s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
-                    s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
+                    ff_thread_await_progress(&s->next_pic->f, mb_y, 0);
+
+                    s->mv[0][0][0] = s->next_pic->f.motion_val[0][xy][0] *  time_pb            / time_pp;
+                    s->mv[0][0][1] = s->next_pic->f.motion_val[0][xy][1] *  time_pb            / time_pp;
+                    s->mv[1][0][0] = s->next_pic->f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
+                    s->mv[1][0][1] = s->next_pic->f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
                 } else {
                     s->mv[0][0][0] = 0;
                     s->mv[0][0][1] = 0;
@@ -1169,10 +1089,8 @@ void ff_er_frame_end(MpegEncContext *s)
                     s->mv[1][0][1] = 0;
                 }
 
-                s->dsp.clear_blocks(s->block[0]);
-                s->mb_x = mb_x;
-                s->mb_y = mb_y;
-                decode_mb(s, 0);
+                s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
+                             mb_x, mb_y, 0, 0);
             }
         }
     } else
@@ -1188,7 +1106,7 @@ void ff_er_frame_end(MpegEncContext *s)
             int16_t *dc_ptr;
             uint8_t *dest_y, *dest_cb, *dest_cr;
             const int mb_xy   = mb_x + mb_y * s->mb_stride;
-            const int mb_type = s->current_picture.f.mb_type[mb_xy];
+            const int mb_type = s->cur_pic->f.mb_type[mb_xy];
 
             error = s->error_status_table[mb_xy];
 
@@ -1197,9 +1115,9 @@ void ff_er_frame_end(MpegEncContext *s)
             // if (error & ER_MV_ERROR)
             //     continue; // inter data damaged FIXME is this good?
 
-            dest_y  = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
-            dest_cb = s->current_picture.f.data[1] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
-            dest_cr = s->current_picture.f.data[2] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
+            dest_y  = s->cur_pic->f.data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
+            dest_cb = s->cur_pic->f.data[1] + mb_x *  8 + mb_y *  8 * linesize[1];
+            dest_cr = s->cur_pic->f.data[2] + mb_x *  8 + mb_y *  8 * linesize[2];
 
             dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride];
             for (n = 0; n < 4; n++) {
@@ -1208,7 +1126,7 @@ void ff_er_frame_end(MpegEncContext *s)
                     int x;
                     for (x = 0; x < 8; x++)
                        dc += dest_y[x + (n & 1) * 8 +
-                             (y + (n >> 1) * 8) * s->linesize];
+                             (y + (n >> 1) * 8) * linesize[0]];
                 }
                 dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3;
             }
@@ -1217,8 +1135,8 @@ void ff_er_frame_end(MpegEncContext *s)
             for (y = 0; y < 8; y++) {
                 int x;
                 for (x = 0; x < 8; x++) {
-                    dcu += dest_cb[x + y * s->uvlinesize];
-                    dcv += dest_cr[x + y * s->uvlinesize];
+                    dcu += dest_cb[x + y * linesize[1]];
+                    dcv += dest_cr[x + y * linesize[2]];
                 }
             }
             s->dc_val[1][mb_x + mb_y * s->mb_stride] = (dcu + 4) >> 3;
@@ -1239,7 +1157,7 @@ void ff_er_frame_end(MpegEncContext *s)
         for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
             uint8_t *dest_y, *dest_cb, *dest_cr;
             const int mb_xy   = mb_x + mb_y * s->mb_stride;
-            const int mb_type = s->current_picture.f.mb_type[mb_xy];
+            const int mb_type = s->cur_pic->f.mb_type[mb_xy];
 
             error = s->error_status_table[mb_xy];
 
@@ -1248,9 +1166,9 @@ void ff_er_frame_end(MpegEncContext *s)
             if (!(error & ER_AC_ERROR))
                 continue; // undamaged
 
-            dest_y  = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
-            dest_cb = s->current_picture.f.data[1] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
-            dest_cr = s->current_picture.f.data[2] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
+            dest_y  = s->cur_pic->f.data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
+            dest_cb = s->cur_pic->f.data[1] + mb_x *  8 + mb_y *  8 * linesize[1];
+            dest_cr = s->cur_pic->f.data[2] + mb_x *  8 + mb_y *  8 * linesize[2];
 
             put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
         }
@@ -1258,20 +1176,20 @@ void ff_er_frame_end(MpegEncContext *s)
 
     if (s->avctx->error_concealment & FF_EC_DEBLOCK) {
         /* filter horizontal block boundaries */
-        h_block_filter(s, s->current_picture.f.data[0], s->mb_width * 2,
-                       s->mb_height * 2, s->linesize, 1);
-        h_block_filter(s, s->current_picture.f.data[1], s->mb_width,
-                       s->mb_height  , s->uvlinesize, 0);
-        h_block_filter(s, s->current_picture.f.data[2], s->mb_width,
-                       s->mb_height  , s->uvlinesize, 0);
+        h_block_filter(s, s->cur_pic->f.data[0], s->mb_width * 2,
+                       s->mb_height * 2, linesize[0], 1);
+        h_block_filter(s, s->cur_pic->f.data[1], s->mb_width,
+                       s->mb_height, linesize[1], 0);
+        h_block_filter(s, s->cur_pic->f.data[2], s->mb_width,
+                       s->mb_height, linesize[2], 0);
 
         /* filter vertical block boundaries */
-        v_block_filter(s, s->current_picture.f.data[0], s->mb_width * 2,
-                       s->mb_height * 2, s->linesize, 1);
-        v_block_filter(s, s->current_picture.f.data[1], s->mb_width,
-                       s->mb_height  , s->uvlinesize, 0);
-        v_block_filter(s, s->current_picture.f.data[2], s->mb_width,
-                       s->mb_height  , s->uvlinesize, 0);
+        v_block_filter(s, s->cur_pic->f.data[0], s->mb_width * 2,
+                       s->mb_height * 2, linesize[0], 1);
+        v_block_filter(s, s->cur_pic->f.data[1], s->mb_width,
+                       s->mb_height, linesize[1], 0);
+        v_block_filter(s, s->cur_pic->f.data[2], s->mb_width,
+                       s->mb_height, linesize[2], 0);
     }
 
 ec_clean:
@@ -1280,10 +1198,13 @@ ec_clean:
         const int mb_xy = s->mb_index2xy[i];
         int       error = s->error_status_table[mb_xy];
 
-        if (s->pict_type != AV_PICTURE_TYPE_B &&
+        if (s->cur_pic->f.pict_type != AV_PICTURE_TYPE_B &&
             (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) {
             s->mbskip_table[mb_xy] = 0;
         }
         s->mbintra_table[mb_xy] = 1;
     }
+    s->cur_pic = NULL;
+    s->next_pic    = NULL;
+    s->last_pic    = NULL;
 }
diff --git a/libavcodec/error_resilience.h b/libavcodec/error_resilience.h
new file mode 100644
index 0000000..949c58f
--- /dev/null
+++ b/libavcodec/error_resilience.h
@@ -0,0 +1,74 @@
+/*
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_ERROR_RESILIENCE_H
+#define AVCODEC_ERROR_RESILIENCE_H
+
+///< current MB is the first after a resync marker
+#define VP_START               1
+#define ER_AC_ERROR            2
+#define ER_DC_ERROR            4
+#define ER_MV_ERROR            8
+#define ER_AC_END              16
+#define ER_DC_END              32
+#define ER_MV_END              64
+
+#define ER_MB_ERROR (ER_AC_ERROR|ER_DC_ERROR|ER_MV_ERROR)
+#define ER_MB_END   (ER_AC_END|ER_DC_END|ER_MV_END)
+
+typedef struct ERContext {
+    AVCodecContext *avctx;
+    DSPContext *dsp;
+
+    int *mb_index2xy;
+    int mb_num;
+    int mb_width, mb_height;
+    int mb_stride;
+    int b8_stride;
+
+    int error_count, error_occurred;
+    uint8_t *error_status_table;
+    uint8_t *er_temp_buffer;
+    int16_t *dc_val[3];
+    uint8_t *mbskip_table;
+    uint8_t *mbintra_table;
+    int mv[2][4][2];
+
+    struct Picture *cur_pic;
+    struct Picture *last_pic;
+    struct Picture *next_pic;
+
+    uint16_t pp_time;
+    uint16_t pb_time;
+    int quarter_sample;
+    int partitioned_frame;
+    int ref_count;
+
+    void (*decode_mb)(void *opaque, int ref, int mv_dir, int mv_type,
+                      int (*mv)[2][4][2],
+                      int mb_x, int mb_y, int mb_intra, int mb_skipped);
+    void *opaque;
+} ERContext;
+
+void ff_er_frame_start(ERContext *s);
+void ff_er_frame_end(ERContext *s);
+void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy,
+                     int status);
+
+#endif /* AVCODEC_ERROR_RESILIENCE_H */
diff --git a/libavcodec/h261dec.c b/libavcodec/h261dec.c
index 500f795..9935897 100644
--- a/libavcodec/h261dec.c
+++ b/libavcodec/h261dec.c
@@ -609,7 +609,7 @@ retry:
     if(ff_MPV_frame_start(s, avctx) < 0)
         return -1;
 
-    ff_er_frame_start(s);
+    ff_mpeg_er_frame_start(s);
 
     /* decode each macroblock */
     s->mb_x=0;
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index 0eaf4d7..b1a32b2 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -188,7 +188,7 @@ static int decode_slice(MpegEncContext *s){
         /* per-row end of slice checks */
         if(s->msmpeg4_version){
             if(s->resync_mb_y + s->slice_height == s->mb_y){
-                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
+                ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
 
                 return 0;
             }
@@ -229,7 +229,7 @@ static int decode_slice(MpegEncContext *s){
                     if(s->loop_filter)
                         ff_h263_loop_filter(s);
 
-                    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_END&part_mask);
+                    ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_END&part_mask);
 
                     s->padding_bug_score--;
 
@@ -242,11 +242,11 @@ static int decode_slice(MpegEncContext *s){
                     return 0;
                 }else if(ret==SLICE_NOEND){
                     av_log(s->avctx, AV_LOG_ERROR, "Slice mismatch at MB: %d\n", xy);
-                    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x+1, s->mb_y, ER_MB_END&part_mask);
+                    ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x+1, s->mb_y, ER_MB_END&part_mask);
                     return -1;
                 }
                 av_log(s->avctx, AV_LOG_ERROR, "Error at MB: %d\n", xy);
-                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR&part_mask);
+                ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR&part_mask);
 
                 return -1;
             }
@@ -325,7 +325,7 @@ static int decode_slice(MpegEncContext *s){
         else if(left<0){
             av_log(s->avctx, AV_LOG_ERROR, "overreading %d bits\n", -left);
         }else
-            ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
+            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
 
         return 0;
     }
@@ -334,7 +334,7 @@ static int decode_slice(MpegEncContext *s){
             get_bits_left(&s->gb),
             show_bits(&s->gb, 24), s->padding_bug_score);
 
-    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_END&part_mask);
+    ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_END&part_mask);
 
     return -1;
 }
@@ -638,7 +638,7 @@ retry:
             return -1;
     }
 
-    ff_er_frame_start(s);
+    ff_mpeg_er_frame_start(s);
 
     //the second part of the wmv2 header contains the MB skip bits which are stored in current_picture->mb_type
     //which is not available before ff_MPV_frame_start()
@@ -662,7 +662,7 @@ retry:
             if(ff_h263_resync(s)<0)
                 break;
             if (prev_y * s->mb_width + prev_x < s->mb_y * s->mb_width + s->mb_x)
-                s->error_occurred = 1;
+                s->er.error_occurred = 1;
         }
 
         if(s->msmpeg4_version<4 && s->h263_pred)
@@ -673,7 +673,7 @@ retry:
 
     if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type==AV_PICTURE_TYPE_I)
         if(!CONFIG_MSMPEG4_DECODER || ff_msmpeg4_decode_ext_header(s, buf_size) < 0){
-            s->error_status_table[s->mb_num-1]= ER_MB_ERROR;
+            s->er.error_status_table[s->mb_num - 1] = ER_MB_ERROR;
         }
 
     assert(s->bitstream_buffer_size==0);
@@ -710,7 +710,7 @@ frame_end:
     }
 
 intrax8_decoded:
-    ff_er_frame_end(s);
+    ff_er_frame_end(&s->er);
 
     if (avctx->hwaccel) {
         if (avctx->hwaccel->end_frame(avctx) < 0)
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 7a2fd69..4410f59 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -78,6 +78,33 @@ static const enum AVPixelFormat hwaccel_pixfmt_list_h264_jpeg_420[] = {
     AV_PIX_FMT_NONE
 };
 
+static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
+                              int (*mv)[2][4][2],
+                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
+{
+    H264Context    *h = opaque;
+    MpegEncContext *s = &h->s;
+
+    s->mb_x  = mb_x;
+    s->mb_y  = mb_y;
+    h->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
+    memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
+    assert(ref >= 0);
+    /* FIXME: It is possible albeit uncommon that slice references
+     * differ between slices. We take the easy approach and ignore
+     * it for now. If this turns out to have any relevance in
+     * practice then correct remapping should be added. */
+    if (ref >= h->ref_count[0])
+        ref = 0;
+    fill_rectangle(&s->current_picture.f.ref_index[0][4 * h->mb_xy],
+                   2, 2, 2, ref, 1);
+    fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
+    fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
+                   pack16to32(s->mv[0][0][0], s->mv[0][0][1]), 4);
+    assert(!FRAME_MBAFF);
+    ff_h264_hl_decode_mb(h);
+}
+
 /**
  * Check if the top & left blocks are available if needed and
  * change the dc mode so it only uses the available blocks.
@@ -960,6 +987,9 @@ static int context_init(H264Context *h)
     h->ref_cache[1][scan8[7]  + 1] =
     h->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
 
+    h->s.er.decode_mb = h264_er_decode_mb;
+    h->s.er.opaque    = h;
+
     return 0;
 
 fail:
@@ -1299,7 +1329,7 @@ int ff_h264_frame_start(H264Context *h)
 
     if (ff_MPV_frame_start(s, s->avctx) < 0)
         return -1;
-    ff_er_frame_start(s);
+    ff_mpeg_er_frame_start(s);
     /*
      * ff_MPV_frame_start uses pict_type to derive key_frame.
      * This is incorrect for H.264; IDR markings must be used.
@@ -2340,7 +2370,7 @@ static int field_end(H264Context *h, int in_setup)
      * causes problems for the first MB line, too.
      */
     if (!FIELD_PICTURE)
-        ff_er_frame_end(s);
+        ff_er_frame_end(&s->er);
 
     ff_MPV_frame_end(s);
 
@@ -3068,11 +3098,13 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
     if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
         s->last_picture_ptr = &h->ref_list[0][0];
         s->last_picture_ptr->owner2 = s;
+        s->er.last_pic = s->last_picture_ptr;
         ff_copy_picture(&s->last_picture, s->last_picture_ptr);
     }
     if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
         s->next_picture_ptr = &h->ref_list[1][0];
         s->next_picture_ptr->owner2 = s;
+        s->er.next_pic = s->next_picture_ptr;
         ff_copy_picture(&s->next_picture, s->next_picture_ptr);
     }
 
@@ -3639,6 +3671,15 @@ static void decode_finish_row(H264Context *h)
                               s->picture_structure == PICT_BOTTOM_FIELD);
 }
 
+static void er_add_slice(H264Context *h, int startx, int starty,
+                         int endx, int endy, int status)
+{
+    ERContext *er = &h->s.er;
+
+    er->ref_count = h->ref_count[0];
+    ff_er_add_slice(er, startx, starty, endx, endy, status);
+}
+
 static int decode_slice(struct AVCodecContext *avctx, void *arg)
 {
     H264Context *h = *(void **)arg;
@@ -3686,7 +3727,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
 
             if ((s->workaround_bugs & FF_BUG_TRUNCATED) &&
                 h->cabac.bytestream > h->cabac.bytestream_end + 2) {
-                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1,
+                er_add_slice(h, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1,
                                 s->mb_y, ER_MB_END);
                 if (s->mb_x >= lf_x_start)
                     loop_filter(h, lf_x_start, s->mb_x + 1);
@@ -3697,7 +3738,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
                        "error while decoding MB %d %d, bytestream (%td)\n",
                        s->mb_x, s->mb_y,
                        h->cabac.bytestream_end - h->cabac.bytestream);
-                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x,
+                er_add_slice(h, s->resync_mb_x, s->resync_mb_y, s->mb_x,
                                 s->mb_y, ER_MB_ERROR);
                 return -1;
             }
@@ -3717,7 +3758,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
             if (eos || s->mb_y >= s->mb_height) {
                 tprintf(s->avctx, "slice end %d %d\n",
                         get_bits_count(&s->gb), s->gb.size_in_bits);
-                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1,
+                er_add_slice(h, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1,
                                 s->mb_y, ER_MB_END);
                 if (s->mb_x > lf_x_start)
                     loop_filter(h, lf_x_start, s->mb_x);
@@ -3744,7 +3785,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
             if (ret < 0) {
                 av_log(h->s.avctx, AV_LOG_ERROR,
                        "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
-                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x,
+                er_add_slice(h, s->resync_mb_x, s->resync_mb_y, s->mb_x,
                                 s->mb_y, ER_MB_ERROR);
                 return -1;
             }
@@ -3764,13 +3805,13 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
                             get_bits_count(&s->gb), s->gb.size_in_bits);
 
                     if (get_bits_left(&s->gb) == 0) {
-                        ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y,
+                        er_add_slice(h, s->resync_mb_x, s->resync_mb_y,
                                         s->mb_x - 1, s->mb_y,
                                         ER_MB_END);
 
                         return 0;
                     } else {
-                        ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y,
+                        er_add_slice(h, s->resync_mb_x, s->resync_mb_y,
                                         s->mb_x - 1, s->mb_y,
                                         ER_MB_END);
 
@@ -3783,7 +3824,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
                 tprintf(s->avctx, "slice end %d %d\n",
                         get_bits_count(&s->gb), s->gb.size_in_bits);
                 if (get_bits_left(&s->gb) == 0) {
-                    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y,
+                    er_add_slice(h, s->resync_mb_x, s->resync_mb_y,
                                     s->mb_x - 1, s->mb_y,
                                     ER_MB_END);
                     if (s->mb_x > lf_x_start)
@@ -3791,7 +3832,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
 
                     return 0;
                 } else {
-                    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x,
+                    er_add_slice(h, s->resync_mb_x, s->resync_mb_y, s->mb_x,
                                     s->mb_y, ER_MB_ERROR);
 
                     return -1;
@@ -3823,7 +3864,7 @@ static int execute_decode_slices(H264Context *h, int context_count)
         for (i = 1; i < context_count; i++) {
             hx                    = h->thread_context[i];
             hx->s.err_recognition = avctx->err_recognition;
-            hx->s.error_count     = 0;
+            hx->s.er.error_count  = 0;
         }
 
         avctx->execute(avctx, decode_slice, h->thread_context,
@@ -3836,7 +3877,7 @@ static int execute_decode_slices(H264Context *h, int context_count)
         s->droppable         = hx->s.droppable;
         s->picture_structure = hx->s.picture_structure;
         for (i = 1; i < context_count; i++)
-            h->s.error_count += h->thread_context[i]->s.error_count;
+            h->s.er.error_count += h->thread_context[i]->s.er.error_count;
     }
 
     return 0;
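
The h264.c changes above show the new pattern: the decoder registers a
per-macroblock reconstruction callback and an opaque pointer on its
ERContext (h->s.er.decode_mb / h->s.er.opaque in context_init()), and
routes every ff_er_add_slice() call through a small wrapper that first
refreshes er.ref_count from the slice's h->ref_count[0].  A minimal
sketch of that wiring for a hypothetical decoder follows; MyContext,
my_er_decode_mb and my_decode_init are illustrative names, while the
callback signature is taken from mpeg_er_decode_mb() in the mpegvideo.c
hunk further down.

    /* Sketch only -- not part of this commit.  Assumes the ERContext
     * layout visible in this diff (decode_mb callback + opaque). */
    #include "error_resilience.h"

    typedef struct MyContext {
        ERContext er;
        /* decoder-private state ... */
    } MyContext;

    static void my_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                                int (*mv)[2][4][2],
                                int mb_x, int mb_y, int mb_intra, int mb_skipped)
    {
        MyContext *ctx = opaque;
        /* reconstruct the macroblock at (mb_x, mb_y) using the motion
         * vectors the concealment code has written into mv[] */
    }

    static void my_decode_init(MyContext *ctx)
    {
        ctx->er.decode_mb = my_er_decode_mb;
        ctx->er.opaque    = ctx;
    }
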
diff --git a/libavcodec/intrax8.c b/libavcodec/intrax8.c
index fad7ffe..d2457f8 100644
--- a/libavcodec/intrax8.c
+++ b/libavcodec/intrax8.c
@@ -784,7 +784,7 @@ int ff_intrax8_decode_picture(IntraX8Context * const w, int dquant, int quant_of
     }
 
 error:
-    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y,
+    ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                         (s->mb_x>>1)-1, (s->mb_y>>1)-1,
                         ER_MB_END );
     return 0;
diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
index 1b9a48b..5a53383 100644
--- a/libavcodec/mpeg12.c
+++ b/libavcodec/mpeg12.c
@@ -1565,7 +1565,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
         if (ff_MPV_frame_start(s, avctx) < 0)
             return -1;
 
-        ff_er_frame_start(s);
+        ff_mpeg_er_frame_start(s);
 
         /* first check if we must repeat the frame */
         s->current_picture_ptr->f.repeat_pict = 0;
@@ -1856,7 +1856,7 @@ static int slice_decode_thread(AVCodecContext *c, void *arg)
     int mb_y            = s->start_mb_y;
     const int field_pic = s->picture_structure != PICT_FRAME;
 
-    s->error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
+    s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
 
     for (;;) {
         uint32_t start_code;
@@ -1866,14 +1866,14 @@ static int slice_decode_thread(AVCodecContext *c, void *arg)
         emms_c();
         av_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
                 ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
-                s->start_mb_y, s->end_mb_y, s->error_count);
+                s->start_mb_y, s->end_mb_y, s->er.error_count);
         if (ret < 0) {
             if (c->err_recognition & AV_EF_EXPLODE)
                 return ret;
             if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
-                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
+                ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
         } else {
-            ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_AC_END | ER_DC_END | ER_MV_END);
+            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_AC_END | ER_DC_END | ER_MV_END);
         }
 
         if (s->mb_y == s->end_mb_y)
@@ -1915,7 +1915,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
 
         s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_MPEG2;
 
-        ff_er_frame_end(s);
+        ff_er_frame_end(&s->er);
 
         ff_MPV_frame_end(s);
 
@@ -2210,7 +2210,7 @@ static int decode_chunks(AVCodecContext *avctx,
 
                     avctx->execute(avctx, slice_decode_thread,  &s2->thread_context[0], NULL, s->slice_count, sizeof(void*));
                     for (i = 0; i < s->slice_count; i++)
-                        s2->error_count += s2->thread_context[i]->error_count;
+                        s2->er.error_count += s2->thread_context[i]->er.error_count;
                 }
 
                 if (CONFIG_MPEG_VDPAU_DECODER && avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
@@ -2252,7 +2252,7 @@ static int decode_chunks(AVCodecContext *avctx,
                                s2->thread_context, NULL,
                                s->slice_count, sizeof(void*));
                 for (i = 0; i < s->slice_count; i++)
-                    s2->error_count += s2->thread_context[i]->error_count;
+                    s2->er.error_count += s2->thread_context[i]->er.error_count;
                 s->slice_count = 0;
             }
             if (last_code == 0 || last_code == SLICE_MIN_START_CODE) {
@@ -2411,9 +2411,9 @@ static int decode_chunks(AVCodecContext *avctx,
                         if (avctx->err_recognition & AV_EF_EXPLODE)
                             return ret;
                         if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
-                            ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
+                            ff_er_add_slice(&s2->er, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
                     } else {
-                        ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x-1, s2->mb_y, ER_AC_END | ER_DC_END | ER_MV_END);
+                        ff_er_add_slice(&s2->er, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x-1, s2->mb_y, ER_AC_END | ER_DC_END | ER_MV_END);
                     }
                 }
             }
diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c
index cab5882..ebc74a7 100644
--- a/libavcodec/mpeg4videodec.c
+++ b/libavcodec/mpeg4videodec.c
@@ -796,13 +796,13 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s)
 
     mb_num= mpeg4_decode_partition_a(s);
     if(mb_num<0){
-        ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, part_a_error);
+        ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, part_a_error);
         return -1;
     }
 
     if(s->resync_mb_x + s->resync_mb_y*s->mb_width + mb_num > s->mb_num){
         av_log(s->avctx, AV_LOG_ERROR, "slice below monitor ...\n");
-        ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, part_a_error);
+        ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, part_a_error);
         return -1;
     }
 
@@ -823,15 +823,15 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s)
             return -1;
         }
     }
-    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, part_a_end);
+    ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, part_a_end);
 
     if( mpeg4_decode_partition_b(s, mb_num) < 0){
         if(s->pict_type==AV_PICTURE_TYPE_P)
-            ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_DC_ERROR);
+            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_DC_ERROR);
         return -1;
     }else{
         if(s->pict_type==AV_PICTURE_TYPE_P)
-            ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_DC_END);
+            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_DC_END);
     }
 
     return 0;
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 4b68fd5..4396ec2 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -147,6 +147,33 @@ const enum AVPixelFormat ff_hwaccel_pixfmt_list_420[] = {
     AV_PIX_FMT_NONE
 };
 
+static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
+                              int (*mv)[2][4][2],
+                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
+{
+    MpegEncContext *s = opaque;
+
+    s->mv_dir     = mv_dir;
+    s->mv_type    = mv_type;
+    s->mb_intra   = mb_intra;
+    s->mb_skipped = mb_skipped;
+    s->mb_x       = mb_x;
+    s->mb_y       = mb_y;
+    memcpy(s->mv, mv, sizeof(*mv));
+
+    ff_init_block_index(s);
+    ff_update_block_index(s);
+
+    s->dsp.clear_blocks(s->block[0]);
+
+    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y *  16                       * s->linesize)   + s->mb_x *  16;
+    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
+    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
+
+    assert(ref == 0);
+    ff_MPV_decode_mb(s, s->block);
+}
+
 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                           const uint8_t *end,
                                           uint32_t * restrict state)
@@ -723,6 +750,43 @@ void ff_MPV_decode_defaults(MpegEncContext *s)
     ff_MPV_common_defaults(s);
 }
 
+static int init_er(MpegEncContext *s)
+{
+    ERContext *er = &s->er;
+    int mb_array_size = s->mb_height * s->mb_stride;
+    int i;
+
+    er->avctx       = s->avctx;
+    er->dsp         = &s->dsp;
+
+    er->mb_index2xy = s->mb_index2xy;
+    er->mb_num      = s->mb_num;
+    er->mb_width    = s->mb_width;
+    er->mb_height   = s->mb_height;
+    er->mb_stride   = s->mb_stride;
+    er->b8_stride   = s->b8_stride;
+
+    er->er_temp_buffer     = av_malloc(s->mb_height * s->mb_stride);
+    er->error_status_table = av_mallocz(mb_array_size);
+    if (!er->er_temp_buffer || !er->error_status_table)
+        goto fail;
+
+    er->mbskip_table  = s->mbskip_table;
+    er->mbintra_table = s->mbintra_table;
+
+    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
+        er->dc_val[i] = s->dc_val[i];
+
+    er->decode_mb = mpeg_er_decode_mb;
+    er->opaque    = s;
+
+    return 0;
+fail:
+    av_freep(&er->er_temp_buffer);
+    av_freep(&er->error_status_table);
+    return AVERROR(ENOMEM);
+}
+
 /**
  * Initialize and allocate MpegEncContext fields dependent on the resolution.
  */
@@ -801,11 +865,6 @@ static int init_context_frame(MpegEncContext *s)
 
     }
 
-    FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
-                     mb_array_size * sizeof(uint8_t), fail);
-    FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
-                      mb_array_size * sizeof(uint8_t), fail);
-
     if (s->codec_id == AV_CODEC_ID_MPEG4 ||
         (s->flags & CODEC_FLAG_INTERLACED_ME)) {
         /* interlaced direct mode decoding tables */
@@ -873,7 +932,7 @@ static int init_context_frame(MpegEncContext *s)
                     2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
     }
 
-    return 0;
+    return init_er(s);
 fail:
     return AVERROR(ENOMEM);
 }
@@ -1054,8 +1113,8 @@ static int free_context_frame(MpegEncContext *s)
 
     av_freep(&s->mbskip_table);
 
-    av_freep(&s->error_status_table);
-    av_freep(&s->er_temp_buffer);
+    av_freep(&s->er.error_status_table);
+    av_freep(&s->er.er_temp_buffer);
     av_freep(&s->mb_index2xy);
     av_freep(&s->lambda_table);
     av_freep(&s->cplx_tab);
@@ -1589,7 +1648,7 @@ void ff_MPV_frame_end(MpegEncContext *s)
     // just to make sure that all data is rendered.
     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
         ff_xvmc_field_end(s);
-   } else if ((s->error_count || s->encoding) &&
+   } else if ((s->er.error_count || s->encoding) &&
               !s->avctx->hwaccel &&
               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
               s->unrestricted_mv &&
@@ -2792,6 +2851,22 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
 
 void ff_MPV_report_decode_progress(MpegEncContext *s)
 {
-    if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
+    if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
         ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
 }
+
+void ff_mpeg_er_frame_start(MpegEncContext *s)
+{
+    ERContext *er = &s->er;
+
+    er->cur_pic  = s->current_picture_ptr;
+    er->last_pic = s->last_picture_ptr;
+    er->next_pic = s->next_picture_ptr;
+
+    er->pp_time           = s->pp_time;
+    er->pb_time           = s->pb_time;
+    er->quarter_sample    = s->quarter_sample;
+    er->partitioned_frame = s->partitioned_frame;
+
+    ff_er_frame_start(er);
+}
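
For the MpegEncContext-based decoders patched in this commit (mpeg12
and mpeg4 above, mss2, rv10/rv34 and vc1 below) the per-frame sequence
keeps its shape and only the entry points change:
ff_mpeg_er_frame_start() copies the current/last/next picture pointers
and the timing fields into s->er, each slice reports its coverage
through ff_er_add_slice(&s->er, ...), and concealment runs in
ff_er_frame_end(&s->er) before ff_MPV_frame_end().  A condensed sketch
of that sequence, assembled from the call sites in this commit
(decode_one_slice() stands in for the codec's actual slice decoder and
is hypothetical):

    /* Sketch only -- error handling and codec-specific details trimmed. */
    #include "mpegvideo.h"

    int decode_one_slice(MpegEncContext *s);   /* hypothetical */

    static int decode_one_frame(MpegEncContext *s, AVCodecContext *avctx)
    {
        if (ff_MPV_frame_start(s, avctx) < 0)
            return -1;
        ff_mpeg_er_frame_start(s);   /* fills s->er.cur_pic/last_pic/next_pic */

        if (decode_one_slice(s) < 0)
            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                            s->mb_x, s->mb_y, ER_MB_ERROR);
        else
            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                            s->mb_x - 1, s->mb_y, ER_MB_END);

        ff_er_frame_end(&s->er);     /* conceal any damaged macroblocks */
        ff_MPV_frame_end(s);
        return 0;
    }
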
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index f3f9827..87e9d1b 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -30,6 +30,7 @@
 
 #include "avcodec.h"
 #include "dsputil.h"
+#include "error_resilience.h"
 #include "get_bits.h"
 #include "put_bits.h"
 #include "ratecontrol.h"
@@ -484,19 +485,6 @@ typedef struct MpegEncContext {
     int last_bits; ///< temp var used for calculating the above vars
 
     /* error concealment / resync */
-    int error_count, error_occurred;
-    uint8_t *error_status_table;       ///< table of the error status of each MB
-#define VP_START            1          ///< current MB is the first after a resync marker
-#define ER_AC_ERROR            2
-#define ER_DC_ERROR            4
-#define ER_MV_ERROR            8
-#define ER_AC_END              16
-#define ER_DC_END              32
-#define ER_MV_END              64
-
-#define ER_MB_ERROR (ER_AC_ERROR|ER_DC_ERROR|ER_MV_ERROR)
-#define ER_MB_END   (ER_AC_END|ER_DC_END|ER_MV_END)
-
     int resync_mb_x;                 ///< x position of last resync marker
     int resync_mb_y;                 ///< y position of last resync marker
     GetBitContext last_resync_gb;    ///< used to search for the next resync marker
@@ -701,15 +689,14 @@ typedef struct MpegEncContext {
     int mpv_flags;      ///< flags set by private options
     int quantizer_noise_shaping;
 
-    /* error resilience stuff */
-    uint8_t *er_temp_buffer;
-
     /* temp buffers for rate control */
     float *cplx_tab, *bits_tab;
 
     /* flag to indicate a reinitialization is required, e.g. after
      * a frame size change */
     int context_reinit;
+
+    ERContext er;
 } MpegEncContext;
 
 #define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
@@ -787,9 +774,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src
 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state);
 void ff_set_qscale(MpegEncContext * s, int qscale);
 
-void ff_er_frame_start(MpegEncContext *s);
-void ff_er_frame_end(MpegEncContext *s);
-void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status);
+void ff_mpeg_er_frame_start(MpegEncContext *s);
 
 int ff_dct_common_init(MpegEncContext *s);
 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
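
The ff_er_* prototypes and the ER_* status flags removed from
mpegvideo.h here move into the new libavcodec/error_resilience.h, which
this excerpt does not show.  Going by the declarations removed above
and the call sites throughout the diff, the ERContext-based entry
points look as follows (an inferred summary, not a quote of the new
header):

    /* Inferred: same names and void return as the removed MpegEncContext
     * declarations, with the context argument narrowed to ERContext. */
    void ff_er_frame_start(ERContext *s);
    void ff_er_frame_end(ERContext *s);
    void ff_er_add_slice(ERContext *s, int startx, int starty,
                         int endx, int endy, int status);
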
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index a99c8de..3ba6f81 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -3037,7 +3037,7 @@ static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
     MERGE(b_count);
     MERGE(skip_count);
     MERGE(misc_bits);
-    MERGE(error_count);
+    MERGE(er.error_count);
     MERGE(padding_bug_score);
     MERGE(current_picture.f.error[0]);
     MERGE(current_picture.f.error[1]);
diff --git a/libavcodec/mss2.c b/libavcodec/mss2.c
index 9936272..006432b 100644
--- a/libavcodec/mss2.c
+++ b/libavcodec/mss2.c
@@ -406,7 +406,7 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
         return ret;
     }
 
-    ff_er_frame_start(s);
+    ff_mpeg_er_frame_start(s);
 
     v->bits = buf_size * 8;
 
@@ -419,7 +419,7 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
 
     ff_vc1_decode_blocks(v);
 
-    ff_er_frame_end(s);
+    ff_er_frame_end(&s->er);
 
     ff_MPV_frame_end(s);
 
diff --git a/libavcodec/rv10.c b/libavcodec/rv10.c
index 38abf78..9a9da91 100644
--- a/libavcodec/rv10.c
+++ b/libavcodec/rv10.c
@@ -528,13 +528,13 @@ static int rv10_decode_packet(AVCodecContext *avctx,
 
     if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
         if(s->current_picture_ptr){ //FIXME write parser so we always have complete frames?
-            ff_er_frame_end(s);
+            ff_er_frame_end(&s->er);
             ff_MPV_frame_end(s);
             s->mb_x= s->mb_y = s->resync_mb_x = s->resync_mb_y= 0;
         }
         if(ff_MPV_frame_start(s, avctx) < 0)
             return -1;
-        ff_er_frame_start(s);
+        ff_mpeg_er_frame_start(s);
     } else {
         if (s->current_picture_ptr->f.pict_type != s->pict_type) {
             av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
@@ -626,7 +626,7 @@ static int rv10_decode_packet(AVCodecContext *avctx,
         if(ret == SLICE_END) break;
     }
 
-    ff_er_add_slice(s, start_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
+    ff_er_add_slice(&s->er, start_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
 
     return active_bits_size;
 }
@@ -697,7 +697,7 @@ static int rv10_decode_frame(AVCodecContext *avctx,
     }
 
     if(s->current_picture_ptr != NULL && s->mb_y>=s->mb_height){
-        ff_er_frame_end(s);
+        ff_er_frame_end(&s->er);
         ff_MPV_frame_end(s);
 
         if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c
index a131c10..34eeb50 100644
--- a/libavcodec/rv34.c
+++ b/libavcodec/rv34.c
@@ -1428,7 +1428,7 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
         else
             res = rv34_decode_intra_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
         if(res < 0){
-            ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
+            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
             return -1;
         }
         if (++s->mb_x == s->mb_width) {
@@ -1451,7 +1451,7 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
             s->first_slice_line=0;
         s->mb_num_left--;
     }
-    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
+    ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
 
     return s->mb_y == s->mb_height;
 }
@@ -1564,7 +1564,7 @@ static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
     MpegEncContext *s = &r->s;
     int got_picture = 0;
 
-    ff_er_frame_end(s);
+    ff_er_frame_end(&s->er);
     ff_MPV_frame_end(s);
     s->mb_num_left = 0;
 
@@ -1646,7 +1646,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
         if (s->mb_num_left > 0) {
             av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.",
                    s->mb_num_left);
-            ff_er_frame_end(s);
+            ff_er_frame_end(&s->er);
             ff_MPV_frame_end(s);
         }
 
@@ -1667,7 +1667,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
         s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
         if (ff_MPV_frame_start(s, s->avctx) < 0)
             return -1;
-        ff_er_frame_start(s);
+        ff_mpeg_er_frame_start(s);
         if (!r->tmp_b_block_base) {
             int i;
 
@@ -1766,7 +1766,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
             av_log(avctx, AV_LOG_INFO, "marking unfished frame as finished\n");
             /* always mark the current frame as finished, frame-mt supports
              * only complete frames */
-            ff_er_frame_end(s);
+            ff_er_frame_end(&s->er);
             ff_MPV_frame_end(s);
             s->mb_num_left = 0;
             ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index 38b8216..73c221e 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -4434,7 +4434,7 @@ static void vc1_decode_i_blocks(VC1Context *v)
             if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
 
             if (get_bits_count(&s->gb) > v->bits) {
-                ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
+                ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
                 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                        get_bits_count(&s->gb), v->bits);
                 return;
@@ -4452,7 +4452,7 @@ static void vc1_decode_i_blocks(VC1Context *v)
 
     /* This is intentionally mb_height and not end_mb_y - unlike in advanced
      * profile, these only differ when decoding MSS2 rectangles. */
-    ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
+    ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
 }
 
 /** Decode blocks of I-frame for advanced profile
@@ -4562,7 +4562,7 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
 
             if (get_bits_count(&s->gb) > v->bits) {
                 // TODO: may need modification to handle slice coding
-                ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
+                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                        get_bits_count(&s->gb), v->bits);
                 return;
@@ -4586,7 +4586,7 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
     }
     if (v->s.loop_filter)
         ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
-    ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
+    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                     (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
 }
 
@@ -4638,7 +4638,7 @@ static void vc1_decode_p_blocks(VC1Context *v)
                 vc1_apply_p_loop_filter(v);
             if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                 // TODO: may need modification to handle slice coding
-                ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
+                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
                        get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
                 return;
@@ -4661,7 +4661,7 @@ static void vc1_decode_p_blocks(VC1Context *v)
     }
     if (s->end_mb_y >= s->start_mb_y)
         ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
-    ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
+    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                     (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
 }
 
@@ -4707,7 +4707,7 @@ static void vc1_decode_b_blocks(VC1Context *v)
                 vc1_decode_b_mb(v);
             if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                 // TODO: may need modification to handle slice coding
-                ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
+                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
                        get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
                 return;
@@ -4722,7 +4722,7 @@ static void vc1_decode_b_blocks(VC1Context *v)
     }
     if (v->s.loop_filter)
         ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
-    ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
+    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                     (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
 }
 
@@ -4730,7 +4730,7 @@ static void vc1_decode_skip_blocks(VC1Context *v)
 {
     MpegEncContext *s = &v->s;
 
-    ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
+    ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
     s->first_slice_line = 1;
     for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
         s->mb_x = 0;
@@ -5558,7 +5558,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         if (avctx->hwaccel->end_frame(avctx) < 0)
             goto err;
     } else {
-        ff_er_frame_start(s);
+        ff_mpeg_er_frame_start(s);
 
         v->bits = buf_size * 8;
         v->end_mb_x = s->mb_width;
@@ -5635,7 +5635,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 get_bits_count(&s->gb), s->gb.size_in_bits);
 //  if (get_bits_count(&s->gb) > buf_size * 8)
 //      return -1;
-        ff_er_frame_end(s);
+        ff_er_frame_end(&s->er);
     }
 
     ff_MPV_frame_end(s);


