h263.c
/*
 * H.263/MPEG-4 backend for encoder and decoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * H.263+ support.
 * Copyright (c) 2001 Juan J. Sierralta P
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.263/MPEG-4 codec.
 */

#include "libavutil/thread.h"
#include "mpegvideo.h"
#include "h263.h"
#include "h263data.h"
#include "h263dsp.h"
#include "idctdsp.h"
#include "mathops.h"
#include "mpegpicture.h"
#include "mpegutils.h"
#include "rl.h"

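/* One-time initialization of the inter RL (run/level) table shared by the
 * H.263/MPEG-4 code paths; ff_h263_init_rl_inter() below wraps it in
 * ff_thread_once() so concurrent codec initializations build the static
 * table only once. */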
static av_cold void h263_init_rl_inter(void)
{
    static uint8_t h263_rl_inter_table[2][2 * MAX_RUN + MAX_LEVEL + 3];
    ff_rl_init(&ff_h263_rl_inter, h263_rl_inter_table);
}

av_cold void ff_h263_init_rl_inter(void)
{
    static AVOnce init_static_once = AV_ONCE_INIT;
    ff_thread_once(&init_static_once, h263_init_rl_inter);
}

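/* Store the current macroblock's skip flag, motion vectors and (when encoding)
 * macroblock type into the current_picture arrays. These per-MB tables are
 * read back later, e.g. by the loop filter and by B-frame direct-mode
 * prediction, which is why the FIXME notes that much of this is only needed
 * for the !low_delay case. */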
void ff_h263_update_motion_val(MpegEncContext * s){
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
               //FIXME a lot of that is only needed for !low_delay
    const int wrap = s->b8_stride;
    const int xy = s->block_index[0];

    s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;

    if(s->mv_type != MV_TYPE_8X8){
        int motion_x, motion_y;
        if (s->mb_intra) {
            motion_x = 0;
            motion_y = 0;
        } else if (s->mv_type == MV_TYPE_16X16) {
            motion_x = s->mv[0][0][0];
            motion_y = s->mv[0][0][1];
        } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
            int i;
            motion_x = s->mv[0][0][0] + s->mv[0][1][0];
            motion_y = s->mv[0][0][1] + s->mv[0][1][1];
            motion_x = (motion_x>>1) | (motion_x&1);
            for(i=0; i<2; i++){
                s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
                s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
            }
            s->current_picture.ref_index[0][4*mb_xy    ] =
            s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
            s->current_picture.ref_index[0][4*mb_xy + 2] =
            s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
        }

        /* no update if 8X8 because it has been done during parsing */
        s->current_picture.motion_val[0][xy][0] = motion_x;
        s->current_picture.motion_val[0][xy][1] = motion_y;
        s->current_picture.motion_val[0][xy + 1][0] = motion_x;
        s->current_picture.motion_val[0][xy + 1][1] = motion_y;
        s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
        s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
        s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
        s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
    }

    if(s->encoding){ //FIXME encoding MUST be cleaned up
        if (s->mv_type == MV_TYPE_8X8)
            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
        else if(s->mb_intra)
            s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA;
        else
            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
    }
}

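/* Plain DC prediction: the predictor for block X is taken from its left (A)
 * and top (C) neighbours, laid out as
 *     B C
 *     A X
 * using their average when both are available and the single available
 * neighbour otherwise. The value 1024 (the reset DC value, 128 << 3) marks a
 * neighbour as unavailable, e.g. across a GOB/slice boundary. */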
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
{
    int x, y, wrap, a, c, pred_dc;
    int16_t *dc_val;

    /* find prediction */
    if (n < 4) {
        x = 2 * s->mb_x + (n & 1);
        y = 2 * s->mb_y + ((n & 2) >> 1);
        wrap = s->b8_stride;
        dc_val = s->dc_val[0];
    } else {
        x = s->mb_x;
        y = s->mb_y;
        wrap = s->mb_stride;
        dc_val = s->dc_val[n - 4 + 1];
    }
    /* B C
     * A X
     */
    a = dc_val[(x - 1) + (y) * wrap];
    c = dc_val[(x) + (y - 1) * wrap];

    /* No prediction outside GOB boundary */
    if(s->first_slice_line && n!=3){
        if(n!=2) c= 1024;
        if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
    }
    /* just DC prediction */
    if (a != 1024 && c != 1024)
        pred_dc = (a + c) >> 1;
    else if (a != 1024)
        pred_dc = a;
    else
        pred_dc = c;

    /* we assume pred is positive */
    *dc_val_ptr = &dc_val[x + y * wrap];
    return pred_dc;
}

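/* Deblocking loop filter (H.263 Annex J style): filters the 8x8 block edges of
 * the current macroblock and of its top/left neighbours using the h263dsp
 * horizontal/vertical edge filters. The quantizer passed to each call comes
 * from the macroblock that owns the edge; skipped macroblocks contribute a QP
 * of 0, which disables filtering of that edge. */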
void ff_h263_loop_filter(MpegEncContext * s){
    int qp_c;
    const int linesize  = s->linesize;
    const int uvlinesize= s->uvlinesize;
    const int xy = s->mb_y * s->mb_stride + s->mb_x;
    uint8_t *dest_y = s->dest[0];
    uint8_t *dest_cb= s->dest[1];
    uint8_t *dest_cr= s->dest[2];

    /*
       Diag  Top
       Left  Center
    */
    if (!IS_SKIP(s->current_picture.mb_type[xy])) {
        qp_c= s->qscale;
        s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize,     linesize, qp_c);
        s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
    }else
        qp_c= 0;

    if(s->mb_y){
        int qp_dt, qp_tt, qp_tc;

        if (IS_SKIP(s->current_picture.mb_type[xy - s->mb_stride]))
            qp_tt=0;
        else
            qp_tt = s->current_picture.qscale_table[xy - s->mb_stride];

        if(qp_c)
            qp_tc= qp_c;
        else
            qp_tc= qp_tt;

        if(qp_tc){
            const int chroma_qp= s->chroma_qscale_table[qp_tc];
            s->h263dsp.h263_v_loop_filter(dest_y,     linesize, qp_tc);
            s->h263dsp.h263_v_loop_filter(dest_y + 8, linesize, qp_tc);

            s->h263dsp.h263_v_loop_filter(dest_cb, uvlinesize, chroma_qp);
            s->h263dsp.h263_v_loop_filter(dest_cr, uvlinesize, chroma_qp);
        }

        if(qp_tt)
            s->h263dsp.h263_h_loop_filter(dest_y - 8 * linesize + 8, linesize, qp_tt);

        if(s->mb_x){
            if (qp_tt || IS_SKIP(s->current_picture.mb_type[xy - 1 - s->mb_stride]))
                qp_dt= qp_tt;
            else
                qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride];

            if(qp_dt){
                const int chroma_qp= s->chroma_qscale_table[qp_dt];
                s->h263dsp.h263_h_loop_filter(dest_y  - 8 * linesize,   linesize,   qp_dt);
                s->h263dsp.h263_h_loop_filter(dest_cb - 8 * uvlinesize, uvlinesize, chroma_qp);
                s->h263dsp.h263_h_loop_filter(dest_cr - 8 * uvlinesize, uvlinesize, chroma_qp);
            }
        }
    }

    if(qp_c){
        s->h263dsp.h263_h_loop_filter(dest_y + 8, linesize, qp_c);
        if(s->mb_y + 1 == s->mb_height)
            s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
    }

    if(s->mb_x){
        int qp_lc;
        if (qp_c || IS_SKIP(s->current_picture.mb_type[xy - 1]))
            qp_lc= qp_c;
        else
            qp_lc = s->current_picture.qscale_table[xy - 1];

        if(qp_lc){
            s->h263dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
            if(s->mb_y + 1 == s->mb_height){
                const int chroma_qp= s->chroma_qscale_table[qp_lc];
                s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize, linesize,   qp_lc);
                s->h263dsp.h263_h_loop_filter(dest_cb,               uvlinesize, chroma_qp);
                s->h263dsp.h263_h_loop_filter(dest_cr,               uvlinesize, chroma_qp);
            }
        }
    }
}

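/* AC/DC prediction for advanced intra coding (H.263 Annex I): the DC
 * coefficient is predicted from the left (A) or top (C) neighbour, and when
 * s->ac_pred is set the first row or column of AC coefficients is predicted
 * from the same neighbour, selected by s->h263_aic_dir (0 = predict from the
 * top block, nonzero = predict from the left block). The updated DC and the
 * first AC row/column are stored back into s->dc_val / s->ac_val for use by
 * subsequent blocks. */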
void ff_h263_pred_acdc(MpegEncContext * s, int16_t *block, int n)
{
    int x, y, wrap, a, c, pred_dc, scale, i;
    int16_t *dc_val, *ac_val, *ac_val1;

    /* find prediction */
    if (n < 4) {
        x = 2 * s->mb_x + (n & 1);
        y = 2 * s->mb_y + (n>> 1);
        wrap = s->b8_stride;
        dc_val = s->dc_val[0];
        ac_val = s->ac_val[0][0];
        scale = s->y_dc_scale;
    } else {
        x = s->mb_x;
        y = s->mb_y;
        wrap = s->mb_stride;
        dc_val = s->dc_val[n - 4 + 1];
        ac_val = s->ac_val[n - 4 + 1][0];
        scale = s->c_dc_scale;
    }

    ac_val += ((y) * wrap + (x)) * 16;
    ac_val1 = ac_val;

    /* B C
     * A X
     */
    a = dc_val[(x - 1) + (y) * wrap];
    c = dc_val[(x) + (y - 1) * wrap];

    /* No prediction outside GOB boundary */
    if(s->first_slice_line && n!=3){
        if(n!=2) c= 1024;
        if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
    }

    if (s->ac_pred) {
        pred_dc = 1024;
        if (s->h263_aic_dir) {
            /* left prediction */
            if (a != 1024) {
                ac_val -= 16;
                for(i=1;i<8;i++) {
                    block[s->idsp.idct_permutation[i << 3]] += ac_val[i];
                }
                pred_dc = a;
            }
        } else {
            /* top prediction */
            if (c != 1024) {
                ac_val -= 16 * wrap;
                for(i=1;i<8;i++) {
                    block[s->idsp.idct_permutation[i]] += ac_val[i + 8];
                }
                pred_dc = c;
            }
        }
    } else {
        /* just DC prediction */
        if (a != 1024 && c != 1024)
            pred_dc = (a + c) >> 1;
        else if (a != 1024)
            pred_dc = a;
        else
            pred_dc = c;
    }

    /* we assume pred is positive */
    block[0]=block[0]*scale + pred_dc;

    if (block[0] < 0)
        block[0] = 0;
    else
        block[0] |= 1;

    /* Update AC/DC tables */
    dc_val[(x) + (y) * wrap] = block[0];

    /* left copy */
    for(i=1;i<8;i++)
        ac_val1[i    ] = block[s->idsp.idct_permutation[i << 3]];
    /* top copy */
    for(i=1;i<8;i++)
        ac_val1[8 + i] = block[s->idsp.idct_permutation[i]];
}

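/* Motion vector prediction: returns a pointer to the motion vector storage of
 * the given 8x8 block and writes the predictor to (*px, *py), normally the
 * component-wise median (mid_pred) of the left (A), top (B) and top-right (C)
 * candidates. The off[] table gives the horizontal offset of candidate C for
 * each block position; the first slice line and resync-marker cases fall back
 * to fewer candidates. */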
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
                             int *px, int *py)
{
    int wrap;
    int16_t *A, *B, *C, (*mot_val)[2];
    static const int off[4]= {2, 1, 1, -1};

    wrap = s->b8_stride;
    mot_val = s->current_picture.motion_val[dir] + s->block_index[block];

    A = mot_val[ - 1];
    /* special case for first (slice) line */
    if (s->first_slice_line && block<3) {
        // we can't just change some MVs to simulate that as we need them for the B-frames (and ME)
        // and if we ever support non-rectangular objects then we need to do a few ifs here anyway :(
        if(block==0){ //most common case
            if(s->mb_x == s->resync_mb_x){ //rare
                *px= *py = 0;
            }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                if(s->mb_x==0){
                    *px = C[0];
                    *py = C[1];
                }else{
                    *px = mid_pred(A[0], 0, C[0]);
                    *py = mid_pred(A[1], 0, C[1]);
                }
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else if(block==1){
            if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                *px = mid_pred(A[0], 0, C[0]);
                *py = mid_pred(A[1], 0, C[1]);
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else{ /* block==2*/
            B = mot_val[ - wrap];
            C = mot_val[off[block] - wrap];
            if(s->mb_x == s->resync_mb_x) //rare
                A[0]=A[1]=0;

            *px = mid_pred(A[0], B[0], C[0]);
            *py = mid_pred(A[1], B[1], C[1]);
        }
    } else {
        B = mot_val[ - wrap];
        C = mot_val[off[block] - wrap];
        *px = mid_pred(A[0], B[0], C[0]);
        *py = mid_pred(A[1], B[1], C[1]);
    }
    return *mot_val;
}