FFmpeg
svq3.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "libavutil/crc.h"
47 #include "libavutil/mem_internal.h"
48 
49 #include "codec_internal.h"
50 #include "decode.h"
51 #include "avcodec.h"
52 #include "mpegutils.h"
53 #include "h264data.h"
54 #include "h264dsp.h"
55 #include "h264pred.h"
56 #include "h264_parse.h"
57 #include "golomb.h"
58 #include "hpeldsp.h"
59 #include "mathops.h"
60 #include "rectangle.h"
61 #include "tpeldsp.h"
62 #include "videodsp.h"
63 
64 #if CONFIG_ZLIB
65 #include <zlib.h>
66 #endif
67 
68 /**
69  * @file
70  * svq3 decoder.
71  */
72 
73 typedef struct SVQ3Frame {
75 
76  int16_t (*motion_val_buf[2])[2];
77  int16_t (*motion_val[2])[2];
78 
79  uint32_t *mb_type_buf, *mb_type;
80 } SVQ3Frame;
81 
82 typedef struct SVQ3Context {
84 
90 
96  uint8_t *slice_buf;
97  unsigned slice_buf_size;
101  uint32_t watermark_key;
107  int qscale;
108  int cbp;
113 
117 
118  int mb_x, mb_y;
119  int mb_xy;
122  int b_stride;
123 
124  uint32_t *mb2br_xy;
125 
128 
131 
132  unsigned int top_samples_available;
134 
135  uint8_t *edge_emu_buffer;
136 
137  DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
138  DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];
139  DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
140  DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
141  DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
142  uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
143  int block_offset[2 * (16 * 3)];
145 } SVQ3Context;
146 
/* Motion-compensation precision modes signalled per inter macroblock
 * (see svq3_mc_dir); PREDICT_MODE derives the vectors from the next
 * picture's motion field instead of reading a differential. */
#define FULLPEL_MODE  1
#define HALFPEL_MODE  2
#define THIRDPEL_MODE 3
#define PREDICT_MODE  4
151 
/* dual scan (from some older H.264 draft)
 * o-->o-->o   o
 *         |  /|
 * o   o   o / o
 * | / |   |/  |
 * o   o   o   o
 *   /
 * o-->o-->o-->o
 */
/* Coefficient scan order for 4x4 blocks: values are x + 4*y positions
 * inside the block, visited in the "dual scan" order drawn above. */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
167 
/* Zigzag scan for the 16 luma DC coefficients; each entry addresses the
 * DC slot (offset in int16_t units) of one 4x4 block in the macroblock
 * coefficient buffer (16 coefficients per block, 64 per block row). */
static const uint8_t luma_dc_zigzag_scan[16] = {
    0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
    3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
    1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
    3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
};
174 
/* Anti-diagonal traversal of a 5x5 grid as (x, y) pairs; used when
 * decoding intra 4x4 luma prediction codes to split one VLC value into
 * a pair of table indices (see the luma prediction loop in
 * svq3_decode_mb, which indexes svq3_pred_1 with these). */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};
186 
/* Intra 4x4 mode prediction contexts: indexed as
 * [top mode + 1][left mode + 1][rank], where rank comes from
 * svq3_pred_0; -1 marks an invalid combination (rejected as
 * "weird prediction" by the caller). */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};
201 
/* Run/level pairs for small DCT VLC codes, selected by the intra flag
 * computed in svq3_decode_block; VLC values >= 16 fall back to the
 * escape formulas there. Entry 0 is unused (VLC 0 terminates the run
 * loop before lookup). */
static const struct {
    uint8_t run;
    uint8_t level;
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
211 
/* Dequantization multipliers indexed by qscale (0..31); applied with a
 * +0x80000 rounding bias and >>20 in the IDCT helpers below. */
static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,   5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339,  13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847,  34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615,  89113, 100253, 109366, 126635, 141533
};
218 
/**
 * Dequantize and inverse-transform the 4x4 block of luma DC values.
 *
 * Runs SVQ3's integer butterfly (13/17/7 coefficients) over rows then
 * columns, scales by the dequant factor for @p qp with a 20-bit
 * rounding shift, and scatters the results into the DC slot of each of
 * the 16 spatial 4x4 luma blocks in the coefficient buffer.
 *
 * @param output macroblock coefficient buffer (DC slots written, stride 16)
 * @param input  16 quantized luma DC coefficients
 * @param qp     index into svq3_dequant_coeff (caller keeps it in 0..31)
 */
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
{
    const unsigned qmul = svq3_dequant_coeff[qp];
#define stride 16
    int i;
    int temp[16];
    /* DC positions of the four 4x4-block rows within the macroblock
     * coefficient layout (together with output[stride*{0,2,8,10}]
     * below this addresses all 16 per-block DC slots). */
    static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };

    /* horizontal (row) pass */
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
        const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
        const int z2 =  7 * input[4 * i + 1] - 17 * input[4 * i + 3];
        const int z3 = 17 * input[4 * i + 1] +  7 * input[4 * i + 3];

        temp[4 * i + 0] = z0 + z3;
        temp[4 * i + 1] = z1 + z2;
        temp[4 * i + 2] = z1 - z2;
        temp[4 * i + 3] = z0 - z3;
    }

    /* vertical (column) pass, fused with dequantization and rounding */
    for (i = 0; i < 4; i++) {
        const int offset = x_offset[i];
        const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
        const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
        const int z2 =  7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
        const int z3 = 17 * temp[4 * 1 + i] +  7 * temp[4 * 3 + i];

        output[stride *  0 + offset] = (int)((z0 + z3) * qmul + 0x80000) >> 20;
        output[stride *  2 + offset] = (int)((z1 + z2) * qmul + 0x80000) >> 20;
        output[stride *  8 + offset] = (int)((z1 - z2) * qmul + 0x80000) >> 20;
        output[stride * 10 + offset] = (int)((z0 - z3) * qmul + 0x80000) >> 20;
    }
}
#undef stride
253 
/**
 * Dequantize a 4x4 residual block, inverse-transform it, and add the
 * result to the destination pixels.
 *
 * @param dst    pixels to add the reconstructed residual to
 * @param block  16 quantized coefficients; cleared to zero on return
 * @param stride line size of dst in bytes
 * @param qp     index into svq3_dequant_coeff
 * @param dc     0: transform all coefficients as-is;
 *               1: block[0] holds a luma DC value, scaled by 1538;
 *               other (2 in this file): block[0] holds a chroma DC
 *               value, dequantized with qmul. In the DC cases block[0]
 *               is consumed here and the DC contribution is folded
 *               into the rounding term of the final pass.
 */
static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
                            int stride, int qp, int dc)
{
    const int qmul = svq3_dequant_coeff[qp];
    int i;

    if (dc) {
        /* 13*13 compensates for the two butterfly passes the DC term
         * skips (each pass scales by 13). */
        dc = 13 * 13 * (dc == 1 ? 1538U * block[0]
                                : qmul * (block[0] >> 3) / 2);
        block[0] = 0;
    }

    /* horizontal (row) pass, done in place */
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
        const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
        const int z2 =  7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
        const int z3 = 17 * block[1 + 4 * i] +  7 * block[3 + 4 * i];

        block[0 + 4 * i] = z0 + z3;
        block[1 + 4 * i] = z1 + z2;
        block[2 + 4 * i] = z1 - z2;
        block[3 + 4 * i] = z0 - z3;
    }

    /* vertical (column) pass with dequant, DC + rounding bias, and
     * saturated add into the destination pixels */
    for (i = 0; i < 4; i++) {
        const unsigned z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
        const unsigned z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
        const unsigned z2 =  7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
        const unsigned z3 = 17 * block[i + 4 * 1] +  7 * block[i + 4 * 3];
        const int rr = (dc + 0x80000u);

        dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((int)((z0 + z3) * qmul + rr) >> 20));
        dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((int)((z1 + z2) * qmul + rr) >> 20));
        dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((int)((z1 - z2) * qmul + rr) >> 20));
        dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((int)((z0 - z3) * qmul + rr) >> 20));
    }

    memset(block, 0, 16 * sizeof(int16_t));
}
293 
294 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
295  int index, const int type)
296 {
297  static const uint8_t *const scan_patterns[4] = {
299  };
300 
301  int run, level, sign, limit;
302  unsigned vlc;
303  const int intra = 3 * type >> 2;
304  const uint8_t *const scan = scan_patterns[type];
305 
306  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
307  for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
308  if ((int32_t)vlc < 0)
309  return -1;
310 
311  sign = (vlc & 1) ? 0 : -1;
312  vlc = vlc + 1 >> 1;
313 
314  if (type == 3) {
315  if (vlc < 3) {
316  run = 0;
317  level = vlc;
318  } else if (vlc < 4) {
319  run = 1;
320  level = 1;
321  } else {
322  run = vlc & 0x3;
323  level = (vlc + 9 >> 2) - run;
324  }
325  } else {
326  if (vlc < 16U) {
327  run = svq3_dct_tables[intra][vlc].run;
328  level = svq3_dct_tables[intra][vlc].level;
329  } else if (intra) {
330  run = vlc & 0x7;
331  level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
332  } else {
333  run = vlc & 0xF;
334  level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
335  }
336  }
337 
338 
339  if ((index += run) >= limit)
340  return -1;
341 
342  block[scan[index]] = (level ^ sign) - sign;
343  }
344 
345  if (type != 2) {
346  break;
347  }
348  }
349 
350  return 0;
351 }
352 
353 static av_always_inline int
354 svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
355  int i, int list, int part_width)
356 {
357  const int topright_ref = s->ref_cache[list][i - 8 + part_width];
358 
359  if (topright_ref != PART_NOT_AVAILABLE) {
360  *C = s->mv_cache[list][i - 8 + part_width];
361  return topright_ref;
362  } else {
363  *C = s->mv_cache[list][i - 8 - 1];
364  return s->ref_cache[list][i - 8 - 1];
365  }
366 }
367 
/**
 * Get the predicted MV.
 * @param n the block index
 * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 *
 * Median prediction from the left (A), top (B) and diagonal (C)
 * neighbours, with the usual special cases: a single neighbour
 * matching @p ref wins outright, and when only the left neighbour is
 * available its MV is used directly.
 */
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
                                              int part_width, int list,
                                              int ref, int *const mx, int *const my)
{
    const int index8       = scan8[n];
    const int top_ref      = s->ref_cache[list][index8 - 8];
    const int left_ref     = s->ref_cache[list][index8 - 1];
    const int16_t *const A = s->mv_cache[list][index8 - 1];
    const int16_t *const B = s->mv_cache[list][index8 - 8];
    const int16_t *C;
    int diagonal_ref, match_count;

/* mv_cache layout around the current block (L = left, T = top,
 * B/U = top-left/left of the cache border):
 * B . . A T T T T
 * U . . L . . , .
 * U . . L . . . .
 * U . . L . . , .
 * . . . L . . . .
 */

    diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
    match_count  = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
    if (match_count > 1) { // most common case: take the component-wise median
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        /* exactly one neighbour uses this reference: copy its MV */
        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
        } else if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
        } else {
            *mx = C[0];
            *my = C[1];
        }
    } else {
        if (top_ref      == PART_NOT_AVAILABLE &&
            diagonal_ref == PART_NOT_AVAILABLE &&
            left_ref     != PART_NOT_AVAILABLE) {
            /* only the left neighbour exists: use it as-is */
            *mx = A[0];
            *my = A[1];
        } else {
            *mx = mid_pred(A[0], B[0], C[0]);
            *my = mid_pred(A[1], B[1], C[1]);
        }
    }
}
423 
/**
 * Motion-compensate one luma partition (and, unless GRAY, its chroma)
 * from the reference picture of the given direction.
 *
 * @param x, y          partition position in the current picture (pixels)
 * @param width, height partition size in pixels
 * @param mx, my        motion vector, in halfpel or thirdpel units
 *                      depending on @p thirdpel (relative to x/y)
 * @param dxy           sub-pel phase index into the put/avg DSP tables
 * @param thirdpel      nonzero: use the thirdpel DSP functions
 * @param dir           0: predict from last_pic, else from next_pic
 * @param avg           nonzero: average into dest instead of replacing
 */
static inline void svq3_mc_dir_part(SVQ3Context *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize  = 2 - (width >> 3); // 16->0, 8->1, 4->2
    int linesize   = s->cur_pic->f->linesize[0];
    int uvlinesize = s->cur_pic->f->linesize[1];

    mx += x;
    my += y;

    /* fall back to edge emulation when the source block pokes outside
     * the padded reference frame */
    if (mx < 0 || mx >= s->h_edge_pos - width  - 1 ||
        my < 0 || my >= s->v_edge_pos - height - 1) {
        emu = 1;
        mx  = av_clip(mx, -16, s->h_edge_pos - width  + 15);
        my  = av_clip(my, -16, s->v_edge_pos - height + 15);
    }

    /* form component predictions */
    dest = s->cur_pic->f->data[0] + x  + y  * linesize;
    src  = pic->f->data[0]        + mx + my * linesize;

    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                 linesize, linesize,
                                 width + 1, height + 1,
                                 mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? s->tdsp.avg_tpel_pixels_tab
             : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
                                                 width, height);
    else
        (avg ? s->hdsp.avg_pixels_tab
             : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
                                                       height);

    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        /* halve position and size for 4:2:0 chroma, rounding the
         * MV toward the block position */
        mx     = mx + (mx < (int) x) >> 1;
        my     = my + (my < (int) y) >> 1;
        width  = width  >> 1;
        height = height >> 1;
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
            src  = pic->f->data[i]        + mx       + my       * uvlinesize;

            if (emu) {
                s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                         uvlinesize, uvlinesize,
                                         width + 1, height + 1,
                                         mx, my, (s->h_edge_pos >> 1),
                                         s->v_edge_pos >> 1);
                src = s->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? s->tdsp.avg_tpel_pixels_tab
                     : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
                                                         uvlinesize,
                                                         width, height);
            else
                (avg ? s->hdsp.avg_pixels_tab
                     : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
                                                               uvlinesize,
                                                               height);
        }
    }
}
498 
/**
 * Decode and apply motion compensation for all partitions of the
 * current macroblock in one direction.
 *
 * @param size partition-layout code; determines part_width/part_height
 * @param mode FULLPEL/HALFPEL/THIRDPEL/PREDICT motion mode
 * @param dir  prediction direction (0 = forward, 1 = backward)
 * @param avg  nonzero: average into the destination (B-frame blending)
 * @return 0 on success, -1 on an invalid motion-vector differential
 *
 * MV predictions are kept in 1/6-pel units internally; the three
 * sub-pel branches convert to their own grid (thirdpel, halfpel,
 * fullpel) before calling svq3_mc_dir_part and convert back when
 * updating the caches.
 */
static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
                              int dir, int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned)(size + 1) / 3);
    const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
    const int h_edge_pos  = 6 * (s->h_edge_pos - part_width)  - extra_width;
    const int v_edge_pos  = 6 * (s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height)
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4 * s->mb_x + (j >> 2)) +
                             (4 * s->mb_y + (i >> 2)) * s->b_stride;
            int dxy;
            x = 16 * s->mb_x + j;
            y = 16 * s->mb_y + i;
            /* scan8-style block index of this partition's top-left 4x4 block */
            k = (j >> 2 & 1) + (i >> 1 & 2) +
                (j >> 1 & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
            } else {
                /* direct mode: scale the co-located MV of the next
                 * picture by the temporal distance ratio */
                mx = s->next_pic->motion_val[0][b_xy][0] * 2;
                my = s->next_pic->motion_val[0][b_xy][1] * 2;

                if (dir == 0) {
                    mx = mx * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                } else {
                    mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
            my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = get_interleaved_se_golomb(&s->gb_slice);
                dx = get_interleaved_se_golomb(&s->gb_slice);

                if (dx != (int16_t)dx || dy != (int16_t)dy) {
                    av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx  = (mx + 1 >> 1) + dx; /* sixthpel -> thirdpel, then add diff */
                my  = (my + 1 >> 1) + dy;
                fx  = (unsigned)(mx + 0x30000) / 3 - 0x10000; /* fullpel part */
                fy  = (unsigned)(my + 0x30000) / 3 - 0x10000;
                dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);      /* thirdpel phase */

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 fx, fy, dxy, 1, dir, avg);
                mx += mx; /* back to sixthpel for the caches */
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx  = (unsigned)(mx + 1 + 0x30000) / 3 + dx - 0x10000; /* -> halfpel */
                my  = (unsigned)(my + 1 + 0x30000) / 3 + dy - 0x10000;
                dxy = (mx & 1) + 2 * (my & 1); /* halfpel phase */

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx >> 1, my >> 1, dxy, 0, dir, avg);
                mx *= 3; /* back to sixthpel */
                my *= 3;
            } else {
                mx = (unsigned)(mx + 3 + 0x60000) / 6 + dx - 0x10000; /* -> fullpel */
                my = (unsigned)(my + 3 + 0x60000) / 6 + dy - 0x10000;

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx, my, 0, 0, dir, avg);
                mx *= 6; /* back to sixthpel */
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx, my);

                if (part_height == 8 && i < 8) {
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);

                    if (part_width == 8 && j < 8)
                        AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
                }
                if (part_width == 8 && j < 8)
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
                if (part_width == 4 || part_height == 4)
                    AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
            }

            /* write back motion vectors */
            fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
                           part_width >> 2, part_height >> 2, s->b_stride,
                           pack16to32(mx, my), 4);
        }

    return 0;
}
611 
613  int mb_type, const int *block_offset,
614  int linesize, uint8_t *dest_y)
615 {
616  int i;
617  if (!IS_INTRA4x4(mb_type)) {
618  for (i = 0; i < 16; i++)
619  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
620  uint8_t *const ptr = dest_y + block_offset[i];
621  svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
622  s->qscale, IS_INTRA(mb_type) ? 1 : 0);
623  }
624  }
625 }
626 
628  int mb_type,
629  const int *block_offset,
630  int linesize,
631  uint8_t *dest_y)
632 {
633  int i;
634  int qscale = s->qscale;
635 
636  if (IS_INTRA4x4(mb_type)) {
637  for (i = 0; i < 16; i++) {
638  uint8_t *const ptr = dest_y + block_offset[i];
639  const int dir = s->intra4x4_pred_mode_cache[scan8[i]];
640 
641  uint8_t *topright;
642  int nnz;
643  if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
644  av_assert2(s->mb_y || linesize <= block_offset[i]);
645  topright = ptr + 4 - linesize;
646  } else
647  topright = NULL;
648 
649  s->hpc.pred4x4[dir](ptr, topright, linesize);
650  nnz = s->non_zero_count_cache[scan8[i]];
651  if (nnz) {
652  svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
653  }
654  }
655  } else {
656  s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
657  svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
658  }
659 }
660 
662 {
663  const int mb_x = s->mb_x;
664  const int mb_y = s->mb_y;
665  const int mb_xy = s->mb_xy;
666  const int mb_type = s->cur_pic->mb_type[mb_xy];
667  uint8_t *dest_y, *dest_cb, *dest_cr;
668  int linesize, uvlinesize;
669  int i, j;
670  const int *block_offset = &s->block_offset[0];
671  const int block_h = 16 >> 1;
672 
673  linesize = s->cur_pic->f->linesize[0];
674  uvlinesize = s->cur_pic->f->linesize[1];
675 
676  dest_y = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
677  dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
678  dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;
679 
680  s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
681  s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);
682 
683  if (IS_INTRA(mb_type)) {
684  s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
685  s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);
686 
687  hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
688  }
689 
690  hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);
691 
692  if (s->cbp & 0x30) {
693  uint8_t *dest[2] = { dest_cb, dest_cr };
694  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
695  s->dequant4_coeff[4][0]);
696  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
697  s->dequant4_coeff[4][0]);
698  for (j = 1; j < 3; j++) {
699  for (i = j * 16; i < j * 16 + 4; i++)
700  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
701  uint8_t *const ptr = dest[j - 1] + block_offset[i];
702  svq3_add_idct_c(ptr, s->mb + i * 16,
703  uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
704  }
705  }
706  }
707 }
708 
709 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
710 {
711  int i, j, k, m, dir, mode;
712  int cbp = 0;
713  uint32_t vlc;
714  int8_t *top, *left;
715  const int mb_xy = s->mb_xy;
716  const int b_xy = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;
717 
718  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
719  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
720 
721  if (mb_type == 0) { /* SKIP */
722  if (s->pict_type == AV_PICTURE_TYPE_P ||
723  s->next_pic->mb_type[mb_xy] == -1) {
724  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
725  0, 0, 0, 0, 0, 0);
726 
727  if (s->pict_type == AV_PICTURE_TYPE_B)
728  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
729  0, 0, 0, 0, 1, 1);
730 
731  mb_type = MB_TYPE_SKIP;
732  } else {
733  mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
734  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
735  return -1;
736  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
737  return -1;
738 
739  mb_type = MB_TYPE_16x16;
740  }
741  } else if (mb_type < 8) { /* INTER */
742  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
744  else if (s->halfpel_flag &&
745  s->thirdpel_flag == !get_bits1(&s->gb_slice))
746  mode = HALFPEL_MODE;
747  else
748  mode = FULLPEL_MODE;
749 
750  /* fill caches */
751  /* note ref_cache should contain here:
752  * ????????
753  * ???11111
754  * N??11111
755  * N??11111
756  * N??11111
757  */
758 
759  for (m = 0; m < 2; m++) {
760  if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
761  for (i = 0; i < 4; i++)
762  AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
763  s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
764  } else {
765  for (i = 0; i < 4; i++)
766  AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
767  }
768  if (s->mb_y > 0) {
769  memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
770  s->cur_pic->motion_val[m][b_xy - s->b_stride],
771  4 * 2 * sizeof(int16_t));
772  memset(&s->ref_cache[m][scan8[0] - 1 * 8],
773  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
774 
775  if (s->mb_x < s->mb_width - 1) {
776  AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
777  s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
778  s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
779  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
780  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
781  } else
782  s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
783  if (s->mb_x > 0) {
784  AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
785  s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
786  s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
787  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
788  } else
789  s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
790  } else
791  memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
792  PART_NOT_AVAILABLE, 8);
793 
794  if (s->pict_type != AV_PICTURE_TYPE_B)
795  break;
796  }
797 
798  /* decode motion vector(s) and form prediction(s) */
799  if (s->pict_type == AV_PICTURE_TYPE_P) {
800  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
801  return -1;
802  } else { /* AV_PICTURE_TYPE_B */
803  if (mb_type != 2) {
804  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
805  return -1;
806  } else {
807  for (i = 0; i < 4; i++)
808  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
809  0, 4 * 2 * sizeof(int16_t));
810  }
811  if (mb_type != 1) {
812  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
813  return -1;
814  } else {
815  for (i = 0; i < 4; i++)
816  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
817  0, 4 * 2 * sizeof(int16_t));
818  }
819  }
820 
821  mb_type = MB_TYPE_16x16;
822  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
823  int8_t *i4x4 = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
824  int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;
825 
826  memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
827 
828  if (mb_type == 8) {
829  if (s->mb_x > 0) {
830  for (i = 0; i < 4; i++)
831  s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
832  if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
833  s->left_samples_available = 0x5F5F;
834  }
835  if (s->mb_y > 0) {
836  s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
837  s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
838  s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
839  s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];
840 
841  if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
842  s->top_samples_available = 0x33FF;
843  }
844 
845  /* decode prediction codes for luma blocks */
846  for (i = 0; i < 16; i += 2) {
847  vlc = get_interleaved_ue_golomb(&s->gb_slice);
848 
849  if (vlc >= 25U) {
850  av_log(s->avctx, AV_LOG_ERROR,
851  "luma prediction:%"PRIu32"\n", vlc);
852  return -1;
853  }
854 
855  left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
856  top = &s->intra4x4_pred_mode_cache[scan8[i] - 8];
857 
858  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
859  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
860 
861  if (left[1] == -1 || left[2] == -1) {
862  av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
863  return -1;
864  }
865  }
866  } else { /* mb_type == 33, DC_128_PRED block type */
867  for (i = 0; i < 4; i++)
868  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
869  }
870 
871  AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
872  i4x4[4] = i4x4_cache[7 + 8 * 3];
873  i4x4[5] = i4x4_cache[7 + 8 * 2];
874  i4x4[6] = i4x4_cache[7 + 8 * 1];
875 
876  if (mb_type == 8) {
877  ff_h264_check_intra4x4_pred_mode(s->intra4x4_pred_mode_cache,
878  s->avctx, s->top_samples_available,
879  s->left_samples_available);
880 
881  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
882  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
883  } else {
884  for (i = 0; i < 4; i++)
885  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
886 
887  s->top_samples_available = 0x33FF;
888  s->left_samples_available = 0x5F5F;
889  }
890 
891  mb_type = MB_TYPE_INTRA4x4;
892  } else { /* INTRA16x16 */
893  dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
894  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
895 
896  if ((s->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
897  s->left_samples_available, dir, 0)) < 0) {
898  av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
899  return s->intra16x16_pred_mode;
900  }
901 
902  cbp = ff_h264_i_mb_type_info[mb_type - 8].cbp;
903  mb_type = MB_TYPE_INTRA16x16;
904  }
905 
906  if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
907  for (i = 0; i < 4; i++)
908  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
909  0, 4 * 2 * sizeof(int16_t));
910  if (s->pict_type == AV_PICTURE_TYPE_B) {
911  for (i = 0; i < 4; i++)
912  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
913  0, 4 * 2 * sizeof(int16_t));
914  }
915  }
916  if (!IS_INTRA4x4(mb_type)) {
917  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
918  }
919  if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
920  memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
921  }
922 
923  if (!IS_INTRA16x16(mb_type) &&
924  (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
925  if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U){
926  av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
927  return -1;
928  }
929 
930  cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
932  }
933  if (IS_INTRA16x16(mb_type) ||
934  (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
935  s->qscale += get_interleaved_se_golomb(&s->gb_slice);
936 
937  if (s->qscale > 31u) {
938  av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
939  return -1;
940  }
941  }
942  if (IS_INTRA16x16(mb_type)) {
943  AV_ZERO128(s->mb_luma_dc[0] + 0);
944  AV_ZERO128(s->mb_luma_dc[0] + 8);
945  if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
946  av_log(s->avctx, AV_LOG_ERROR,
947  "error while decoding intra luma dc\n");
948  return -1;
949  }
950  }
951 
952  if (cbp) {
953  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
954  const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
955 
956  for (i = 0; i < 4; i++)
957  if ((cbp & (1 << i))) {
958  for (j = 0; j < 4; j++) {
959  k = index ? (1 * (j & 1) + 2 * (i & 1) +
960  2 * (j & 2) + 4 * (i & 2))
961  : (4 * i + j);
962  s->non_zero_count_cache[scan8[k]] = 1;
963 
964  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
965  av_log(s->avctx, AV_LOG_ERROR,
966  "error while decoding block\n");
967  return -1;
968  }
969  }
970  }
971 
972  if ((cbp & 0x30)) {
973  for (i = 1; i < 3; ++i)
974  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
975  av_log(s->avctx, AV_LOG_ERROR,
976  "error while decoding chroma dc block\n");
977  return -1;
978  }
979 
980  if ((cbp & 0x20)) {
981  for (i = 1; i < 3; i++) {
982  for (j = 0; j < 4; j++) {
983  k = 16 * i + j;
984  s->non_zero_count_cache[scan8[k]] = 1;
985 
986  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
987  av_log(s->avctx, AV_LOG_ERROR,
988  "error while decoding chroma ac block\n");
989  return -1;
990  }
991  }
992  }
993  }
994  }
995  }
996 
997  s->cbp = cbp;
998  s->cur_pic->mb_type[mb_xy] = mb_type;
999 
1000  if (IS_INTRA(mb_type))
1001  s->chroma_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
1002  s->left_samples_available, DC_PRED8x8, 1);
1003 
1004  return 0;
1005 }
1006 
1008 {
1009  SVQ3Context *s = avctx->priv_data;
1010  const int mb_xy = s->mb_xy;
1011  int i, header;
1012  unsigned slice_id;
1013 
1014  header = get_bits(&s->gb, 8);
1015 
1016  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1017  /* TODO: what? */
1018  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
1019  return -1;
1020  } else {
1021  int slice_bits, slice_bytes, slice_length;
1022  int length = header >> 5 & 3;
1023 
1024  slice_length = show_bits(&s->gb, 8 * length);
1025  slice_bits = slice_length * 8;
1026  slice_bytes = slice_length + length - 1;
1027 
1028  skip_bits(&s->gb, 8);
1029 
1030  av_fast_padded_malloc(&s->slice_buf, &s->slice_buf_size, slice_bytes);
1031  if (!s->slice_buf)
1032  return AVERROR(ENOMEM);
1033 
1034  if (slice_bytes * 8LL > get_bits_left(&s->gb)) {
1035  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
1036  return AVERROR_INVALIDDATA;
1037  }
1038  memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
1039 
1040  if (length > 0) {
1041  memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
1042  }
1043 
1044  if (s->watermark_key) {
1045  uint32_t header = AV_RL32(&s->slice_buf[1]);
1046  AV_WL32(&s->slice_buf[1], header ^ s->watermark_key);
1047  }
1048  init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);
1049 
1050  skip_bits_long(&s->gb, slice_bytes * 8);
1051  }
1052 
1053  if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
1054  av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
1055  return -1;
1056  }
1057 
1058  s->slice_type = ff_h264_golomb_to_pict_type[slice_id];
1059 
1060  if ((header & 0x9F) == 2) {
1061  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
1062  get_bits(&s->gb_slice, i);
1063  } else if (get_bits1(&s->gb_slice)) {
1064  avpriv_report_missing_feature(s->avctx, "Media key encryption");
1065  return AVERROR_PATCHWELCOME;
1066  }
1067 
1068  s->slice_num = get_bits(&s->gb_slice, 8);
1069  s->qscale = get_bits(&s->gb_slice, 5);
1070  s->adaptive_quant = get_bits1(&s->gb_slice);
1071 
1072  /* unknown fields */
1073  skip_bits1(&s->gb_slice);
1074 
1075  if (s->has_watermark)
1076  skip_bits1(&s->gb_slice);
1077 
1078  skip_bits1(&s->gb_slice);
1079  skip_bits(&s->gb_slice, 2);
1080 
1081  if (skip_1stop_8data_bits(&s->gb_slice) < 0)
1082  return AVERROR_INVALIDDATA;
1083 
1084  /* reset intra predictors and invalidate motion vector references */
1085  if (s->mb_x > 0) {
1086  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
1087  -1, 4 * sizeof(int8_t));
1088  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
1089  -1, 8 * sizeof(int8_t) * s->mb_x);
1090  }
1091  if (s->mb_y > 0) {
1092  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
1093  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
1094 
1095  if (s->mb_x > 0)
1096  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
1097  }
1098 
1099  return 0;
1100 }
1101 
1103 {
1104  int q, x;
1105  const int max_qp = 51;
1106 
1107  for (q = 0; q < max_qp + 1; q++) {
1108  int shift = ff_h264_quant_div6[q] + 2;
1109  int idx = ff_h264_quant_rem6[q];
1110  for (x = 0; x < 16; x++)
1111  s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
1112  ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
1113  }
1114 }
1115 
1117 {
1118  SVQ3Context *s = avctx->priv_data;
1119  int m, x, y;
1120  unsigned char *extradata;
1121  unsigned char *extradata_end;
1122  unsigned int size;
1123  int marker_found = 0;
1124  int ret;
1125 
1126  s->cur_pic = &s->frames[0];
1127  s->last_pic = &s->frames[1];
1128  s->next_pic = &s->frames[2];
1129 
1130  s->cur_pic->f = av_frame_alloc();
1131  s->last_pic->f = av_frame_alloc();
1132  s->next_pic->f = av_frame_alloc();
1133  if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1134  return AVERROR(ENOMEM);
1135 
1136  ff_h264dsp_init(&s->h264dsp, 8, 1);
1137  ff_h264_pred_init(&s->hpc, AV_CODEC_ID_SVQ3, 8, 1);
1138  ff_videodsp_init(&s->vdsp, 8);
1139 
1140 
1141  avctx->bits_per_raw_sample = 8;
1142 
1143  ff_hpeldsp_init(&s->hdsp, avctx->flags);
1144  ff_tpeldsp_init(&s->tdsp);
1145 
1146  avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1147  avctx->color_range = AVCOL_RANGE_JPEG;
1148 
1149  s->avctx = avctx;
1150  s->halfpel_flag = 1;
1151  s->thirdpel_flag = 1;
1152  s->has_watermark = 0;
1153 
1154  /* prowl for the "SEQH" marker in the extradata */
1155  extradata = (unsigned char *)avctx->extradata;
1156  extradata_end = avctx->extradata + avctx->extradata_size;
1157  if (extradata) {
1158  for (m = 0; m + 8 < avctx->extradata_size; m++) {
1159  if (!memcmp(extradata, "SEQH", 4)) {
1160  marker_found = 1;
1161  break;
1162  }
1163  extradata++;
1164  }
1165  }
1166 
1167  /* if a match was found, parse the extra data */
1168  if (marker_found) {
1169  GetBitContext gb;
1170  int frame_size_code;
1171  int unk0, unk1, unk2, unk3, unk4;
1172  int w,h;
1173 
1174  size = AV_RB32(&extradata[4]);
1175  if (size > extradata_end - extradata - 8)
1176  return AVERROR_INVALIDDATA;
1177  init_get_bits(&gb, extradata + 8, size * 8);
1178 
1179  /* 'frame size code' and optional 'width, height' */
1180  frame_size_code = get_bits(&gb, 3);
1181  switch (frame_size_code) {
1182  case 0:
1183  w = 160;
1184  h = 120;
1185  break;
1186  case 1:
1187  w = 128;
1188  h = 96;
1189  break;
1190  case 2:
1191  w = 176;
1192  h = 144;
1193  break;
1194  case 3:
1195  w = 352;
1196  h = 288;
1197  break;
1198  case 4:
1199  w = 704;
1200  h = 576;
1201  break;
1202  case 5:
1203  w = 240;
1204  h = 180;
1205  break;
1206  case 6:
1207  w = 320;
1208  h = 240;
1209  break;
1210  case 7:
1211  w = get_bits(&gb, 12);
1212  h = get_bits(&gb, 12);
1213  break;
1214  }
1215  ret = ff_set_dimensions(avctx, w, h);
1216  if (ret < 0)
1217  return ret;
1218 
1219  s->halfpel_flag = get_bits1(&gb);
1220  s->thirdpel_flag = get_bits1(&gb);
1221 
1222  /* unknown fields */
1223  unk0 = get_bits1(&gb);
1224  unk1 = get_bits1(&gb);
1225  unk2 = get_bits1(&gb);
1226  unk3 = get_bits1(&gb);
1227 
1228  s->low_delay = get_bits1(&gb);
1229 
1230  /* unknown field */
1231  unk4 = get_bits1(&gb);
1232 
1233  av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1234  unk0, unk1, unk2, unk3, unk4);
1235 
1236  if (skip_1stop_8data_bits(&gb) < 0)
1237  return AVERROR_INVALIDDATA;
1238 
1239  s->has_watermark = get_bits1(&gb);
1240  avctx->has_b_frames = !s->low_delay;
1241  if (s->has_watermark) {
1242 #if CONFIG_ZLIB
1243  unsigned watermark_width = get_interleaved_ue_golomb(&gb);
1244  unsigned watermark_height = get_interleaved_ue_golomb(&gb);
1245  int u1 = get_interleaved_ue_golomb(&gb);
1246  int u2 = get_bits(&gb, 8);
1247  int u3 = get_bits(&gb, 2);
1248  int u4 = get_interleaved_ue_golomb(&gb);
1249  unsigned long buf_len = watermark_width *
1250  watermark_height * 4;
1251  int offset = get_bits_count(&gb) + 7 >> 3;
1252  uint8_t *buf;
1253 
1254  if (watermark_height <= 0 ||
1255  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
1256  return AVERROR_INVALIDDATA;
1257 
1258  buf = av_malloc(buf_len);
1259  if (!buf)
1260  return AVERROR(ENOMEM);
1261 
1262  av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1263  watermark_width, watermark_height);
1264  av_log(avctx, AV_LOG_DEBUG,
1265  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1266  u1, u2, u3, u4, offset);
1267  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1268  size - offset) != Z_OK) {
1269  av_log(avctx, AV_LOG_ERROR,
1270  "could not uncompress watermark logo\n");
1271  av_free(buf);
1272  return -1;
1273  }
1274  s->watermark_key = av_bswap16(av_crc(av_crc_get_table(AV_CRC_16_CCITT), 0, buf, buf_len));
1275 
1276  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1277  av_log(avctx, AV_LOG_DEBUG,
1278  "watermark key %#"PRIx32"\n", s->watermark_key);
1279  av_free(buf);
1280 #else
1281  av_log(avctx, AV_LOG_ERROR,
1282  "this svq3 file contains watermark which need zlib support compiled in\n");
1283  return AVERROR(ENOSYS);
1284 #endif
1285  }
1286  }
1287 
1288  s->mb_width = (avctx->width + 15) / 16;
1289  s->mb_height = (avctx->height + 15) / 16;
1290  s->mb_stride = s->mb_width + 1;
1291  s->mb_num = s->mb_width * s->mb_height;
1292  s->b_stride = 4 * s->mb_width;
1293  s->h_edge_pos = s->mb_width * 16;
1294  s->v_edge_pos = s->mb_height * 16;
1295 
1296  s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
1297  if (!s->intra4x4_pred_mode)
1298  return AVERROR(ENOMEM);
1299 
1300  s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
1301  sizeof(*s->mb2br_xy));
1302  if (!s->mb2br_xy)
1303  return AVERROR(ENOMEM);
1304 
1305  for (y = 0; y < s->mb_height; y++)
1306  for (x = 0; x < s->mb_width; x++) {
1307  const int mb_xy = x + y * s->mb_stride;
1308 
1309  s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
1310  }
1311 
1313 
1314  return 0;
1315 }
1316 
1317 static void free_picture(SVQ3Frame *pic)
1318 {
1319  int i;
1320  for (i = 0; i < 2; i++) {
1321  av_freep(&pic->motion_val_buf[i]);
1322  }
1323  av_freep(&pic->mb_type_buf);
1324 
1325  av_frame_unref(pic->f);
1326 }
1327 
1328 static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
1329 {
1330  SVQ3Context *s = avctx->priv_data;
1331  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
1332  const int b4_stride = s->mb_width * 4 + 1;
1333  const int b4_array_size = b4_stride * s->mb_height * 4;
1334  int ret;
1335 
1336  if (!pic->motion_val_buf[0]) {
1337  int i;
1338 
1339  pic->mb_type_buf = av_calloc(big_mb_num + s->mb_stride, sizeof(uint32_t));
1340  if (!pic->mb_type_buf)
1341  return AVERROR(ENOMEM);
1342  pic->mb_type = pic->mb_type_buf + 2 * s->mb_stride + 1;
1343 
1344  for (i = 0; i < 2; i++) {
1345  pic->motion_val_buf[i] = av_calloc(b4_array_size + 4, 2 * sizeof(int16_t));
1346  if (!pic->motion_val_buf[i]) {
1347  ret = AVERROR(ENOMEM);
1348  goto fail;
1349  }
1350 
1351  pic->motion_val[i] = pic->motion_val_buf[i] + 4;
1352  }
1353  }
1354 
1355  ret = ff_get_buffer(avctx, pic->f,
1356  (s->pict_type != AV_PICTURE_TYPE_B) ?
1358  if (ret < 0)
1359  goto fail;
1360 
1361  if (!s->edge_emu_buffer) {
1362  s->edge_emu_buffer = av_calloc(pic->f->linesize[0], 17);
1363  if (!s->edge_emu_buffer)
1364  return AVERROR(ENOMEM);
1365  }
1366 
1367  return 0;
1368 fail:
1369  free_picture(pic);
1370  return ret;
1371 }
1372 
1373 static int svq3_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
1374  int *got_frame, AVPacket *avpkt)
1375 {
1376  SVQ3Context *s = avctx->priv_data;
1377  int buf_size = avpkt->size;
1378  int left;
1379  int ret, m, i;
1380 
1381  /* special case for last picture */
1382  if (buf_size == 0) {
1383  if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
1384  ret = av_frame_ref(rframe, s->next_pic->f);
1385  if (ret < 0)
1386  return ret;
1387  s->last_frame_output = 1;
1388  *got_frame = 1;
1389  }
1390  return 0;
1391  }
1392 
1393  s->mb_x = s->mb_y = s->mb_xy = 0;
1394 
1395  ret = init_get_bits8(&s->gb, avpkt->data, avpkt->size);
1396  if (ret < 0)
1397  return ret;
1398 
1399  if (svq3_decode_slice_header(avctx))
1400  return -1;
1401 
1402  s->pict_type = s->slice_type;
1403 
1404  if (s->pict_type != AV_PICTURE_TYPE_B)
1405  FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);
1406 
1407  av_frame_unref(s->cur_pic->f);
1408 
1409  /* for skipping the frame */
1410  s->cur_pic->f->pict_type = s->pict_type;
1411  s->cur_pic->f->key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
1412 
1413  ret = get_buffer(avctx, s->cur_pic);
1414  if (ret < 0)
1415  return ret;
1416 
1417  for (i = 0; i < 16; i++) {
1418  s->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1419  s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1420  }
1421  for (i = 0; i < 16; i++) {
1422  s->block_offset[16 + i] =
1423  s->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1424  s->block_offset[48 + 16 + i] =
1425  s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1426  }
1427 
1428  if (s->pict_type != AV_PICTURE_TYPE_I) {
1429  if (!s->last_pic->f->data[0]) {
1430  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1431  av_frame_unref(s->last_pic->f);
1432  ret = get_buffer(avctx, s->last_pic);
1433  if (ret < 0)
1434  return ret;
1435  memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1436  memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1437  s->last_pic->f->linesize[1]);
1438  memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1439  s->last_pic->f->linesize[2]);
1440  }
1441 
1442  if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1443  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1444  av_frame_unref(s->next_pic->f);
1445  ret = get_buffer(avctx, s->next_pic);
1446  if (ret < 0)
1447  return ret;
1448  memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1449  memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1450  s->next_pic->f->linesize[1]);
1451  memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1452  s->next_pic->f->linesize[2]);
1453  }
1454  }
1455 
1456  if (avctx->debug & FF_DEBUG_PICT_INFO)
1457  av_log(s->avctx, AV_LOG_DEBUG,
1458  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1459  av_get_picture_type_char(s->pict_type),
1460  s->halfpel_flag, s->thirdpel_flag,
1461  s->adaptive_quant, s->qscale, s->slice_num);
1462 
1463  if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
1464  avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I ||
1465  avctx->skip_frame >= AVDISCARD_ALL)
1466  return 0;
1467 
1468  if (s->pict_type == AV_PICTURE_TYPE_B) {
1469  s->frame_num_offset = s->slice_num - s->prev_frame_num;
1470 
1471  if (s->frame_num_offset < 0)
1472  s->frame_num_offset += 256;
1473  if (s->frame_num_offset == 0 ||
1474  s->frame_num_offset >= s->prev_frame_num_offset) {
1475  av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1476  return -1;
1477  }
1478  } else {
1479  s->prev_frame_num = s->frame_num;
1480  s->frame_num = s->slice_num;
1481  s->prev_frame_num_offset = s->frame_num - s->prev_frame_num;
1482 
1483  if (s->prev_frame_num_offset < 0)
1484  s->prev_frame_num_offset += 256;
1485  }
1486 
1487  for (m = 0; m < 2; m++) {
1488  int i;
1489  for (i = 0; i < 4; i++) {
1490  int j;
1491  for (j = -1; j < 4; j++)
1492  s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1493  if (i < 3)
1494  s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1495  }
1496  }
1497 
1498  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1499  for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1500  unsigned mb_type;
1501  s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
1502 
1503  if ((get_bits_left(&s->gb_slice)) <= 7) {
1504  if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
1505  show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {
1506 
1507  if (svq3_decode_slice_header(avctx))
1508  return -1;
1509  }
1510  if (s->slice_type != s->pict_type) {
1511  avpriv_request_sample(avctx, "non constant slice type");
1512  }
1513  /* TODO: support s->mb_skip_run */
1514  }
1515 
1516  mb_type = get_interleaved_ue_golomb(&s->gb_slice);
1517 
1518  if (s->pict_type == AV_PICTURE_TYPE_I)
1519  mb_type += 8;
1520  else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1521  mb_type += 4;
1522  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1523  av_log(s->avctx, AV_LOG_ERROR,
1524  "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1525  return -1;
1526  }
1527 
1528  if (mb_type != 0 || s->cbp)
1529  hl_decode_mb(s);
1530 
1531  if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
1532  s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
1533  (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1534  }
1535 
1536  ff_draw_horiz_band(avctx, s->cur_pic->f,
1537  s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1538  16 * s->mb_y, 16, PICT_FRAME, 0,
1539  s->low_delay);
1540  }
1541 
1542  left = buf_size*8 - get_bits_count(&s->gb_slice);
1543 
1544  if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
1545  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
1546  //av_hex_dump(stderr, buf+buf_size-8, 8);
1547  }
1548 
1549  if (left < 0) {
1550  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1551  return -1;
1552  }
1553 
1554  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
1555  ret = av_frame_ref(rframe, s->cur_pic->f);
1556  else if (s->last_pic->f->data[0])
1557  ret = av_frame_ref(rframe, s->last_pic->f);
1558  if (ret < 0)
1559  return ret;
1560 
1561  /* Do not output the last pic after seeking. */
1562  if (s->last_pic->f->data[0] || s->low_delay)
1563  *got_frame = 1;
1564 
1565  if (s->pict_type != AV_PICTURE_TYPE_B) {
1566  FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);
1567  } else {
1568  av_frame_unref(s->cur_pic->f);
1569  }
1570 
1571  return buf_size;
1572 }
1573 
1575 {
1576  SVQ3Context *s = avctx->priv_data;
1577 
1578  for (int i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
1579  free_picture(&s->frames[i]);
1580  av_frame_free(&s->frames[i].f);
1581  }
1582  av_freep(&s->slice_buf);
1583  av_freep(&s->intra4x4_pred_mode);
1584  av_freep(&s->edge_emu_buffer);
1585  av_freep(&s->mb2br_xy);
1586 
1587  return 0;
1588 }
1589 
1591  .p.name = "svq3",
1592  CODEC_LONG_NAME("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1593  .p.type = AVMEDIA_TYPE_VIDEO,
1594  .p.id = AV_CODEC_ID_SVQ3,
1595  .priv_data_size = sizeof(SVQ3Context),
1597  .close = svq3_decode_end,
1599  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1602  .p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1603  AV_PIX_FMT_NONE},
1604  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1605 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
SVQ3Context::frame_num
int frame_num
Definition: svq3.c:109
SVQ3Context::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: svq3.c:135
IS_INTRA4x4
#define IS_INTRA4x4(a)
Definition: mpegutils.h:68
A
#define A(x)
Definition: vpx_arith.h:28
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:51
svq3_dequant_coeff
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:212
SVQ3Context::next_pic
SVQ3Frame * next_pic
Definition: svq3.c:92
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
SVQ3Context::slice_type
enum AVPictureType slice_type
Definition: svq3.c:115
SVQ3Context::gb_slice
GetBitContext gb_slice
Definition: svq3.c:95
SVQ3Context::vdsp
VideoDSPContext vdsp
Definition: svq3.c:89
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
SVQ3Context::slice_num
int slice_num
Definition: svq3.c:106
level
uint8_t level
Definition: svq3.c:204
av_clip
#define av_clip
Definition: common.h:95
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:839
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DC_PRED8x8
#define DC_PRED8x8
Definition: h264pred.h:68
svq3_decode_slice_header
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:1007
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
mem_internal.h
SVQ3Context::avctx
AVCodecContext * avctx
Definition: svq3.c:83
DC_128_PRED
@ DC_128_PRED
Definition: vp9.h:58
SVQ3Context::mb_num
int mb_num
Definition: svq3.c:121
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
SVQ3Context::v_edge_pos
int v_edge_pos
Definition: svq3.c:104
AVPictureType
AVPictureType
Definition: avutil.h:272
ff_h264_chroma_qp
const uint8_t ff_h264_chroma_qp[7][QP_MAX_NUM+1]
Definition: h264data.c:203
mv
static const int8_t mv[256][2]
Definition: 4xm.c:80
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
SVQ3Context::left_samples_available
unsigned int left_samples_available
Definition: svq3.c:133
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
get_interleaved_ue_golomb
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:143
ff_h264_golomb_to_inter_cbp
const uint8_t ff_h264_golomb_to_inter_cbp[48]
Definition: h264data.c:48
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:116
h264_parse.h
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
SVQ3Context::h_edge_pos
int h_edge_pos
Definition: svq3.c:103
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:661
IMbInfo::cbp
uint8_t cbp
Definition: h264data.h:37
AVPacket::data
uint8_t * data
Definition: packet.h:374
DC_PRED
@ DC_PRED
Definition: vp9.h:48
MB_TYPE_INTRA4x4
#define MB_TYPE_INTRA4x4
Definition: mpegutils.h:44
SVQ3Context::slice_buf
uint8_t * slice_buf
Definition: svq3.c:96
VERT_LEFT_PRED
@ VERT_LEFT_PRED
Definition: vp9.h:53
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:47
SVQ3Context::mb
int16_t mb[16 *48 *2]
Definition: svq3.c:139
PREDICT_MODE
#define PREDICT_MODE
Definition: svq3.c:150
FFCodec
Definition: codec_internal.h:119
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
ff_h264_golomb_to_intra4x4_cbp
const uint8_t ff_h264_golomb_to_intra4x4_cbp[48]
Definition: h264data.c:42
SVQ3Context::frame_num_offset
int frame_num_offset
Definition: svq3.c:110
mpegutils.h
MB_TYPE_INTRA16x16
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:45
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:649
SVQ3Context::slice_buf_size
unsigned slice_buf_size
Definition: svq3.c:97
SVQ3Context::last_frame_output
int last_frame_output
Definition: svq3.c:105
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1328
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:346
get_buffer
static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1328
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
crc.h
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
golomb.h
exp golomb vlc stuff
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:123
SVQ3Context::last_pic
SVQ3Frame * last_pic
Definition: svq3.c:93
SVQ3Context::qscale
int qscale
Definition: svq3.c:107
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1698
fail
#define fail()
Definition: checkasm.h:134
GetBitContext
Definition: get_bits.h:61
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:478
SVQ3Context::tdsp
TpelDSPContext tdsp
Definition: svq3.c:88
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264_parse.h:40
SVQ3Context::thirdpel_flag
int thirdpel_flag
Definition: svq3.c:99
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264_parse.h:127
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:104
SVQ3Context::intra4x4_pred_mode_cache
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: svq3.c:129
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
SVQ3Context::gb
GetBitContext gb
Definition: svq3.c:94
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
SVQ3Context::frames
SVQ3Frame frames[3]
Definition: svq3.c:144
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
SVQ3Context::cbp
int cbp
Definition: svq3.c:108
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
FULLPEL_MODE
#define FULLPEL_MODE
Definition: svq3.c:147
SVQ3Context::mb_y
int mb_y
Definition: svq3.c:118
SVQ3Context::mb_x
int mb_x
Definition: svq3.c:118
SVQ3Context::adaptive_quant
int adaptive_quant
Definition: svq3.c:102
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:500
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:694
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:298
s
#define s(width, name)
Definition: cbs_vp9.c:256
TpelDSPContext
thirdpel DSP context
Definition: tpeldsp.h:42
SVQ3Context::pict_type
enum AVPictureType pict_type
Definition: svq3.c:114
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
svq3_mc_dir
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:499
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:376
ff_tpeldsp_init
av_cold void ff_tpeldsp_init(TpelDSPContext *c)
Definition: tpeldsp.c:312
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:363
QP_MAX_NUM
#define QP_MAX_NUM
Definition: h264.h:27
h264data.h
B
#define B
Definition: huffyuv.h:42
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1451
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
svq3_pred_motion
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: svq3.c:375
decode.h
IS_SKIP
#define IS_SKIP(a)
Definition: mpegutils.h:74
SVQ3Context::top_samples_available
unsigned int top_samples_available
Definition: svq3.c:132
IS_INTRA
#define IS_INTRA(x, y)
AV_CODEC_ID_SVQ3
@ AV_CODEC_ID_SVQ3
Definition: codec_id.h:75
SVQ3Context::b_stride
int b_stride
Definition: svq3.c:122
SVQ3Context::prev_frame_num_offset
int prev_frame_num_offset
Definition: svq3.c:111
SVQ3Frame::mb_type_buf
uint32_t * mb_type_buf
Definition: svq3.c:79
SVQ3Context::h264dsp
H264DSPContext h264dsp
Definition: svq3.c:85
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:264
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
IMbInfo::pred_mode
uint8_t pred_mode
Definition: h264data.h:36
if
if(ret)
Definition: filter_design.txt:179
SVQ3Frame::motion_val
int16_t(*[2] motion_val)[2]
Definition: svq3.c:77
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:203
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:982
SVQ3Context::mb_width
int mb_width
Definition: svq3.c:120
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
SVQ3Context::mb2br_xy
uint32_t * mb2br_xy
Definition: svq3.c:124
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
h264dsp.h
mathops.h
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
ff_h264_chroma_dc_scan
const uint8_t ff_h264_chroma_dc_scan[4]
Definition: h264data.c:54
SVQ3Context
Definition: svq3.c:82
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
SVQ3Context::mb_luma_dc
int16_t mb_luma_dc[3][16 *2]
Definition: svq3.c:140
tpeldsp.h
index
int index
Definition: gxfenc.c:89
hl_decode_mb_idct_luma
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:612
HpelDSPContext
Half-pel DSP context.
Definition: hpeldsp.h:45
H264DSPContext
Context for storing H.264 DSP functions.
Definition: h264dsp.h:42
SVQ3Context::intra16x16_pred_mode
int intra16x16_pred_mode
Definition: svq3.c:127
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:75
SVQ3Context::hpc
H264PredContext hpc
Definition: svq3.c:86
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1450
init_dequant4_coeff_table
static void init_dequant4_coeff_table(SVQ3Context *s)
Definition: svq3.c:1102
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
svq3_fetch_diagonal_mv
static av_always_inline int svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C, int i, int list, int part_width)
Definition: svq3.c:354
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:259
AVPacket::size
int size
Definition: packet.h:375
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:353
codec_internal.h
shift
static int shift(int a, int b)
Definition: bonk.c:253
rectangle.h
hl_decode_mb
static void hl_decode_mb(SVQ3Context *s)
Definition: svq3.c:661
get_interleaved_se_golomb
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:301
free_picture
static void free_picture(SVQ3Frame *pic)
Definition: svq3.c:1317
size
int size
Definition: twinvq_data.h:10344
SVQ3Frame::mb_type
uint32_t * mb_type
Definition: svq3.c:79
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:55
avg
#define avg(a, b, c, d)
Definition: colorspacedsp_template.c:28
header
static const uint8_t header[24]
Definition: sdr2.c:67
height
#define height
av_crc_get_table
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
Definition: crc.c:374
av_bswap16
#define av_bswap16
Definition: bswap.h:31
offset
it's the only field you need to keep assuming you have a context There is some magic you don't need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
ff_h264_quant_rem6
const uint8_t ff_h264_quant_rem6[QP_MAX_NUM+1]
Definition: h264data.c:174
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:538
IS_INTRA16x16
#define IS_INTRA16x16(a)
Definition: mpegutils.h:69
hl_decode_mb_predict_luma
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:627
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
SVQ3Frame::motion_val_buf
int16_t(*[2] motion_val_buf)[2]
Definition: svq3.c:76
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
SVQ3Context::prev_frame_num
int prev_frame_num
Definition: svq3.c:112
svq3_add_idct_c
static void svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:254
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:116
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
svq3_luma_dc_dequant_idct_c
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:219
stride
#define stride
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AV_CRC_16_CCITT
@ AV_CRC_16_CCITT
Definition: crc.h:51
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:499
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
SVQ3Frame
Definition: svq3.c:73
THIRDPEL_MODE
#define THIRDPEL_MODE
Definition: svq3.c:149
SVQ3Context::mv_cache
int16_t mv_cache[2][5 *8][2]
Definition: svq3.c:137
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:49
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:487
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:211
svq3_decode_frame
static int svq3_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1373
SVQ3Context::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
Definition: svq3.c:141
AVCodecContext::height
int height
Definition: avcodec.h:571
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:608
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:272
svq3_decode_mb
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:709
svq3_scan
static const uint8_t svq3_scan[16]
Definition: svq3.c:161
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:130
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:66
SVQ3Context::halfpel_flag
int halfpel_flag
Definition: svq3.c:98
mid_pred
#define mid_pred
Definition: mathops.h:98
svq3_pred_1
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:187
ret
ret
Definition: filter_design.txt:187
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
svq3_dct_tables
static const struct @147 svq3_dct_tables[2][16]
SVQ3Context::mb_height
int mb_height
Definition: svq3.c:120
SVQ3Context::hdsp
HpelDSPContext hdsp
Definition: svq3.c:87
SVQ3Context::low_delay
int low_delay
Definition: svq3.c:116
h264pred.h
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
svq3_decode_block
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:294
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:844
AVCodecContext
main external API structure.
Definition: avcodec.h:398
ff_h264_dequant4_coeff_init
const uint8_t ff_h264_dequant4_coeff_init[6][3]
Definition: h264data.c:152
SVQ3Frame::f
AVFrame * f
Definition: svq3.c:74
SVQ3Context::block_offset
int block_offset[2 *(16 *3)]
Definition: svq3.c:143
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
av_crc
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
Definition: crc.c:392
mode
mode
Definition: ebur128.h:83
ff_h264_check_intra4x4_pred_mode
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx, int top_samples_available, int left_samples_available)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:133
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
ff_h264_i_mb_type_info
const IMbInfo ff_h264_i_mb_type_info[26]
Definition: h264data.c:66
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:814
SVQ3Context::chroma_pred_mode
int chroma_pred_mode
Definition: svq3.c:126
SVQ3Context::watermark_key
uint32_t watermark_key
Definition: svq3.c:101
SVQ3Context::mb_xy
int mb_xy
Definition: svq3.c:119
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
temp
else temp
Definition: vf_mcdeint.c:248
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:82
luma_dc_zigzag_scan
static const uint8_t luma_dc_zigzag_scan[16]
Definition: svq3.c:168
PART_NOT_AVAILABLE
#define PART_NOT_AVAILABLE
Definition: h264pred.h:89
av_clip_uint8
#define av_clip_uint8
Definition: common.h:101
ff_h264_quant_div6
const uint8_t ff_h264_quant_div6[QP_MAX_NUM+1]
Definition: h264data.c:182
VideoDSPContext
Definition: videodsp.h:40
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1327
H264PredContext
Context for storing H.264 prediction functions.
Definition: h264pred.h:94
svq3_mc_dir_part
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:424
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:72
svq3_decode_end
static av_cold int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1574
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1046
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
SVQ3Context::dequant4_coeff
uint32_t dequant4_coeff[QP_MAX_NUM+1][16]
Definition: svq3.c:142
SVQ3Context::ref_cache
int8_t ref_cache[2][5 *8]
Definition: svq3.c:138
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:425
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
videodsp.h
SVQ3Context::mb_stride
int mb_stride
Definition: svq3.c:121
DIAG_DOWN_LEFT_PRED
@ DIAG_DOWN_LEFT_PRED
Definition: vp9.h:49
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:571
int32_t
int32_t
Definition: audioconvert.c:56
hpeldsp.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:370
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
svq3_decode_init
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:1116
h
h
Definition: vp9dsp_template.c:2038
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:72
ff_svq3_decoder
const FFCodec ff_svq3_decoder
Definition: svq3.c:1590
int
int
Definition: ffmpeg_filter.c:156
SVQ3Context::cur_pic
SVQ3Frame * cur_pic
Definition: svq3.c:91
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
SVQ3Context::has_watermark
int has_watermark
Definition: svq3.c:100
SVQ3Context::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: svq3.c:130
svq3_pred_0
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:175
HALFPEL_MODE
#define HALFPEL_MODE
Definition: svq3.c:148
ff_h264_check_intra_pred_mode
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available, int left_samples_available, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:181