FFmpeg
svq3.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "libavutil/crc.h"
47 #include "libavutil/mem_internal.h"
48 
49 #include "internal.h"
50 #include "avcodec.h"
51 #include "mpegutils.h"
52 #include "h264dec.h"
53 #include "h264data.h"
54 #include "golomb.h"
55 #include "hpeldsp.h"
56 #include "mathops.h"
57 #include "rectangle.h"
58 #include "tpeldsp.h"
59 
60 #if CONFIG_ZLIB
61 #include <zlib.h>
62 #endif
63 
64 #include "svq1.h"
65 
66 /**
67  * @file
68  * svq3 decoder.
69  */
70 
/* Per-picture state kept by the SVQ3 decoder.
 * NOTE(review): one member line is elided in this excerpt — code below
 * dereferences pic->f (an AVFrame pointer), so an `AVFrame *f;` member
 * presumably belongs here; confirm against the full source. */
typedef struct SVQ3Frame {

    int16_t (*motion_val_buf[2])[2];   /* backing buffers for per-list motion vectors */
    int16_t (*motion_val[2])[2];       /* working view used by the decode loop (presumably an
                                        * offset into motion_val_buf — confirm) */

    uint32_t *mb_type_buf, *mb_type;   /* per-MB type table; *_buf presumably the owned
                                        * allocation, mb_type the offset view — confirm */
} SVQ3Frame;
79 
/* Decoder-wide state.
 * NOTE(review): many members are elided in this excerpt (the AVCodecContext,
 * DSP contexts, cur_pic/last_pic/next_pic, gb/gb_slice bit readers,
 * left/topright_samples_available, h/v_edge_pos, slice buffers, flags, etc.
 * — all referenced by the functions below); only the fields shown survived
 * extraction. */
typedef struct SVQ3Context {


    uint32_t watermark_key;            /* XORed over the slice header words when watermarking
                                        * is active (see svq3_decode_slice_header) */
    int buf_size;
    int qscale;                        /* current quantizer; validated against 31u */
    int cbp;                           /* coded block pattern of the current MB */

    enum AVPictureType pict_type;      /* picture type currently being decoded (I/P/B) */
    enum AVPictureType slice_type;     /* type signalled by the current slice header */

    int mb_x, mb_y;                    /* current macroblock position */
    int mb_xy;                         /* linear MB index (presumably mb_x + mb_y * mb_stride
                                        * — confirm against the full source) */
    int mb_width, mb_height;
    int mb_stride, mb_num;
    int b_stride;                      /* stride of the 4x4-block-granularity motion tables */

    uint32_t *mb2br_xy;                /* maps an MB index to its slot in intra4x4_pred_mode */


    int8_t intra4x4_pred_mode_cache[5 * 8];   /* scan8-addressed cache of 4x4 intra modes */
    int8_t (*intra4x4_pred_mode);             /* per-MB stored 4x4 intra modes */

    unsigned int top_samples_available;       /* availability bitmask for intra prediction */


    DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];    /* scan8 MV cache, one per list */
    DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];        /* scan8 reference cache */
    DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];          /* coefficient buffer, 16 per block */
    DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];    /* separated luma DC coefficients */
    DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
    uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
    int block_offset[2 * (16 * 3)];    /* pixel offset of each 4x4 block inside the MB */
} SVQ3Context;
148 
/* Motion-compensation modes for inter macroblocks. PREDICT_MODE derives the
 * MVs from the co-located MB of the next picture (see svq3_mc_dir). */
#define FULLPEL_MODE 1
#define HALFPEL_MODE 2
#define THIRDPEL_MODE 3
#define PREDICT_MODE 4
153 
154 /* dual scan (from some older H.264 draft)
155  * o-->o-->o o
156  * | /|
157  * o o o / o
158  * | / | |/ |
159  * o o o o
160  * /
161  * o-->o-->o-->o
162  */
/* Coefficient scan order implementing the "dual scan" diagrammed above,
 * expressed as 4x4 positions (column + 4 * row). */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
169 
/* Zigzag positions of the 16 luma DC coefficients inside the MB coefficient
 * buffer (strides of 16 int16_t per 4x4 block, 64 per block row — consistent
 * with svq3_luma_dc_dequant_idct_c writing to output[16*k]). */
static const uint8_t luma_dc_zigzag_scan[16] = {
    0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
    3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
    1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
    3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
};
176 
/* Index pairs into svq3_pred_1, enumerated along the anti-diagonals of a
 * 5x5 grid; indexed by the decoded luma-prediction vlc (0..24) in
 * svq3_decode_mb. */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};
188 
/* Contextual 4x4 intra prediction mode tables: [top mode + 1][left mode + 1]
 * yields an ordered candidate list (-1 = invalid), selected into via the
 * svq3_pred_0 pair for the decoded vlc (see svq3_decode_mb). */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};
203 
/* run/level pairs for coefficient vlc values < 16, indexed by the "intra"
 * context computed in svq3_decode_block.
 * NOTE(review): the struct member declarations are elided in this excerpt;
 * svq3_decode_block accesses them as .run and .level. */
static const struct {
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
213 
/* Dequantization scale factors indexed by qscale (0..31); shared by the
 * DC and AC inverse transforms below. */
static const uint32_t svq3_dequant_coeff[32] = {
     3881,   4351,   4890,   5481,   6154,   6914,   7761,   8718,
     9781,  10987,  12339,  13828,  15523,  17435,  19561,  21873,
    24552,  27656,  30847,  34870,  38807,  43747,  49103,  54683,
    61694,  68745,  77615,  89113, 100253, 109366, 126635, 141533
};

/**
 * Dequantize and inverse-transform the 4x4 array of luma DC coefficients.
 *
 * @param output destination coefficient buffer; results land in the DC slot
 *               of each 4x4 block (stride of 16 int16_t per block, per the
 *               x_offset/stride addressing below)
 * @param input  the 16 luma DC coefficients in raster order
 * @param qp     quantizer index, 0..31
 */
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
{
    const unsigned qmul = svq3_dequant_coeff[qp];
#define stride 16
    static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
    int tmp[16];

    /* horizontal pass: 13/17/7 butterfly over each row */
    for (int row = 0; row < 4; row++) {
        const int z0 = 13 * (input[4 * row + 0] + input[4 * row + 2]);
        const int z1 = 13 * (input[4 * row + 0] - input[4 * row + 2]);
        const int z2 =  7 * input[4 * row + 1] - 17 * input[4 * row + 3];
        const int z3 = 17 * input[4 * row + 1] +  7 * input[4 * row + 3];

        tmp[4 * row + 0] = z0 + z3;
        tmp[4 * row + 1] = z1 + z2;
        tmp[4 * row + 2] = z1 - z2;
        tmp[4 * row + 3] = z0 - z3;
    }

    /* vertical pass plus dequantization; the multiply is done in unsigned
     * to sidestep signed-overflow UB, 0x80000 rounds before the >> 20 */
    for (int col = 0; col < 4; col++) {
        const int offset = x_offset[col];
        const int z0 = 13 * (tmp[4 * 0 + col] + tmp[4 * 2 + col]);
        const int z1 = 13 * (tmp[4 * 0 + col] - tmp[4 * 2 + col]);
        const int z2 =  7 * tmp[4 * 1 + col] - 17 * tmp[4 * 3 + col];
        const int z3 = 17 * tmp[4 * 1 + col] +  7 * tmp[4 * 3 + col];

        output[stride *  0 + offset] = (int)((z0 + z3) * qmul + 0x80000) >> 20;
        output[stride *  2 + offset] = (int)((z1 + z2) * qmul + 0x80000) >> 20;
        output[stride *  8 + offset] = (int)((z1 - z2) * qmul + 0x80000) >> 20;
        output[stride * 10 + offset] = (int)((z0 - z3) * qmul + 0x80000) >> 20;
    }
#undef stride
}
255 
/**
 * Dequantize, inverse-transform and add a 4x4 residual block to dst.
 *
 * @param dst    top-left pixel of the destination 4x4 area
 * @param block  the 16 coefficients; cleared to zero on return
 * @param stride line size of dst
 * @param qp     quantizer index, 0..31 (selects svq3_dequant_coeff)
 * @param dc     0: AC-only; 1: DC scaled by the fixed constant 1538;
 *               otherwise the DC is dequantized here with qmul
 */
static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
                            int stride, int qp, int dc)
{
    const int qmul = svq3_dequant_coeff[qp];
    int i;

    if (dc) {
        /* fold the DC contribution into the rounding term instead of
         * transforming it; 13 * 13 matches the two 1-D pass gains */
        dc = 13 * 13 * (dc == 1 ? 1538U * block[0]
                                : qmul * (block[0] >> 3) / 2);
        block[0] = 0;
    }

    /* horizontal pass (in place) */
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
        const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
        const int z2 =  7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
        const int z3 = 17 * block[1 + 4 * i] +  7 * block[3 + 4 * i];

        block[0 + 4 * i] = z0 + z3;
        block[1 + 4 * i] = z1 + z2;
        block[2 + 4 * i] = z1 - z2;
        block[3 + 4 * i] = z0 - z3;
    }

    /* vertical pass + dequant + add; unsigned intermediates avoid signed
     * overflow UB, rr carries the DC and the 0x80000 rounding constant */
    for (i = 0; i < 4; i++) {
        const unsigned z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
        const unsigned z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
        const unsigned z2 =  7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
        const unsigned z3 = 17 * block[i + 4 * 1] +  7 * block[i + 4 * 3];
        const int rr = (dc + 0x80000u);

        dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((int)((z0 + z3) * qmul + rr) >> 20));
        dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((int)((z1 + z2) * qmul + rr) >> 20));
        dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((int)((z1 - z2) * qmul + rr) >> 20));
        dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((int)((z0 - z3) * qmul + rr) >> 20));
    }

    /* the coefficient buffer is consumed: leave it zeroed for the next block */
    memset(block, 0, 16 * sizeof(int16_t));
}
295 
/**
 * Decode run/level coefficient data for one block.
 *
 * @param gb    bitstream reader positioned at the block's coefficients
 * @param block destination coefficient array (written via the scan table)
 * @param index first coefficient to decode (e.g. 1 to skip a separate DC)
 * @param type  scan/table selector; 3 = chroma DC, 2 iterates in two passes
 * @return 0 on success, -1 on corrupt data
 */
static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
                                    int index, const int type)
{
    /* NOTE(review): the initializer lines of scan_patterns are elided in
     * this excerpt; it presumably maps type to one of the scan tables
     * above — confirm against the full source. */
    static const uint8_t *const scan_patterns[4] = {
    };

    int run, level, sign, limit;
    unsigned vlc;
    const int intra = 3 * type >> 2;   /* 0 for type 0/1, 1 for type 2, 2 for type 3 */
    const uint8_t *const scan = scan_patterns[type];

    /* type 2 decodes in passes (first 16 >> 1 = 8 coefficients, then chunks
     * of 8); all other types run the outer loop once */
    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
            if ((int32_t)vlc < 0)
                return -1;   /* overlong/corrupt golomb code */

            sign = (vlc & 1) ? 0 : -1;   /* even codes encode negative levels */
            vlc  = vlc + 1 >> 1;

            if (type == 3) {
                /* chroma DC: fixed hard-coded run/level mapping */
                if (vlc < 3) {
                    run   = 0;
                    level = vlc;
                } else if (vlc < 4) {
                    run   = 1;
                    level = 1;
                } else {
                    run   = vlc & 0x3;
                    level = (vlc + 9 >> 2) - run;
                }
            } else {
                /* small codes come from the tables, larger ones use an
                 * escape formula packing run into the low bits */
                if (vlc < 16U) {
                    run   = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;
                } else if (intra) {
                    run   = vlc & 0x7;
                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                } else {
                    run   = vlc & 0xF;
                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
                }
            }

            /* advance past the zero run; overshooting the limit is an error */
            if ((index += run) >= limit)
                return -1;

            block[scan[index]] = (level ^ sign) - sign;   /* apply the sign */
        }

        if (type != 2) {
            break;
        }
    }

    return 0;
}
354 
355 static av_always_inline int
356 svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
357  int i, int list, int part_width)
358 {
359  const int topright_ref = s->ref_cache[list][i - 8 + part_width];
360 
361  if (topright_ref != PART_NOT_AVAILABLE) {
362  *C = s->mv_cache[list][i - 8 + part_width];
363  return topright_ref;
364  } else {
365  *C = s->mv_cache[list][i - 8 - 1];
366  return s->ref_cache[list][i - 8 - 1];
367  }
368 }
369 
370 /**
371  * Get the predicted MV.
372  * @param n the block index
373  * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
374  * @param mx the x component of the predicted motion vector
375  * @param my the y component of the predicted motion vector
376  */
377 static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
378  int part_width, int list,
379  int ref, int *const mx, int *const my)
380 {
381  const int index8 = scan8[n];
382  const int top_ref = s->ref_cache[list][index8 - 8];
383  const int left_ref = s->ref_cache[list][index8 - 1];
384  const int16_t *const A = s->mv_cache[list][index8 - 1];
385  const int16_t *const B = s->mv_cache[list][index8 - 8];
386  const int16_t *C;
387  int diagonal_ref, match_count;
388 
389 /* mv_cache
390  * B . . A T T T T
391  * U . . L . . , .
392  * U . . L . . . .
393  * U . . L . . , .
394  * . . . L . . . .
395  */
396 
397  diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
398  match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
399  if (match_count > 1) { //most common
400  *mx = mid_pred(A[0], B[0], C[0]);
401  *my = mid_pred(A[1], B[1], C[1]);
402  } else if (match_count == 1) {
403  if (left_ref == ref) {
404  *mx = A[0];
405  *my = A[1];
406  } else if (top_ref == ref) {
407  *mx = B[0];
408  *my = B[1];
409  } else {
410  *mx = C[0];
411  *my = C[1];
412  }
413  } else {
414  if (top_ref == PART_NOT_AVAILABLE &&
415  diagonal_ref == PART_NOT_AVAILABLE &&
416  left_ref != PART_NOT_AVAILABLE) {
417  *mx = A[0];
418  *my = A[1];
419  } else {
420  *mx = mid_pred(A[0], B[0], C[0]);
421  *my = mid_pred(A[1], B[1], C[1]);
422  }
423  }
424 }
425 
/**
 * Motion-compensate one partition (luma + chroma) from the reference picture.
 *
 * @param x, y          top-left pixel position of the partition
 * @param width, height partition size in luma pixels
 * @param mx, my        motion vector relative to (x, y), in full pixels
 * @param dxy           sub-pel interpolation selector for the DSP tables
 * @param thirdpel      use the thirdpel DSP functions instead of halfpel
 * @param dir           0: predict from last_pic; 1: from next_pic (B frames)
 * @param avg           average into dest instead of overwriting
 */
static inline void svq3_mc_dir_part(SVQ3Context *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
    int linesize   = s->cur_pic->f->linesize[0];
    int uvlinesize = s->cur_pic->f->linesize[1];

    mx += x;
    my += y;

    /* reference block partially outside the picture: clamp and use the
     * edge-emulation buffer */
    if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
        my < 0 || my >= s->v_edge_pos - height - 1) {
        emu = 1;
        mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
        my = av_clip(my, -16, s->v_edge_pos - height + 15);
    }

    /* form component predictions */
    dest = s->cur_pic->f->data[0] + x + y * linesize;
    src  = pic->f->data[0] + mx + my * linesize;

    if (emu) {
        /* NOTE(review): the opening line of the emulated_edge_mc() call is
         * elided in this excerpt; only its trailing argument lines remain. */
                                 linesize, linesize,
                                 width + 1, height + 1,
                                 mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? s->tdsp.avg_tpel_pixels_tab
             : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
                                                 width, height);
    else
        (avg ? s->hdsp.avg_pixels_tab
             : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
                                                       height);

    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        /* chroma: halve positions and dimensions; note '+' binds tighter
         * than '>>', so this is (mx + (mx < x)) >> 1 */
        mx = mx + (mx < (int) x) >> 1;
        my = my + (my < (int) y) >> 1;
        width  = width >> 1;
        height = height >> 1;
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
            src  = pic->f->data[i] + mx + my * uvlinesize;

            if (emu) {
                /* NOTE(review): the opening line of the emulated_edge_mc()
                 * call is elided here as well. */
                                         uvlinesize, uvlinesize,
                                         width + 1, height + 1,
                                         mx, my, (s->h_edge_pos >> 1),
                                         s->v_edge_pos >> 1);
                src = s->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? s->tdsp.avg_tpel_pixels_tab
                     : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
                                                         uvlinesize,
                                                         width, height);
            else
                (avg ? s->hdsp.avg_pixels_tab
                     : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
                                                               uvlinesize,
                                                               height);
        }
    }
}
500 
/**
 * Decode MVs for all partitions of the current MB and motion-compensate them.
 *
 * @param size partition layout selector (packed; see part_width/part_height)
 * @param mode FULLPEL/HALFPEL/THIRDPEL or PREDICT_MODE (temporal prediction)
 * @param dir  prediction direction/list (0 or 1)
 * @param avg  average into the destination (second B-frame reference)
 * @return 0 on success, -1 on an invalid motion vector vlc
 */
static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
                              int dir, int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned)(size + 1) / 3);
    const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
    const int h_edge_pos  = 6 * (s->h_edge_pos - part_width)  - extra_width;
    const int v_edge_pos  = 6 * (s->v_edge_pos - part_height) - extra_width;

    /* iterate over all partitions of the 16x16 MB */
    for (i = 0; i < 16; i += part_height)
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4 * s->mb_x + (j >> 2)) +
                             (4 * s->mb_y + (i >> 2)) * s->b_stride;
            int dxy;
            x = 16 * s->mb_x + j;
            y = 16 * s->mb_y + i;
            k = (j >> 2 & 1) + (i >> 1 & 2) +
                (j >> 1 & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                /* spatial prediction from the neighbour caches */
                svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
            } else {
                /* temporal prediction: scale the co-located MV of the next
                 * picture by the frame distance ratio */
                mx = s->next_pic->motion_val[0][b_xy][0] * 2;
                my = s->next_pic->motion_val[0][b_xy][1] * 2;

                if (dir == 0) {
                    mx = mx * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                } else {
                    mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
            my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                /* NOTE(review): the two lines reading dx/dy from the
                 * bitstream are elided in this excerpt. */

                if (dx != (int16_t)dx || dy != (int16_t)dy) {
                    av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx  = (mx + 1 >> 1) + dx;
                my  = (my + 1 >> 1) + dy;
                fx  = (unsigned)(mx + 0x30000) / 3 - 0x10000;
                fy  = (unsigned)(my + 0x30000) / 3 - 0x10000;
                dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 fx, fy, dxy, 1, dir, avg);
                mx += mx;
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx  = (unsigned)(mx + 1 + 0x30000) / 3 + dx - 0x10000;
                my  = (unsigned)(my + 1 + 0x30000) / 3 + dy - 0x10000;
                dxy = (mx & 1) + 2 * (my & 1);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx >> 1, my >> 1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = (unsigned)(mx + 3 + 0x60000) / 6 + dx - 0x10000;
                my = (unsigned)(my + 3 + 0x60000) / 6 + dy - 0x10000;

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx, my);

                if (part_height == 8 && i < 8) {
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);

                    if (part_width == 8 && j < 8)
                        AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
                }
                if (part_width == 8 && j < 8)
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
                if (part_width == 4 || part_height == 4)
                    AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
            }

            /* write back motion vectors */
            fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
                           part_width >> 2, part_height >> 2, s->b_stride,
                           pack16to32(mx, my), 4);
        }

    return 0;
}
613 
/* Add the luma residuals of a non-intra4x4 MB to the already-predicted
 * destination.
 * NOTE(review): the signature's first line ("static void
 * hl_decode_mb_idct_luma(SVQ3Context *s," per its caller) is elided in
 * this excerpt. */
                                   int mb_type, const int *block_offset,
                                   int linesize, uint8_t *dest_y)
{
    int i;
    if (!IS_INTRA4x4(mb_type)) {
        /* only blocks with coefficients (or a non-zero DC) need the idct */
        for (i = 0; i < 16; i++)
            if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
                uint8_t *const ptr = dest_y + block_offset[i];
                svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
                                s->qscale, IS_INTRA(mb_type) ? 1 : 0);
            }
    }
}
628 
/* Perform intra prediction for the luma plane, reconstructing each 4x4
 * block (or the whole 16x16 block) in place.
 * NOTE(review): the signature's first line ("static void
 * hl_decode_mb_predict_luma(SVQ3Context *s," per its caller) is elided
 * in this excerpt. */
                                      int mb_type,
                                      const int *block_offset,
                                      int linesize,
                                      uint8_t *dest_y)
{
    int i;
    int qscale = s->qscale;

    if (IS_INTRA4x4(mb_type)) {
        for (i = 0; i < 16; i++) {
            uint8_t *const ptr = dest_y + block_offset[i];
            const int dir      = s->intra4x4_pred_mode_cache[scan8[i]];

            uint8_t *topright;
            int nnz, tr;
            /* these two modes read 4 pixels above-right of the block */
            if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                const int topright_avail = (s->topright_samples_available << i) & 0x8000;
                av_assert2(s->mb_y || linesize <= block_offset[i]);
                if (!topright_avail) {
                    /* replicate the last available top pixel four times */
                    tr       = ptr[3 - linesize] * 0x01010101u;
                    topright = (uint8_t *)&tr;
                } else
                    topright = ptr + 4 - linesize;
            } else
                topright = NULL;

            s->hpc.pred4x4[dir](ptr, topright, linesize);
            nnz = s->non_zero_count_cache[scan8[i]];
            if (nnz) {
                svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
            }
        }
    } else {
        /* 16x16 intra: predict the whole MB, then dequant/transform the
         * separated luma DC coefficients back into the AC blocks */
        s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
        svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
    }
}
667 
669 {
670  const int mb_x = s->mb_x;
671  const int mb_y = s->mb_y;
672  const int mb_xy = s->mb_xy;
673  const int mb_type = s->cur_pic->mb_type[mb_xy];
674  uint8_t *dest_y, *dest_cb, *dest_cr;
675  int linesize, uvlinesize;
676  int i, j;
677  const int *block_offset = &s->block_offset[0];
678  const int block_h = 16 >> 1;
679 
680  linesize = s->cur_pic->f->linesize[0];
681  uvlinesize = s->cur_pic->f->linesize[1];
682 
683  dest_y = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
684  dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
685  dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;
686 
687  s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
688  s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);
689 
690  if (IS_INTRA(mb_type)) {
691  s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
692  s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);
693 
694  hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
695  }
696 
697  hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);
698 
699  if (s->cbp & 0x30) {
700  uint8_t *dest[2] = { dest_cb, dest_cr };
701  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
702  s->dequant4_coeff[4][0]);
703  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
704  s->dequant4_coeff[4][0]);
705  for (j = 1; j < 3; j++) {
706  for (i = j * 16; i < j * 16 + 4; i++)
707  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
708  uint8_t *const ptr = dest[j - 1] + block_offset[i];
709  svq3_add_idct_c(ptr, s->mb + i * 16,
710  uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
711  }
712  }
713  }
714 }
715 
/**
 * Decode one macroblock of the given raw type.
 *
 * mb_type mapping (per the branches below): 0 = skip/direct, 1..7 = inter,
 * 8 and 33 = intra 4x4, others = intra 16x16.
 *
 * NOTE(review): several statement lines are elided in this excerpt; each
 * gap is marked inline below.
 *
 * @return 0 on success, -1 on corrupt bitstream data
 */
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
{
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    const int mb_xy = s->mb_xy;
    const int b_xy  = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;

    /* neighbour availability masks for intra prediction */
    s->top_samples_available      = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
    s->left_samples_available     = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    s->topright_samples_available = 0xFFFF;

    if (mb_type == 0) {           /* SKIP */
        if (s->pict_type == AV_PICTURE_TYPE_P ||
            s->next_pic->mb_type[mb_xy] == -1) {
            /* plain skip: copy the co-located 16x16 block with zero MV */
            svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
                             0, 0, 0, 0, 0, 0);

            if (s->pict_type == AV_PICTURE_TYPE_B)
                svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
                                 0, 0, 0, 0, 1, 1);

            mb_type = MB_TYPE_SKIP;
        } else {
            /* B-frame direct mode: temporal prediction in both directions */
            mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) {     /* INTER */
        /* MV precision is signalled relative to the global flags */
        if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
            mode = THIRDPEL_MODE;
        else if (s->halfpel_flag &&
                 s->thirdpel_flag == !get_bits1(&s->gb_slice))
            mode = HALFPEL_MODE;
        else
            mode = FULLPEL_MODE;

        /* fill caches */
        /* note ref_cache should contain here:
         * ????????
         * ???11111
         * N??11111
         * N??11111
         * N??11111
         */

        for (m = 0; m < 2; m++) {
            /* left neighbour MVs (or zero when unavailable/intra) */
            if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
                for (i = 0; i < 4; i++)
                    AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
                              s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
            } else {
                for (i = 0; i < 4; i++)
                    AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
            }
            /* top row of neighbour MVs plus top-right and top-left corners */
            if (s->mb_y > 0) {
                memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
                       s->cur_pic->motion_val[m][b_xy - s->b_stride],
                       4 * 2 * sizeof(int16_t));
                memset(&s->ref_cache[m][scan8[0] - 1 * 8],
                       (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (s->mb_x < s->mb_width - 1) {
                    AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
                              s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
                    s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
                        (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
                         s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
                if (s->mb_x > 0) {
                    AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
                              s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
                    s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
                        (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
            } else
                memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
                       PART_NOT_AVAILABLE, 8);

            /* only B pictures need the second list's caches */
            if (s->pict_type != AV_PICTURE_TYPE_B)
                break;
        }

        /* decode motion vector(s) and form prediction(s) */
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
                return -1;
        } else {        /* AV_PICTURE_TYPE_B */
            /* mb_type 1/2 disable one direction (its MVs are zeroed) */
            if (mb_type != 2) {
                if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
            if (mb_type != 1) {
                if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
        int8_t *i4x4       = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
        int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;

        memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));

        if (mb_type == 8) {
            /* import the neighbours' stored 4x4 modes into the cache */
            if (s->mb_x > 0) {
                for (i = 0; i < 4; i++)
                    s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
                if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
                    s->left_samples_available = 0x5F5F;
            }
            if (s->mb_y > 0) {
                s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
                s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
                s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
                s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];

                if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
                    s->top_samples_available = 0x33FF;
            }

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i += 2) {
                /* NOTE(review): the line reading vlc from the bitstream is
                 * elided in this excerpt. */

                if (vlc >= 25U) {
                    /* NOTE(review): the av_log() opening line is elided. */
                           "luma prediction:%"PRIu32"\n", vlc);
                    return -1;
                }

                /* contextual prediction: each vlc codes a pair of modes */
                left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
                top  = &s->intra4x4_pred_mode_cache[scan8[i] - 8];

                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1) {
                    av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else {        /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++)
                memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
        }

        /* store this MB's modes for the neighbours below/right */
        AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
        i4x4[4] = i4x4_cache[7 + 8 * 3];
        i4x4[5] = i4x4_cache[7 + 8 * 2];
        i4x4[6] = i4x4_cache[7 + 8 * 1];

        if (mb_type == 8) {
            /* NOTE(review): lines validating the decoded 4x4 modes
             * (presumably ff_h264_check_intra4x4_pred_mode) are elided. */

            s->top_samples_available  = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
            s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++)
                memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);

            s->top_samples_available  = 0x33FF;
            s->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else {                      /* INTRA16x16 */
        dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;

        /* NOTE(review): the opening line of the
         * ff_h264_check_intra_pred_mode() call/assignment is elided. */
                                         s->left_samples_available, dir, 0)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
            return s->intra16x16_pred_mode;
        }

        cbp     = ff_h264_i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }

    /* intra MB inside a P/B picture: invalidate its motion vectors */
    if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
        for (i = 0; i < 4; i++)
            memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                   0, 4 * 2 * sizeof(int16_t));
        if (s->pict_type == AV_PICTURE_TYPE_B) {
            for (i = 0; i < 4; i++)
                memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                       0, 4 * 2 * sizeof(int16_t));
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
        memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
    }

    /* coded block pattern (intra16x16 carries it in the mb_type) */
    if (!IS_INTRA16x16(mb_type) &&
        (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
        if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U){
            av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
        /* NOTE(review): the ": ff_h264_golomb_to_inter_cbp[vlc];" branch
         * line is elided in this excerpt. */
    }
    /* optional per-MB quantizer delta */
    if (IS_INTRA16x16(mb_type) ||
        (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
        /* NOTE(review): the line updating s->qscale from the bitstream is
         * elided in this excerpt. */

        if (s->qscale > 31u) {
            av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
            return -1;
        }
    }
    /* separate luma DC block for intra16x16 MBs */
    if (IS_INTRA16x16(mb_type)) {
        AV_ZERO128(s->mb_luma_dc[0] + 0);
        AV_ZERO128(s->mb_luma_dc[0] + 8);
        if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
            /* NOTE(review): the av_log() opening line is elided. */
                   "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type  = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        /* luma 8x8 sub-blocks flagged in the low 4 cbp bits */
        for (i = 0; i < 4; i++)
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? (1 * (j & 1) + 2 * (i & 1) +
                                 2 * (j & 2) + 4 * (i & 2))
                              : (4 * i + j);
                    s->non_zero_count_cache[scan8[k]] = 1;

                    if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
                        /* NOTE(review): the av_log() opening line is elided. */
                               "error while decoding block\n");
                        return -1;
                    }
                }
            }

        /* chroma DC (bit 4/5 set), then chroma AC (bit 5 set) */
        if ((cbp & 0x30)) {
            for (i = 1; i < 3; ++i)
                if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
                    /* NOTE(review): the av_log() opening line is elided. */
                           "error while decoding chroma dc block\n");
                    return -1;
                }

            if ((cbp & 0x20)) {
                for (i = 1; i < 3; i++) {
                    for (j = 0; j < 4; j++) {
                        k = 16 * i + j;
                        s->non_zero_count_cache[scan8[k]] = 1;

                        if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
                            /* NOTE(review): the av_log() opening line is elided. */
                                   "error while decoding chroma ac block\n");
                            return -1;
                        }
                    }
                }
            }
        }
    }

    s->cbp                     = cbp;
    s->cur_pic->mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type))
        /* NOTE(review): the lines assigning s->chroma_pred_mode (via
         * ff_h264_check_intra_pred_mode, per hl_decode_mb's use of it)
         * are elided in this excerpt. */

    return 0;
}
1014 
1016 {
1017  SVQ3Context *s = avctx->priv_data;
1018  const int mb_xy = s->mb_xy;
1019  int i, header;
1020  unsigned slice_id;
1021 
1022  header = get_bits(&s->gb, 8);
1023 
1024  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1025  /* TODO: what? */
1026  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
1027  return -1;
1028  } else {
1029  int slice_bits, slice_bytes, slice_length;
1030  int length = header >> 5 & 3;
1031 
1032  slice_length = show_bits(&s->gb, 8 * length);
1033  slice_bits = slice_length * 8;
1034  slice_bytes = slice_length + length - 1;
1035 
1036  skip_bits(&s->gb, 8);
1037 
1039  if (!s->slice_buf)
1040  return AVERROR(ENOMEM);
1041 
1042  if (slice_bytes * 8LL > get_bits_left(&s->gb)) {
1043  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
1044  return AVERROR_INVALIDDATA;
1045  }
1046  memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
1047 
1048  if (s->watermark_key) {
1049  uint32_t header = AV_RL32(&s->slice_buf[1]);
1050  AV_WL32(&s->slice_buf[1], header ^ s->watermark_key);
1051  }
1052  init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);
1053 
1054  if (length > 0) {
1055  memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
1056  }
1057  skip_bits_long(&s->gb, slice_bytes * 8);
1058  }
1059 
1060  if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
1061  av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
1062  return -1;
1063  }
1064 
1065  s->slice_type = ff_h264_golomb_to_pict_type[slice_id];
1066 
1067  if ((header & 0x9F) == 2) {
1068  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
1069  get_bits(&s->gb_slice, i);
1070  } else if (get_bits1(&s->gb_slice)) {
1071  avpriv_report_missing_feature(s->avctx, "Media key encryption");
1072  return AVERROR_PATCHWELCOME;
1073  }
1074 
1075  s->slice_num = get_bits(&s->gb_slice, 8);
1076  s->qscale = get_bits(&s->gb_slice, 5);
1077  s->adaptive_quant = get_bits1(&s->gb_slice);
1078 
1079  /* unknown fields */
1080  skip_bits1(&s->gb_slice);
1081 
1082  if (s->has_watermark)
1083  skip_bits1(&s->gb_slice);
1084 
1085  skip_bits1(&s->gb_slice);
1086  skip_bits(&s->gb_slice, 2);
1087 
1088  if (skip_1stop_8data_bits(&s->gb_slice) < 0)
1089  return AVERROR_INVALIDDATA;
1090 
1091  /* reset intra predictors and invalidate motion vector references */
1092  if (s->mb_x > 0) {
1093  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
1094  -1, 4 * sizeof(int8_t));
1095  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
1096  -1, 8 * sizeof(int8_t) * s->mb_x);
1097  }
1098  if (s->mb_y > 0) {
1099  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
1100  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
1101 
1102  if (s->mb_x > 0)
1103  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
1104  }
1105 
1106  return 0;
1107 }
1108 
1110 {
1111  int q, x;
1112  const int max_qp = 51;
1113 
1114  for (q = 0; q < max_qp + 1; q++) {
1115  int shift = ff_h264_quant_div6[q] + 2;
1116  int idx = ff_h264_quant_rem6[q];
1117  for (x = 0; x < 16; x++)
1118  s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
1119  ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
1120  }
1121 }
1122 
1124 {
1125  SVQ3Context *s = avctx->priv_data;
1126  int m, x, y;
1127  unsigned char *extradata;
1128  unsigned char *extradata_end;
1129  unsigned int size;
1130  int marker_found = 0;
1131  int ret;
1132 
1133  s->cur_pic = &s->frames[0];
1134  s->last_pic = &s->frames[1];
1135  s->next_pic = &s->frames[2];
1136 
1137  s->cur_pic->f = av_frame_alloc();
1138  s->last_pic->f = av_frame_alloc();
1139  s->next_pic->f = av_frame_alloc();
1140  if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1141  return AVERROR(ENOMEM);
1142 
1143  ff_h264dsp_init(&s->h264dsp, 8, 1);
1145  ff_videodsp_init(&s->vdsp, 8);
1146 
1147 
1148  avctx->bits_per_raw_sample = 8;
1149 
1150  ff_hpeldsp_init(&s->hdsp, avctx->flags);
1151  ff_tpeldsp_init(&s->tdsp);
1152 
1153  avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1154  avctx->color_range = AVCOL_RANGE_JPEG;
1155 
1156  s->avctx = avctx;
1157  s->halfpel_flag = 1;
1158  s->thirdpel_flag = 1;
1159  s->has_watermark = 0;
1160 
1161  /* prowl for the "SEQH" marker in the extradata */
1162  extradata = (unsigned char *)avctx->extradata;
1163  extradata_end = avctx->extradata + avctx->extradata_size;
1164  if (extradata) {
1165  for (m = 0; m + 8 < avctx->extradata_size; m++) {
1166  if (!memcmp(extradata, "SEQH", 4)) {
1167  marker_found = 1;
1168  break;
1169  }
1170  extradata++;
1171  }
1172  }
1173 
1174  /* if a match was found, parse the extra data */
1175  if (marker_found) {
1176  GetBitContext gb;
1177  int frame_size_code;
1178  int unk0, unk1, unk2, unk3, unk4;
1179  int w,h;
1180 
1181  size = AV_RB32(&extradata[4]);
1182  if (size > extradata_end - extradata - 8)
1183  return AVERROR_INVALIDDATA;
1184  init_get_bits(&gb, extradata + 8, size * 8);
1185 
1186  /* 'frame size code' and optional 'width, height' */
1187  frame_size_code = get_bits(&gb, 3);
1188  switch (frame_size_code) {
1189  case 0:
1190  w = 160;
1191  h = 120;
1192  break;
1193  case 1:
1194  w = 128;
1195  h = 96;
1196  break;
1197  case 2:
1198  w = 176;
1199  h = 144;
1200  break;
1201  case 3:
1202  w = 352;
1203  h = 288;
1204  break;
1205  case 4:
1206  w = 704;
1207  h = 576;
1208  break;
1209  case 5:
1210  w = 240;
1211  h = 180;
1212  break;
1213  case 6:
1214  w = 320;
1215  h = 240;
1216  break;
1217  case 7:
1218  w = get_bits(&gb, 12);
1219  h = get_bits(&gb, 12);
1220  break;
1221  }
1222  ret = ff_set_dimensions(avctx, w, h);
1223  if (ret < 0)
1224  return ret;
1225 
1226  s->halfpel_flag = get_bits1(&gb);
1227  s->thirdpel_flag = get_bits1(&gb);
1228 
1229  /* unknown fields */
1230  unk0 = get_bits1(&gb);
1231  unk1 = get_bits1(&gb);
1232  unk2 = get_bits1(&gb);
1233  unk3 = get_bits1(&gb);
1234 
1235  s->low_delay = get_bits1(&gb);
1236 
1237  /* unknown field */
1238  unk4 = get_bits1(&gb);
1239 
1240  av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1241  unk0, unk1, unk2, unk3, unk4);
1242 
1243  if (skip_1stop_8data_bits(&gb) < 0)
1244  return AVERROR_INVALIDDATA;
1245 
1246  s->has_watermark = get_bits1(&gb);
1247  avctx->has_b_frames = !s->low_delay;
1248  if (s->has_watermark) {
1249 #if CONFIG_ZLIB
1250  unsigned watermark_width = get_interleaved_ue_golomb(&gb);
1251  unsigned watermark_height = get_interleaved_ue_golomb(&gb);
1252  int u1 = get_interleaved_ue_golomb(&gb);
1253  int u2 = get_bits(&gb, 8);
1254  int u3 = get_bits(&gb, 2);
1255  int u4 = get_interleaved_ue_golomb(&gb);
1256  unsigned long buf_len = watermark_width *
1257  watermark_height * 4;
1258  int offset = get_bits_count(&gb) + 7 >> 3;
1259  uint8_t *buf;
1260 
1261  if (watermark_height <= 0 ||
1262  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
1263  return AVERROR_INVALIDDATA;
1264 
1265  buf = av_malloc(buf_len);
1266  if (!buf)
1267  return AVERROR(ENOMEM);
1268 
1269  av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1270  watermark_width, watermark_height);
1271  av_log(avctx, AV_LOG_DEBUG,
1272  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1273  u1, u2, u3, u4, offset);
1274  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1275  size - offset) != Z_OK) {
1276  av_log(avctx, AV_LOG_ERROR,
1277  "could not uncompress watermark logo\n");
1278  av_free(buf);
1279  return -1;
1280  }
1282 
1283  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1284  av_log(avctx, AV_LOG_DEBUG,
1285  "watermark key %#"PRIx32"\n", s->watermark_key);
1286  av_free(buf);
1287 #else
1288  av_log(avctx, AV_LOG_ERROR,
1289  "this svq3 file contains watermark which need zlib support compiled in\n");
1290  return AVERROR(ENOSYS);
1291 #endif
1292  }
1293  }
1294 
1295  s->mb_width = (avctx->width + 15) / 16;
1296  s->mb_height = (avctx->height + 15) / 16;
1297  s->mb_stride = s->mb_width + 1;
1298  s->mb_num = s->mb_width * s->mb_height;
1299  s->b_stride = 4 * s->mb_width;
1300  s->h_edge_pos = s->mb_width * 16;
1301  s->v_edge_pos = s->mb_height * 16;
1302 
1303  s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
1304  if (!s->intra4x4_pred_mode)
1305  return AVERROR(ENOMEM);
1306 
1307  s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
1308  sizeof(*s->mb2br_xy));
1309  if (!s->mb2br_xy)
1310  return AVERROR(ENOMEM);
1311 
1312  for (y = 0; y < s->mb_height; y++)
1313  for (x = 0; x < s->mb_width; x++) {
1314  const int mb_xy = x + y * s->mb_stride;
1315 
1316  s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
1317  }
1318 
1320 
1321  return 0;
1322 }
1323 
1324 static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
1325 {
1326  int i;
1327  for (i = 0; i < 2; i++) {
1328  av_freep(&pic->motion_val_buf[i]);
1329  }
1330  av_freep(&pic->mb_type_buf);
1331 
1332  av_frame_unref(pic->f);
1333 }
1334 
1335 static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
1336 {
1337  SVQ3Context *s = avctx->priv_data;
1338  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
1339  const int b4_stride = s->mb_width * 4 + 1;
1340  const int b4_array_size = b4_stride * s->mb_height * 4;
1341  int ret;
1342 
1343  if (!pic->motion_val_buf[0]) {
1344  int i;
1345 
1346  pic->mb_type_buf = av_calloc(big_mb_num + s->mb_stride, sizeof(uint32_t));
1347  if (!pic->mb_type_buf)
1348  return AVERROR(ENOMEM);
1349  pic->mb_type = pic->mb_type_buf + 2 * s->mb_stride + 1;
1350 
1351  for (i = 0; i < 2; i++) {
1352  pic->motion_val_buf[i] = av_calloc(b4_array_size + 4, 2 * sizeof(int16_t));
1353  if (!pic->motion_val_buf[i]) {
1354  ret = AVERROR(ENOMEM);
1355  goto fail;
1356  }
1357 
1358  pic->motion_val[i] = pic->motion_val_buf[i] + 4;
1359  }
1360  }
1361 
1362  ret = ff_get_buffer(avctx, pic->f,
1363  (s->pict_type != AV_PICTURE_TYPE_B) ?
1365  if (ret < 0)
1366  goto fail;
1367 
1368  if (!s->edge_emu_buffer) {
1369  s->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
1370  if (!s->edge_emu_buffer)
1371  return AVERROR(ENOMEM);
1372  }
1373 
1374  return 0;
1375 fail:
1376  free_picture(avctx, pic);
1377  return ret;
1378 }
1379 
1380 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1381  int *got_frame, AVPacket *avpkt)
1382 {
1383  SVQ3Context *s = avctx->priv_data;
1384  int buf_size = avpkt->size;
1385  int left;
1386  uint8_t *buf;
1387  int ret, m, i;
1388 
1389  /* special case for last picture */
1390  if (buf_size == 0) {
1391  if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
1392  ret = av_frame_ref(data, s->next_pic->f);
1393  if (ret < 0)
1394  return ret;
1395  s->last_frame_output = 1;
1396  *got_frame = 1;
1397  }
1398  return 0;
1399  }
1400 
1401  s->mb_x = s->mb_y = s->mb_xy = 0;
1402 
1403  if (s->watermark_key) {
1404  av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1405  if (!s->buf)
1406  return AVERROR(ENOMEM);
1407  memcpy(s->buf, avpkt->data, buf_size);
1408  buf = s->buf;
1409  } else {
1410  buf = avpkt->data;
1411  }
1412 
1413  ret = init_get_bits(&s->gb, buf, 8 * buf_size);
1414  if (ret < 0)
1415  return ret;
1416 
1417  if (svq3_decode_slice_header(avctx))
1418  return -1;
1419 
1420  s->pict_type = s->slice_type;
1421 
1422  if (s->pict_type != AV_PICTURE_TYPE_B)
1423  FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);
1424 
1425  av_frame_unref(s->cur_pic->f);
1426 
1427  /* for skipping the frame */
1428  s->cur_pic->f->pict_type = s->pict_type;
1430 
1431  ret = get_buffer(avctx, s->cur_pic);
1432  if (ret < 0)
1433  return ret;
1434 
1435  for (i = 0; i < 16; i++) {
1436  s->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1437  s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1438  }
1439  for (i = 0; i < 16; i++) {
1440  s->block_offset[16 + i] =
1441  s->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1442  s->block_offset[48 + 16 + i] =
1443  s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1444  }
1445 
1446  if (s->pict_type != AV_PICTURE_TYPE_I) {
1447  if (!s->last_pic->f->data[0]) {
1448  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1449  av_frame_unref(s->last_pic->f);
1450  ret = get_buffer(avctx, s->last_pic);
1451  if (ret < 0)
1452  return ret;
1453  memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1454  memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1455  s->last_pic->f->linesize[1]);
1456  memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1457  s->last_pic->f->linesize[2]);
1458  }
1459 
1460  if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1461  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1462  av_frame_unref(s->next_pic->f);
1463  ret = get_buffer(avctx, s->next_pic);
1464  if (ret < 0)
1465  return ret;
1466  memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1467  memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1468  s->next_pic->f->linesize[1]);
1469  memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1470  s->next_pic->f->linesize[2]);
1471  }
1472  }
1473 
1474  if (avctx->debug & FF_DEBUG_PICT_INFO)
1476  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1478  s->halfpel_flag, s->thirdpel_flag,
1479  s->adaptive_quant, s->qscale, s->slice_num);
1480 
1481  if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
1483  avctx->skip_frame >= AVDISCARD_ALL)
1484  return 0;
1485 
1486  if (s->next_p_frame_damaged) {
1487  if (s->pict_type == AV_PICTURE_TYPE_B)
1488  return 0;
1489  else
1490  s->next_p_frame_damaged = 0;
1491  }
1492 
1493  if (s->pict_type == AV_PICTURE_TYPE_B) {
1495 
1496  if (s->frame_num_offset < 0)
1497  s->frame_num_offset += 256;
1498  if (s->frame_num_offset == 0 ||
1500  av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1501  return -1;
1502  }
1503  } else {
1504  s->prev_frame_num = s->frame_num;
1505  s->frame_num = s->slice_num;
1507 
1508  if (s->prev_frame_num_offset < 0)
1509  s->prev_frame_num_offset += 256;
1510  }
1511 
1512  for (m = 0; m < 2; m++) {
1513  int i;
1514  for (i = 0; i < 4; i++) {
1515  int j;
1516  for (j = -1; j < 4; j++)
1517  s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1518  if (i < 3)
1519  s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1520  }
1521  }
1522 
1523  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1524  for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1525  unsigned mb_type;
1526  s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
1527 
1528  if ((get_bits_left(&s->gb_slice)) <= 7) {
1529  if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
1530  show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {
1531 
1532  if (svq3_decode_slice_header(avctx))
1533  return -1;
1534  }
1535  if (s->slice_type != s->pict_type) {
1536  avpriv_request_sample(avctx, "non constant slice type");
1537  }
1538  /* TODO: support s->mb_skip_run */
1539  }
1540 
1541  mb_type = get_interleaved_ue_golomb(&s->gb_slice);
1542 
1543  if (s->pict_type == AV_PICTURE_TYPE_I)
1544  mb_type += 8;
1545  else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1546  mb_type += 4;
1547  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1549  "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1550  return -1;
1551  }
1552 
1553  if (mb_type != 0 || s->cbp)
1554  hl_decode_mb(s);
1555 
1556  if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
1557  s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
1558  (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1559  }
1560 
1561  ff_draw_horiz_band(avctx, s->cur_pic->f,
1562  s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1563  16 * s->mb_y, 16, PICT_FRAME, 0,
1564  s->low_delay);
1565  }
1566 
1567  left = buf_size*8 - get_bits_count(&s->gb_slice);
1568 
1569  if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
1570  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
1571  //av_hex_dump(stderr, buf+buf_size-8, 8);
1572  }
1573 
1574  if (left < 0) {
1575  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1576  return -1;
1577  }
1578 
1579  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
1580  ret = av_frame_ref(data, s->cur_pic->f);
1581  else if (s->last_pic->f->data[0])
1582  ret = av_frame_ref(data, s->last_pic->f);
1583  if (ret < 0)
1584  return ret;
1585 
1586  /* Do not output the last pic after seeking. */
1587  if (s->last_pic->f->data[0] || s->low_delay)
1588  *got_frame = 1;
1589 
1590  if (s->pict_type != AV_PICTURE_TYPE_B) {
1591  FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);
1592  } else {
1593  av_frame_unref(s->cur_pic->f);
1594  }
1595 
1596  return buf_size;
1597 }
1598 
1600 {
1601  SVQ3Context *s = avctx->priv_data;
1602 
1603  free_picture(avctx, s->cur_pic);
1604  free_picture(avctx, s->next_pic);
1605  free_picture(avctx, s->last_pic);
1606  av_frame_free(&s->cur_pic->f);
1607  av_frame_free(&s->next_pic->f);
1608  av_frame_free(&s->last_pic->f);
1609  av_freep(&s->slice_buf);
1612  av_freep(&s->mb2br_xy);
1613 
1614 
1615  av_freep(&s->buf);
1616  s->buf_size = 0;
1617 
1618  return 0;
1619 }
1620 
1622  .name = "svq3",
1623  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1624  .type = AVMEDIA_TYPE_VIDEO,
1625  .id = AV_CODEC_ID_SVQ3,
1626  .priv_data_size = sizeof(SVQ3Context),
1628  .close = svq3_decode_end,
1630  .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1633  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1634  AV_PIX_FMT_NONE},
1635  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1636 };
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
uint8_t pred_mode
Definition: h264data.h:35
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
discard all frames except keyframes
Definition: avcodec.h:235
void(* h264_chroma_dc_dequant_idct)(int16_t *block, int qmul)
Definition: h264dsp.h:104
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int cbp
Definition: svq3.c:109
static int shift(int a, int b)
Definition: sonic.c:82
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
HpelDSPContext hdsp
Definition: svq3.c:85
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:296
else temp
Definition: vf_mcdeint.c:256
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:106
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
int prev_frame_num
Definition: svq3.c:113
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: svq3.c:377
#define avpriv_request_sample(...)
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1166
int size
Definition: packet.h:364
int mb_xy
Definition: svq3.c:120
const uint8_t * buffer
Definition: get_bits.h:62
#define av_bswap16
Definition: bswap.h:31
int av_log2(unsigned v)
Definition: intmath.c:26
uint8_t * slice_buf
Definition: svq3.c:94
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:741
GLint GLenum type
Definition: opengl_enc.c:104
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:72
int v_edge_pos
Definition: svq3.c:105
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
const uint8_t ff_h264_quant_rem6[QP_MAX_NUM+1]
Definition: h264data.c:174
discard all
Definition: avcodec.h:236
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
uint8_t run
Definition: svq3.c:205
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1742
#define FULLPEL_MODE
Definition: svq3.c:149
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:51
AVCodec.
Definition: codec.h:190
#define MB_TYPE_INTRA4x4
Definition: mpegutils.h:51
if it could not because there are no more frames
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
int16_t mb[16 *48 *2]
Definition: svq3.c:141
Macro definitions for various function/variable attributes.
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:245
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:501
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1997
SVQ3Frame frames[3]
Definition: svq3.c:146
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
int has_watermark
Definition: svq3.c:98
int thirdpel_flag
Definition: svq3.c:97
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:52
int mb_num
Definition: svq3.c:122
const uint8_t ff_h264_dequant4_coeff_init[6][3]
Definition: h264data.c:152
static const uint8_t luma_dc_zigzag_scan[16]
Definition: svq3.c:170
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:614
#define av_cold
Definition: attributes.h:88
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
#define mb
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
#define DC_PRED8x8
Definition: h264pred.h:68
int block_offset[2 *(16 *3)]
Definition: svq3.c:145
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1619
uint32_t * mb_type_buf
Definition: svq3.c:77
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
static const struct @135 svq3_dct_tables[2][16]
static av_always_inline int svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C, int i, int list, int part_width)
Definition: svq3.c:356
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:632
#define MB_TYPE_16x16
Definition: mpegutils.h:54
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:91
const uint8_t ff_h264_chroma_dc_scan[4]
Definition: h264data.c:54
Context for storing H.264 prediction functions.
Definition: h264pred.h:92
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
Public header for CRC hash function implementation.
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
Definition: mem.h:117
#define height
#define av_clip
Definition: common.h:122
uint8_t * data
Definition: packet.h:363
thirdpel DSP context
Definition: tpeldsp.h:42
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
const IMbInfo ff_h264_i_mb_type_info[26]
Definition: h264data.c:66
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
const uint8_t ff_h264_golomb_to_inter_cbp[48]
Definition: h264data.c:48
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx, int top_samples_available, int left_samples_available)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:131
thirdpel DSP functions
ptrdiff_t size
Definition: opengl_enc.c:100
static const uint8_t header[24]
Definition: sdr2.c:67
enum AVPictureType slice_type
Definition: svq3.c:116
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
#define A(x)
Definition: vp56_arith.h:28
#define av_log(a,...)
int prev_frame_num_offset
Definition: svq3.c:112
int low_delay
Definition: svq3.c:117
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:716
#define U(x)
Definition: vp56_arith.h:37
#define src
Definition: vp8dsp.c:255
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:821
#define HALFPEL_MODE
Definition: svq3.c:150
AVCodecContext * avctx
Definition: svq3.c:81
int8_t * intra4x4_pred_mode
Definition: svq3.c:131
uint8_t * edge_emu_buffer
Definition: svq3.c:137
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
#define B
Definition: huffyuvdsp.h:32
av_cold void ff_tpeldsp_init(TpelDSPContext *c)
Definition: tpeldsp.c:312
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:115
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:611
uint32_t * mb_type
Definition: svq3.c:77
int frame_num
Definition: svq3.c:110
int mb_x
Definition: svq3.c:119
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:303
GLsizei GLsizei * length
Definition: opengl_enc.c:114
unsigned int left_samples_available
Definition: svq3.c:135
const char * name
Name of the codec implementation.
Definition: codec.h:197
#define IS_SKIP(a)
Definition: mpegutils.h:81
int chroma_pred_mode
Definition: svq3.c:127
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
#define PREDICT_MODE
Definition: svq3.c:152
#define fail()
Definition: checkasm.h:133
unsigned int topright_samples_available
Definition: svq3.c:134
Sorenson Vector Quantizer #1 (SVQ1) video codec.
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
Definition: svq3.c:71
useful rectangle filling function
tpel_mc_func avg_tpel_pixels_tab[11]
Definition: tpeldsp.h:54
Half-pel DSP context.
Definition: hpeldsp.h:45
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
SVQ3Frame * cur_pic
Definition: svq3.c:89
Context for storing H.264 DSP functions.
Definition: h264dsp.h:42
uint32_t dequant4_coeff[QP_MAX_NUM+1][16]
Definition: svq3.c:144
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:397
int16_t(*[2] motion_val)[2]
Definition: svq3.c:75
#define FFMIN(a, b)
Definition: common.h:105
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
#define width
int width
picture width / height.
Definition: avcodec.h:704
uint8_t w
Definition: llviddspenc.c:39
int32_t
GetBitContext gb_slice
Definition: svq3.c:93
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
Definition: crc.c:392
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:1123
#define s(width, name)
Definition: cbs_vp9.c:257
tpel_mc_func put_tpel_pixels_tab[11]
Thirdpel motion compensation with rounding (a + b + 1) >> 1.
Definition: tpeldsp.h:53
H.264 / AVC / MPEG-4 part10 codec.
int b_stride
Definition: svq3.c:123
H264PredContext hpc
Definition: svq3.c:84
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:828
int last_frame_output
Definition: svq3.c:106
int next_p_frame_damaged
Definition: svq3.c:103
Full range content.
Definition: pixfmt.h:586
#define IS_INTRA16x16(a)
Definition: mpegutils.h:76
if(ret)
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static const int8_t mv[256][2]
Definition: 4xm.c:78
H264DSPContext h264dsp
Definition: svq3.c:83
Half-pel DSP functions.
AVCodec ff_svq3_decoder
Definition: svq3.c:1621
GetBitContext gb
Definition: svq3.c:92
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
int debug
debug
Definition: avcodec.h:1618
int intra16x16_pred_mode
Definition: svq3.c:128
main external API structure.
Definition: avcodec.h:531
const uint8_t ff_h264_chroma_qp[7][QP_MAX_NUM+1]
Definition: h264data.c:203
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff)*mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
#define QP_MAX_NUM
Definition: h264.h:27
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1884
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:651
int extradata_size
Definition: avcodec.h:633
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
int qscale
Definition: svq3.c:108
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
const uint8_t ff_h264_golomb_to_intra4x4_cbp[48]
Definition: h264data.c:42
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:538
int mb_height
Definition: svq3.c:121
enum AVPictureType pict_type
Definition: svq3.c:115
const uint8_t ff_h264_quant_div6[QP_MAX_NUM+1]
Definition: h264data.c:182
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
int index
Definition: gxfenc.c:89
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:667
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:426
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
static void init_dequant4_coeff_table(SVQ3Context *s)
Definition: svq3.c:1109
static av_cold int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1599
#define mid_pred
Definition: mathops.h:97
int8_t ref_cache[2][5 *8]
Definition: svq3.c:140
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:177
int mb_y
Definition: svq3.c:119
AVPictureType
Definition: avutil.h:272
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
#define IS_INTER(a)
Definition: mpegutils.h:79
int16_t(*[2] motion_val_buf)[2]
Definition: svq3.c:74
int slice_num
Definition: svq3.c:107
AVFrame * f
Definition: svq3.c:72
#define MB_TYPE_SKIP
Definition: mpegutils.h:62
uint8_t * buf
Definition: svq3.c:100
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:303
SVQ3Frame * last_pic
Definition: svq3.c:91
VideoDSPContext vdsp
Definition: svq3.c:87
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1324
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
uint32_t * mb2br_xy
Definition: svq3.c:125
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
uint8_t level
Definition: svq3.c:206
Definition: vp9.h:48
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
Definition: crc.c:374
#define avg(a, b, c, d)
discard all non reference
Definition: avcodec.h:232
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
int
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:629
uint8_t non_zero_count_cache[15 *8]
Definition: svq3.c:143
uint8_t cbp
Definition: h264data.h:36
common internal api header.
static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1335
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
int mb_stride
Definition: svq3.c:122
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available, int left_samples_available, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:179
int16_t mb_luma_dc[3][16 *2]
Definition: svq3.c:142
int h_edge_pos
Definition: svq3.c:104
Bi-dir predicted.
Definition: avutil.h:276
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:221
#define stride
int frame_num_offset
Definition: svq3.c:111
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
#define IS_INTRA(x, y)
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:214
void * priv_data
Definition: avcodec.h:558
#define THIRDPEL_MODE
Definition: svq3.c:151
#define PICT_FRAME
Definition: mpegutils.h:39
unsigned int top_samples_available
Definition: svq3.c:133
#define IS_INTRA4x4(a)
Definition: mpegutils.h:75
#define av_free(p)
static void hl_decode_mb(SVQ3Context *s)
Definition: svq3.c:668
#define av_clip_uint8
Definition: common.h:128
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:1015
#define PART_NOT_AVAILABLE
Definition: h264dec.h:398
int slice_size
Definition: svq3.c:95
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:392
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
TpelDSPContext tdsp
Definition: svq3.c:86
static const uint8_t svq3_scan[16]
Definition: svq3.c:163
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: svq3.c:130
int mb_width
Definition: svq3.c:121
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:67
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:189
static void svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:256
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1222
#define av_freep(p)
uint32_t watermark_key
Definition: svq3.c:99
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:854
#define av_always_inline
Definition: attributes.h:45
SVQ3Frame * next_pic
Definition: svq3.c:90
#define FFSWAP(type, a, b)
Definition: common.h:108
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:145
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
int buf_size
Definition: svq3.c:101
exp golomb vlc stuff
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:91
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: packet.h:340
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1380
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:514
mode
Use these values in ebur128_init (or&#39;ed).
Definition: ebur128.h:83
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
int i
Definition: input.c:407
Predicted.
Definition: avutil.h:275
int halfpel_flag
Definition: svq3.c:96
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
int adaptive_quant
Definition: svq3.c:102
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
int16_t mv_cache[2][5 *8][2]
Definition: svq3.c:139