FFmpeg
svq3.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "libavutil/crc.h"
47 
48 #include "internal.h"
49 #include "avcodec.h"
50 #include "mpegutils.h"
51 #include "h264dec.h"
52 #include "h264data.h"
53 #include "golomb.h"
54 #include "hpeldsp.h"
55 #include "mathops.h"
56 #include "rectangle.h"
57 #include "tpeldsp.h"
58 
59 #if CONFIG_ZLIB
60 #include <zlib.h>
61 #endif
62 
63 #include "svq1.h"
64 
65 /**
66  * @file
67  * svq3 decoder.
68  */
69 
typedef struct SVQ3Frame {
    /* NOTE(review): dump gap (line 71) — a member is missing here; other
     * code dereferences pic->f, so presumably "AVFrame *f;" — confirm
     * against the upstream file. */

    int16_t (*motion_val_buf[2])[2];   // per-list backing allocations for motion vectors
    int16_t (*motion_val[2])[2];       // per-list 4x4 motion vectors (presumably offset into *_buf — confirm)

    uint32_t *mb_type_buf, *mb_type;   // backing allocation / working pointer for per-MB types
} SVQ3Frame;
78 
typedef struct SVQ3Context {
    /* NOTE(review): this dump omits many members (line-number gaps at 82-86,
     * 88-97, 99, 101-106, 109-112, 116, 126-127, 133-134, 136, 145) — codec
     * context, DSP contexts, frame pointers, flags, edge buffers etc. are
     * presumably declared there; confirm against upstream svq3.c. */

    uint32_t watermark_key;        // XOR key applied to watermarked slice headers

    int buf_size;

    int qscale;                    // current quantizer; valid range is 0..31 (checked in svq3_decode_mb)
    int cbp;                       // coded block pattern of the current macroblock

    enum AVPictureType pict_type;  // type of the current picture
    enum AVPictureType slice_type; // type signalled in the current slice header

    int mb_x, mb_y;                // current macroblock coordinates
    int mb_xy;                     // linear macroblock index
    int mb_width, mb_height;
    int mb_stride, mb_num;
    int b_stride;                  // stride of the 4x4-block (motion vector) grid

    uint32_t *mb2br_xy;            // maps a MB index to its slot in intra4x4_pred_mode storage

    int8_t intra4x4_pred_mode_cache[5 * 8];  // scan8-addressed cache of 4x4 intra modes
    int8_t (*intra4x4_pred_mode);            // persistent per-MB 4x4 intra modes (-1 = unavailable)

    unsigned int top_samples_available;      // availability bitmasks for intra prediction

    DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];  // scan8-addressed MV cache, per list
    DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];      // scan8-addressed reference cache
    DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];        // residual coefficients of the current MB
    DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];  // luma DC coefficients (intra16x16)
    DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8]; // scan8-addressed nnz flags
    uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
    int block_offset[2 * (16 * 3)];                       // pixel offsets of each 4x4 block in its plane
} SVQ3Context;
147 
/* Motion-compensation modes signalled per inter macroblock
 * (see svq3_decode_mb() / svq3_mc_dir()). */
#define FULLPEL_MODE 1
#define HALFPEL_MODE 2
#define THIRDPEL_MODE 3
#define PREDICT_MODE 4   /* MV predicted temporally from the co-located block */
152 
/* dual scan (from some older H.264 draft)
 * o-->o-->o o
 * | /|
 * o o o / o
 * | / | |/ |
 * o o o o
 * /
 * o-->o-->o-->o
 * (NOTE(review): the diagram's alignment was mangled by extraction.)
 */
/* Coefficient scan order for the dual-scan pattern; presumably selected via
 * scan_patterns[] in svq3_decode_block() (initializer elided in this dump). */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
168 
/* Zigzag scan of the 16 luma DC coefficients; entries are offsets in the
 * macroblock coefficient layout (steps of 16 within a row of blocks, 64 per
 * block row). Presumably scan_patterns[] entry for the luma-DC case —
 * confirm, since the scan_patterns initializer is elided in this dump. */
static const uint8_t luma_dc_zigzag_scan[16] = {
    0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
    3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
    1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
    3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
};
175 
/* Maps a luma intra-prediction VLC (0..24) to a pair of codes, one for each
 * of the two 4x4 blocks decoded per iteration; the codes index the last
 * dimension of svq3_pred_1[][][] (see svq3_decode_mb()). Laid out as the
 * anti-diagonals of a 5x5 grid. */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};
187 
/* Candidate 4x4 intra prediction modes, indexed as
 * [top_mode + 1][left_mode + 1][code] (the +1 maps the "unavailable" value
 * -1 to index 0; see svq3_decode_mb()). -1 entries mark invalid codes for
 * that neighbour combination. */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};
202 
/* Run/level pairs for the 16 shortest residual-coefficient VLC codes;
 * [0] = inter tables, [1] = intra tables (indexed by 'intra' in
 * svq3_decode_block()). */
static const struct {
    /* NOTE(review): dump gap (lines 204-205) — the member declarations are
     * missing; usage (.run/.level) suggests "int8_t run; int8_t level;" —
     * confirm upstream. */
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
212 
/* Dequantization multipliers indexed by qscale (0..31); results are scaled
 * back down with a >> 20 after the inverse transform. */
static const uint32_t svq3_dequant_coeff[32] = {
      3881,   4351,   4890,   5481,   6154,   6914,   7761,   8718,
      9781,  10987,  12339,  13828,  15523,  17435,  19561,  21873,
     24552,  27656,  30847,  34870,  38807,  43747,  49103,  54683,
     61694,  68745,  77615,  89113, 100253, 109366, 126635, 141533
};
219 
220 static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
221 {
222  const unsigned qmul = svq3_dequant_coeff[qp];
223 #define stride 16
224  int i;
225  int temp[16];
226  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
227 
228  for (i = 0; i < 4; i++) {
229  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
230  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
231  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
232  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
233 
234  temp[4 * i + 0] = z0 + z3;
235  temp[4 * i + 1] = z1 + z2;
236  temp[4 * i + 2] = z1 - z2;
237  temp[4 * i + 3] = z0 - z3;
238  }
239 
240  for (i = 0; i < 4; i++) {
241  const int offset = x_offset[i];
242  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
243  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
244  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
245  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
246 
247  output[stride * 0 + offset] = (int)((z0 + z3) * qmul + 0x80000) >> 20;
248  output[stride * 2 + offset] = (int)((z1 + z2) * qmul + 0x80000) >> 20;
249  output[stride * 8 + offset] = (int)((z1 - z2) * qmul + 0x80000) >> 20;
250  output[stride * 10 + offset] = (int)((z0 - z3) * qmul + 0x80000) >> 20;
251  }
252 }
253 #undef stride
254 
/**
 * Dequantize and inverse-transform one 4x4 residual block and add the
 * result to the destination pixels; clears the coefficient block afterwards.
 * @param dst    destination pixels
 * @param block  16 coefficients (consumed; zeroed on return)
 * @param stride line size of dst
 * @param qp     quantizer index into svq3_dequant_coeff
 * @param dc     0: plain AC block; 1: luma block whose DC was already
 *               dequantized (fixed 1538 scale); 2: chroma block whose DC is
 *               rescaled here with qmul
 */
static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
                            int stride, int qp, int dc)
{
    const int qmul = svq3_dequant_coeff[qp];
    int i;

    if (dc) {
        /* fold the DC term into the rounding constant of the final pass;
         * 13 * 13 matches the DC gain of the two transform passes below */
        dc = 13 * 13 * (dc == 1 ? 1538U* block[0]
                                : qmul * (block[0] >> 3) / 2);
        block[0] = 0;
    }

    /* first 1-D pass over rows, done in place */
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
        const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
        const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
        const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];

        block[0 + 4 * i] = z0 + z3;
        block[1 + 4 * i] = z1 + z2;
        block[2 + 4 * i] = z1 - z2;
        block[3 + 4 * i] = z0 - z3;
    }

    /* second 1-D pass over columns; unsigned intermediates deliberately
     * avoid signed-overflow UB on crafted bitstreams */
    for (i = 0; i < 4; i++) {
        const unsigned z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
        const unsigned z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
        const unsigned z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
        const unsigned z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
        const int rr = (dc + 0x80000u);   /* DC contribution + rounding bias */

        dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((int)((z0 + z3) * qmul + rr) >> 20));
        dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((int)((z1 + z2) * qmul + rr) >> 20));
        dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((int)((z1 - z2) * qmul + rr) >> 20));
        dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((int)((z0 - z3) * qmul + rr) >> 20));
    }

    memset(block, 0, 16 * sizeof(int16_t));
}
294 
/**
 * Decode the run/level coefficient VLCs of one block.
 * @param block  output coefficient array; entries are placed via the scan table
 * @param index  index of the first coefficient to decode (e.g. 1 skips a DC)
 * @param type   scan/table selector: 3 = chroma DC, 2 = dual-scan intra
 *               (iterates both halves), otherwise zigzag with the
 *               inter/intra DCT tables
 * @return 0 on success, -1 on invalid VLC data or a run past the limit
 */
static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
                                    int index, const int type)
{
    static const uint8_t *const scan_patterns[4] = {
        /* NOTE(review): dump gap (line 299) — the four scan-table pointers
         * are missing from this dump; confirm against upstream svq3.c. */
    };

    int run, level, sign, limit;
    unsigned vlc;
    const int intra = 3 * type >> 2;   /* 0 for types 0/1, 1 for type 2, 2 for type 3 */
    const uint8_t *const scan = scan_patterns[type];

    /* type 2 runs two passes (limit 8 then 16); other types run once */
    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
            if ((int32_t)vlc < 0)
                return -1;

            /* LSB of the code carries the sign; the rest selects run/level */
            sign = (vlc & 1) ? 0 : -1;
            vlc = vlc + 1 >> 1;

            if (type == 3) {
                if (vlc < 3) {
                    run = 0;
                    level = vlc;
                } else if (vlc < 4) {
                    run = 1;
                    level = 1;
                } else {
                    run = vlc & 0x3;
                    level = (vlc + 9 >> 2) - run;
                }
            } else {
                if (vlc < 16U) {
                    run = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;
                } else if (intra) {
                    /* escape coding for codes beyond the 16-entry table */
                    run = vlc & 0x7;
                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                } else {
                    run = vlc & 0xF;
                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
                }
            }


            if ((index += run) >= limit)
                return -1;

            block[scan[index]] = (level ^ sign) - sign;
        }

        if (type != 2) {
            break;
        }
    }

    return 0;
}
353 
354 static av_always_inline int
355 svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
356  int i, int list, int part_width)
357 {
358  const int topright_ref = s->ref_cache[list][i - 8 + part_width];
359 
360  if (topright_ref != PART_NOT_AVAILABLE) {
361  *C = s->mv_cache[list][i - 8 + part_width];
362  return topright_ref;
363  } else {
364  *C = s->mv_cache[list][i - 8 - 1];
365  return s->ref_cache[list][i - 8 - 1];
366  }
367 }
368 
369 /**
370  * Get the predicted MV.
371  * @param n the block index
372  * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
373  * @param mx the x component of the predicted motion vector
374  * @param my the y component of the predicted motion vector
375  */
376 static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
377  int part_width, int list,
378  int ref, int *const mx, int *const my)
379 {
380  const int index8 = scan8[n];
381  const int top_ref = s->ref_cache[list][index8 - 8];
382  const int left_ref = s->ref_cache[list][index8 - 1];
383  const int16_t *const A = s->mv_cache[list][index8 - 1];
384  const int16_t *const B = s->mv_cache[list][index8 - 8];
385  const int16_t *C;
386  int diagonal_ref, match_count;
387 
388 /* mv_cache
389  * B . . A T T T T
390  * U . . L . . , .
391  * U . . L . . . .
392  * U . . L . . , .
393  * . . . L . . . .
394  */
395 
396  diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
397  match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
398  if (match_count > 1) { //most common
399  *mx = mid_pred(A[0], B[0], C[0]);
400  *my = mid_pred(A[1], B[1], C[1]);
401  } else if (match_count == 1) {
402  if (left_ref == ref) {
403  *mx = A[0];
404  *my = A[1];
405  } else if (top_ref == ref) {
406  *mx = B[0];
407  *my = B[1];
408  } else {
409  *mx = C[0];
410  *my = C[1];
411  }
412  } else {
413  if (top_ref == PART_NOT_AVAILABLE &&
414  diagonal_ref == PART_NOT_AVAILABLE &&
415  left_ref != PART_NOT_AVAILABLE) {
416  *mx = A[0];
417  *my = A[1];
418  } else {
419  *mx = mid_pred(A[0], B[0], C[0]);
420  *my = mid_pred(A[1], B[1], C[1]);
421  }
422  }
423 }
424 
/**
 * Motion-compensate one partition (luma, then chroma unless GRAY) from the
 * backward or forward reference picture into the current picture.
 * @param x,y           top-left pixel position of the partition
 * @param width,height  partition size in pixels
 * @param mx,my         motion vector, relative to (x, y)
 * @param dxy           sub-pel phase index into the put/avg tables
 * @param thirdpel      nonzero selects the thirdpel DSP functions
 * @param dir           0 = last_pic, 1 = next_pic as reference
 * @param avg           average into the destination instead of overwriting
 */
static inline void svq3_mc_dir_part(SVQ3Context *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
    int linesize = s->cur_pic->f->linesize[0];
    int uvlinesize = s->cur_pic->f->linesize[1];

    mx += x;
    my += y;

    /* clamp and fall back to edge emulation when the source area leaves
     * the decoded picture */
    if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
        my < 0 || my >= s->v_edge_pos - height - 1) {
        emu = 1;
        mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
        my = av_clip(my, -16, s->v_edge_pos - height + 15);
    }

    /* form component predictions */
    dest = s->cur_pic->f->data[0] + x + y * linesize;
    src = pic->f->data[0] + mx + my * linesize;

    if (emu) {
        /* NOTE(review): dump gap (line 452) — the edge-emulation call head
         * (presumably s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, ...))
         * is missing; the argument tail follows. Confirm upstream. */
                                 linesize, linesize,
                                 width + 1, height + 1,
                                 mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? s->tdsp.avg_tpel_pixels_tab
             : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
                                                 width, height);
    else
        (avg ? s->hdsp.avg_pixels_tab
             : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
                                                       height);

    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        /* halve everything for 4:2:0 chroma; note '+' binds tighter than
         * '>>', so this is (mx + carry) >> 1 — intentional rounding */
        mx = mx + (mx < (int) x) >> 1;
        my = my + (my < (int) y) >> 1;
        width = width >> 1;
        height = height >> 1;
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
            src = pic->f->data[i] + mx + my * uvlinesize;

            if (emu) {
                /* NOTE(review): dump gap (line 479) — edge-emulation call
                 * head missing here as well; confirm upstream. */
                                         uvlinesize, uvlinesize,
                                         width + 1, height + 1,
                                         mx, my, (s->h_edge_pos >> 1),
                                         s->v_edge_pos >> 1);
                src = s->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? s->tdsp.avg_tpel_pixels_tab
                     : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
                                                         uvlinesize,
                                                         width, height);
            else
                (avg ? s->hdsp.avg_pixels_tab
                     : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
                                                               uvlinesize,
                                                               height);
        }
    }
}
499 
/**
 * Motion-compensate every partition of the current macroblock in one
 * prediction direction: predict the MV, read the differential (except in
 * PREDICT_MODE), call svq3_mc_dir_part(), and write the vectors back.
 * @param size partition-layout code selecting part_width/part_height
 * @param mode FULLPEL_MODE / HALFPEL_MODE / THIRDPEL_MODE / PREDICT_MODE
 * @param dir  0 = forward, 1 = backward reference
 * @param avg  average into the destination (B-frame second direction)
 * @return 0 on success, -1 on invalid motion vector data
 */
static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
                              int dir, int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned)(size + 1) / 3);
    const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
    const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
    const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height)
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4 * s->mb_x + (j >> 2)) +
                             (4 * s->mb_y + (i >> 2)) * s->b_stride;
            int dxy;
            x = 16 * s->mb_x + j;
            y = 16 * s->mb_y + i;
            k = (j >> 2 & 1) + (i >> 1 & 2) +
                (j >> 1 & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
            } else {
                /* temporal prediction from the co-located block in next_pic,
                 * scaled by the relative frame distance */
                mx = s->next_pic->motion_val[0][b_xy][0] * 2;
                my = s->next_pic->motion_val[0][b_xy][1] * 2;

                if (dir == 0) {
                    mx = mx * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                } else {
                    mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
            my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                /* NOTE(review): dump gap (lines 547-548) — the reads of
                 * dy/dx (presumably get_interleaved_se_golomb(&s->gb_slice))
                 * are missing; confirm upstream. */

                if (dx != (int16_t)dx || dy != (int16_t)dy) {
                    av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx = (mx + 1 >> 1) + dx;
                my = (my + 1 >> 1) + dy;
                fx = (unsigned)(mx + 0x30000) / 3 - 0x10000;
                fy = (unsigned)(my + 0x30000) / 3 - 0x10000;
                dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 fx, fy, dxy, 1, dir, avg);
                mx += mx;
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx = (unsigned)(mx + 1 + 0x30000) / 3 + dx - 0x10000;
                my = (unsigned)(my + 1 + 0x30000) / 3 + dy - 0x10000;
                dxy = (mx & 1) + 2 * (my & 1);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx >> 1, my >> 1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = (unsigned)(mx + 3 + 0x60000) / 6 + dx - 0x10000;
                my = (unsigned)(my + 3 + 0x60000) / 6 + dy - 0x10000;

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx, my);

                if (part_height == 8 && i < 8) {
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);

                    if (part_width == 8 && j < 8)
                        AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
                }
                if (part_width == 8 && j < 8)
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
                if (part_width == 4 || part_height == 4)
                    AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
            }

            /* write back motion vectors */
            fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
                           part_width >> 2, part_height >> 2, s->b_stride,
                           pack16to32(mx, my), 4);
        }

    return 0;
}
612 
/* NOTE(review): dump gap (line 613) — the signature head is missing;
 * presumably "static void hl_decode_mb_idct_luma(SVQ3Context *s," based on
 * the call in hl_decode_mb(). Confirm upstream.
 * Adds the decoded 4x4 luma residuals to the prediction for all non-intra4x4
 * macroblocks (intra4x4 blocks are handled during prediction instead). */
                                   int mb_type, const int *block_offset,
                                   int linesize, uint8_t *dest_y)
{
    int i;
    if (!IS_INTRA4x4(mb_type)) {
        for (i = 0; i < 16; i++)
            /* only process blocks with coded coefficients or a DC term */
            if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
                uint8_t *const ptr = dest_y + block_offset[i];
                svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
                                s->qscale, IS_INTRA(mb_type) ? 1 : 0);
            }
    }
}
627 
/* NOTE(review): dump gap (line 628) — the signature head is missing;
 * presumably "static void hl_decode_mb_predict_luma(SVQ3Context *s," based
 * on the call in hl_decode_mb(). Confirm upstream.
 * Performs spatial (intra) prediction of the luma plane: 16 predicted 4x4
 * blocks plus residual for intra4x4 MBs, or one 16x16 prediction plus the
 * DC transform for intra16x16 MBs. */
                                          int mb_type,
                                          const int *block_offset,
                                          int linesize,
                                          uint8_t *dest_y)
{
    int i;
    int qscale = s->qscale;

    if (IS_INTRA4x4(mb_type)) {
        for (i = 0; i < 16; i++) {
            uint8_t *const ptr = dest_y + block_offset[i];
            const int dir = s->intra4x4_pred_mode_cache[scan8[i]];

            uint8_t *topright;
            int nnz, tr;
            if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                const int topright_avail = (s->topright_samples_available << i) & 0x8000;
                av_assert2(s->mb_y || linesize <= block_offset[i]);
                if (!topright_avail) {
                    /* replicate the last available top pixel across the
                     * four top-right samples */
                    tr = ptr[3 - linesize] * 0x01010101u;
                    topright = (uint8_t *)&tr;
                } else
                    topright = ptr + 4 - linesize;
            } else
                topright = NULL;

            s->hpc.pred4x4[dir](ptr, topright, linesize);
            nnz = s->non_zero_count_cache[scan8[i]];
            if (nnz) {
                svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
            }
        }
    } else {
        s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
        svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
    }
}
666 
/* NOTE(review): dump gap (line 667) — the function signature is missing;
 * presumably "static void hl_decode_mb(SVQ3Context *s)". Confirm upstream.
 * Reconstructs the current macroblock: intra prediction (if any), luma
 * residual IDCT, then the chroma planes when their cbp bits are set. */
{
    const int mb_x = s->mb_x;
    const int mb_y = s->mb_y;
    const int mb_xy = s->mb_xy;
    const int mb_type = s->cur_pic->mb_type[mb_xy];
    uint8_t *dest_y, *dest_cb, *dest_cr;
    int linesize, uvlinesize;
    int i, j;
    const int *block_offset = &s->block_offset[0];
    const int block_h = 16 >> 1;   /* chroma rows per macroblock */

    linesize = s->cur_pic->f->linesize[0];
    uvlinesize = s->cur_pic->f->linesize[1];

    dest_y = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
    dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
    dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;

    s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
    s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);

    if (IS_INTRA(mb_type)) {
        s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
        s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);

        hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
    }

    hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);

    if (s->cbp & 0x30) {
        uint8_t *dest[2] = { dest_cb, dest_cr };
        s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
                                               s->dequant4_coeff[4][0]);
        s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
                                               s->dequant4_coeff[4][0]);
        for (j = 1; j < 3; j++) {
            for (i = j * 16; i < j * 16 + 4; i++)
                if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
                    uint8_t *const ptr = dest[j - 1] + block_offset[i];
                    svq3_add_idct_c(ptr, s->mb + i * 16,
                                    uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
                }
        }
    }
}
714 
/**
 * Decode one macroblock: parse its type-specific syntax (skip / inter /
 * intra4x4 / intra16x16), form motion or spatial predictions, and read the
 * residual coefficient blocks.
 * @param mb_type raw macroblock type from the bitstream
 * @return 0 on success, -1 on bitstream errors
 */
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
{
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    const int mb_xy = s->mb_xy;
    const int b_xy = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;

    s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
    s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    s->topright_samples_available = 0xFFFF;

    if (mb_type == 0) { /* SKIP */
        if (s->pict_type == AV_PICTURE_TYPE_P ||
            s->next_pic->mb_type[mb_xy] == -1) {
            /* zero-MV copy; B-frames also average in the backward ref */
            svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
                             0, 0, 0, 0, 0, 0);

            if (s->pict_type == AV_PICTURE_TYPE_B)
                svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
                                 0, 0, 0, 0, 1, 1);

            mb_type = MB_TYPE_SKIP;
        } else {
            /* direct mode: derive both directions from next_pic's MVs */
            mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) { /* INTER */
        if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
            mode = THIRDPEL_MODE;
        else if (s->halfpel_flag &&
                 s->thirdpel_flag == !get_bits1(&s->gb_slice))
            mode = HALFPEL_MODE;
        else
            mode = FULLPEL_MODE;

        /* fill caches */
        /* note ref_cache should contain here:
         * ????????
         * ???11111
         * N??11111
         * N??11111
         * N??11111
         */

        for (m = 0; m < 2; m++) {
            if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
                for (i = 0; i < 4; i++)
                    AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
                              s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
            } else {
                for (i = 0; i < 4; i++)
                    AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
            }
            if (s->mb_y > 0) {
                memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
                       s->cur_pic->motion_val[m][b_xy - s->b_stride],
                       4 * 2 * sizeof(int16_t));
                memset(&s->ref_cache[m][scan8[0] - 1 * 8],
                       (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (s->mb_x < s->mb_width - 1) {
                    AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
                              s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
                    s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
                        (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
                         s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
                if (s->mb_x > 0) {
                    AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
                              s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
                    s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
                        (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
            } else
                memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
                       PART_NOT_AVAILABLE, 8);

            if (s->pict_type != AV_PICTURE_TYPE_B)
                break;
        }

        /* decode motion vector(s) and form prediction(s) */
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
                return -1;
        } else { /* AV_PICTURE_TYPE_B */
            if (mb_type != 2) {
                if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
            if (mb_type != 1) {
                if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
        int8_t *i4x4 = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
        int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;

        memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));

        if (mb_type == 8) {
            /* seed the cache with the neighbouring MBs' stored modes */
            if (s->mb_x > 0) {
                for (i = 0; i < 4; i++)
                    s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
                if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
                    s->left_samples_available = 0x5F5F;
            }
            if (s->mb_y > 0) {
                s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
                s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
                s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
                s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];

                if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
                    s->top_samples_available = 0x33FF;
            }

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i += 2) {
                /* NOTE(review): dump gap (line 854) — the read of vlc
                 * (presumably vlc = get_interleaved_ue_golomb(&s->gb_slice))
                 * is missing; confirm upstream. */

                if (vlc >= 25U) {
                    /* NOTE(review): dump gap (line 857) — av_log() call head
                     * missing; the format string below is its tail. */
                           "luma prediction:%"PRIu32"\n", vlc);
                    return -1;
                }

                left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
                top = &s->intra4x4_pred_mode_cache[scan8[i] - 8];

                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1) {
                    av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else { /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++)
                memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
        }

        /* store the right/bottom edge of this MB's modes for neighbours */
        AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
        i4x4[4] = i4x4_cache[7 + 8 * 3];
        i4x4[5] = i4x4_cache[7 + 8 * 2];
        i4x4[6] = i4x4_cache[7 + 8 * 1];

        if (mb_type == 8) {
            /* NOTE(review): dump gap (lines 884-886) — presumably the
             * ff_h264_check_intra4x4_pred_mode() fixup call; confirm
             * upstream. */

            s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
            s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++)
                memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);

            s->top_samples_available = 0x33FF;
            s->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else { /* INTRA16x16 */
        dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;

        /* NOTE(review): dump gap (line 903) — the call head assigning
         * s->intra16x16_pred_mode (presumably ff_h264_check_intra_pred_mode)
         * is missing; the argument tail follows. Confirm upstream. */
                                                 s->left_samples_available, dir, 0)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
            return s->intra16x16_pred_mode;
        }

        cbp = ff_h264_i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }

    /* invalidate motion vectors of intra MBs for later temporal prediction */
    if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
        for (i = 0; i < 4; i++)
            memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                   0, 4 * 2 * sizeof(int16_t));
        if (s->pict_type == AV_PICTURE_TYPE_B) {
            for (i = 0; i < 4; i++)
                memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                       0, 4 * 2 * sizeof(int16_t));
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
        memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
    }

    if (!IS_INTRA16x16(mb_type) &&
        (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
        if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U){
            av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
        /* NOTE(review): dump gap (line 938) — the ':' alternative
         * (presumably ff_h264_golomb_to_inter_cbp[vlc]) is missing. */
    }
    if (IS_INTRA16x16(mb_type) ||
        (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
        /* NOTE(review): dump gap (line 942) — the qscale delta read
         * (presumably s->qscale += get_interleaved_se_golomb(...)) is
         * missing; confirm upstream. */

        if (s->qscale > 31u) {
            av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
            return -1;
        }
    }
    if (IS_INTRA16x16(mb_type)) {
        AV_ZERO128(s->mb_luma_dc[0] + 0);
        AV_ZERO128(s->mb_luma_dc[0] + 8);
        if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
            /* NOTE(review): dump gap (line 953) — av_log() call head
             * missing; the message below is its tail. */
                   "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        for (i = 0; i < 4; i++)
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    /* intra16x16 uses a different 4x4 block ordering */
                    k = index ? (1 * (j & 1) + 2 * (i & 1) +
                                 2 * (j & 2) + 4 * (i & 2))
                              : (4 * i + j);
                    s->non_zero_count_cache[scan8[k]] = 1;

                    if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
                        /* NOTE(review): dump gap (line 972) — av_log() call
                         * head missing. */
                               "error while decoding block\n");
                        return -1;
                    }
                }
            }

        if ((cbp & 0x30)) {
            for (i = 1; i < 3; ++i)
                if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
                    /* NOTE(review): dump gap (line 982) — av_log() call head
                     * missing. */
                           "error while decoding chroma dc block\n");
                    return -1;
                }

            if ((cbp & 0x20)) {
                for (i = 1; i < 3; i++) {
                    for (j = 0; j < 4; j++) {
                        k = 16 * i + j;
                        s->non_zero_count_cache[scan8[k]] = 1;

                        if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
                            /* NOTE(review): dump gap (line 994) — av_log()
                             * call head missing. */
                                   "error while decoding chroma ac block\n");
                            return -1;
                        }
                    }
                }
            }
        }
    }

    s->cbp = cbp;
    s->cur_pic->mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type))
        /* NOTE(review): dump gap (lines 1008-1009) — presumably the
         * chroma_pred_mode validation via ff_h264_check_intra_pred_mode;
         * confirm upstream. */

    return 0;
}
1013 
/* Parse an SVQ3 slice header and (re)initialize the slice-local bit reader.
 * NOTE(review): the signature line is missing from this extract; call sites
 * show this is svq3_decode_slice_header(AVCodecContext *avctx) -- confirm
 * against the full source. Returns 0 on success, negative on error. */
1015 {
1016  SVQ3Context *s = avctx->priv_data;
1017  const int mb_xy = s->mb_xy;
1018  int i, header;
1019  unsigned slice_id;
1020 
1021  header = get_bits(&s->gb, 8);
1022 
 /* Low bits select the slice-header layout (1 or 2); bits 5-6 give the
  * byte width of the slice-length field and must be non-zero. */
1023  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1024  /* TODO: what? */
1025  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
1026  return -1;
1027  } else {
1028  int slice_bits, slice_bytes, slice_length;
1029  int length = header >> 5 & 3;
1030 
1031  slice_length = show_bits(&s->gb, 8 * length);
1032  slice_bits = slice_length * 8;
1033  slice_bytes = slice_length + length - 1;
1034 
1035  skip_bits(&s->gb, 8);
1036 
 /* NOTE(review): a line is missing here in this extract -- presumably the
  * (re)allocation of s->slice_buf; verify against the full source. */
1038  if (!s->slice_buf)
1039  return AVERROR(ENOMEM);
1040 
1041  if (slice_bytes * 8LL > get_bits_left(&s->gb)) {
1042  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
1043  return AVERROR_INVALIDDATA;
1044  }
 /* Copy the raw slice so it can be unscrambled and read independently. */
1045  memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
1046 
 /* Watermarked files XOR-scramble 32 bits at the start of each slice. */
1047  if (s->watermark_key) {
1048  uint32_t header = AV_RL32(&s->slice_buf[1]);
1049  AV_WL32(&s->slice_buf[1], header ^ s->watermark_key);
1050  }
1051  init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);
1052 
1053  if (length > 0) {
1054  memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
1055  }
1056  skip_bits_long(&s->gb, slice_bytes * 8);
1057  }
1058 
1059  if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
1060  av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
1061  return -1;
1062  }
1063 
1064  s->slice_type = ff_h264_golomb_to_pict_type[slice_id];
1065 
1066  if ((header & 0x9F) == 2) {
 /* Layout 2 carries an explicit macroblock address whose bit width
  * depends on the total macroblock count; the value is discarded. */
1067  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
1068  get_bits(&s->gb_slice, i);
1069  } else if (get_bits1(&s->gb_slice)) {
1070  avpriv_report_missing_feature(s->avctx, "Media key encryption");
1071  return AVERROR_PATCHWELCOME;
1072  }
1073 
1074  s->slice_num = get_bits(&s->gb_slice, 8);
1075  s->qscale = get_bits(&s->gb_slice, 5);
1076  s->adaptive_quant = get_bits1(&s->gb_slice);
1077 
1078  /* unknown fields */
1079  skip_bits1(&s->gb_slice);
1080 
1081  if (s->has_watermark)
1082  skip_bits1(&s->gb_slice);
1083 
1084  skip_bits1(&s->gb_slice);
1085  skip_bits(&s->gb_slice, 2);
1086 
1087  if (skip_1stop_8data_bits(&s->gb_slice) < 0)
1088  return AVERROR_INVALIDDATA;
1089 
1090  /* reset intra predictors and invalidate motion vector references */
1091  if (s->mb_x > 0) {
1092  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
1093  -1, 4 * sizeof(int8_t));
1094  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
1095  -1, 8 * sizeof(int8_t) * s->mb_x);
1096  }
1097  if (s->mb_y > 0) {
1098  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
1099  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
1100 
1101  if (s->mb_x > 0)
1102  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
1103  }
1104 
1105  return 0;
1106 }
1107 
/* Precompute the 4x4 dequantization coefficient table for every qscale
 * value 0..51, derived from the H.264 dequant init tables.
 * NOTE(review): the signature line is missing from this extract; the
 * doxygen context suggests it takes the SVQ3Context *s -- confirm. */
1109 {
1110  int q, x;
1111  const int max_qp = 51;
1112 
1113  for (q = 0; q < max_qp + 1; q++) {
 /* Split q into a shift (q/6-based) and a residual table index (q%6). */
1114  int shift = ff_h264_quant_div6[q] + 2;
1115  int idx = ff_h264_quant_rem6[q];
1116  for (x = 0; x < 16; x++)
 /* (x >> 2) | ((x << 2) & 0xF) transposes the 4x4 coefficient position. */
1117  s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
1118  ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
1119  }
1120 }
1121 
/* Decoder init: allocate the three reference frames, set up DSP contexts,
 * parse the "SEQH" extradata (frame size, pel flags, optional zlib-compressed
 * watermark) and allocate the per-macroblock tables.
 * NOTE(review): the signature line is missing from this extract; the doxygen
 * index identifies it as svq3_decode_init(AVCodecContext *avctx). Several
 * interior lines are also missing (marked below) -- verify against the full
 * source. Returns 0 on success, a negative AVERROR on failure. */
1123 {
1124  SVQ3Context *s = avctx->priv_data;
1125  int m, x, y;
1126  unsigned char *extradata;
1127  unsigned char *extradata_end;
1128  unsigned int size;
1129  int marker_found = 0;
1130  int ret;
1131 
 /* Rotating triple of frames: current, last (past ref), next (future ref). */
1132  s->cur_pic = &s->frames[0];
1133  s->last_pic = &s->frames[1];
1134  s->next_pic = &s->frames[2];
1135 
1136  s->cur_pic->f = av_frame_alloc();
1137  s->last_pic->f = av_frame_alloc();
1138  s->next_pic->f = av_frame_alloc();
1139  if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1140  return AVERROR(ENOMEM);
1141 
1142  ff_h264dsp_init(&s->h264dsp, 8, 1);
 /* NOTE(review): original line 1143 is missing from this extract
  * (presumably the ff_h264_pred_init() call) -- confirm. */
1144  ff_videodsp_init(&s->vdsp, 8);
1145 
1146 
1147  avctx->bits_per_raw_sample = 8;
1148 
1149  ff_hpeldsp_init(&s->hdsp, avctx->flags);
1150  ff_tpeldsp_init(&s->tdsp);
1151 
1152  avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1153  avctx->color_range = AVCOL_RANGE_JPEG;
1154 
1155  s->avctx = avctx;
1156  s->halfpel_flag = 1;
1157  s->thirdpel_flag = 1;
1158  s->has_watermark = 0;
1159 
1160  /* prowl for the "SEQH" marker in the extradata */
1161  extradata = (unsigned char *)avctx->extradata;
1162  extradata_end = avctx->extradata + avctx->extradata_size;
1163  if (extradata) {
1164  for (m = 0; m + 8 < avctx->extradata_size; m++) {
1165  if (!memcmp(extradata, "SEQH", 4)) {
1166  marker_found = 1;
1167  break;
1168  }
1169  extradata++;
1170  }
1171  }
1172 
1173  /* if a match was found, parse the extra data */
1174  if (marker_found) {
1175  GetBitContext gb;
1176  int frame_size_code;
1177  int unk0, unk1, unk2, unk3, unk4;
1178  int w,h;
1179 
1180  size = AV_RB32(&extradata[4]);
1181  if (size > extradata_end - extradata - 8)
1182  return AVERROR_INVALIDDATA;
1183  init_get_bits(&gb, extradata + 8, size * 8);
1184 
1185  /* 'frame size code' and optional 'width, height' */
1186  frame_size_code = get_bits(&gb, 3);
1187  switch (frame_size_code) {
1188  case 0:
1189  w = 160;
1190  h = 120;
1191  break;
1192  case 1:
1193  w = 128;
1194  h = 96;
1195  break;
1196  case 2:
1197  w = 176;
1198  h = 144;
1199  break;
1200  case 3:
1201  w = 352;
1202  h = 288;
1203  break;
1204  case 4:
1205  w = 704;
1206  h = 576;
1207  break;
1208  case 5:
1209  w = 240;
1210  h = 180;
1211  break;
1212  case 6:
1213  w = 320;
1214  h = 240;
1215  break;
1216  case 7:
 /* Code 7: explicit 12-bit width and height follow in the bitstream. */
1217  w = get_bits(&gb, 12);
1218  h = get_bits(&gb, 12);
1219  break;
1220  }
1221  ret = ff_set_dimensions(avctx, w, h);
1222  if (ret < 0)
1223  return ret;
1224 
1225  s->halfpel_flag = get_bits1(&gb);
1226  s->thirdpel_flag = get_bits1(&gb);
1227 
1228  /* unknown fields */
1229  unk0 = get_bits1(&gb);
1230  unk1 = get_bits1(&gb);
1231  unk2 = get_bits1(&gb);
1232  unk3 = get_bits1(&gb);
1233 
1234  s->low_delay = get_bits1(&gb);
1235 
1236  /* unknown field */
1237  unk4 = get_bits1(&gb);
1238 
1239  av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1240  unk0, unk1, unk2, unk3, unk4);
1241 
1242  if (skip_1stop_8data_bits(&gb) < 0)
1243  return AVERROR_INVALIDDATA;
1244 
1245  s->has_watermark = get_bits1(&gb);
1246  avctx->has_b_frames = !s->low_delay;
1247  if (s->has_watermark) {
1248 #if CONFIG_ZLIB
1249  unsigned watermark_width = get_interleaved_ue_golomb(&gb);
1250  unsigned watermark_height = get_interleaved_ue_golomb(&gb);
1251  int u1 = get_interleaved_ue_golomb(&gb);
1252  int u2 = get_bits(&gb, 8);
1253  int u3 = get_bits(&gb, 2);
1254  int u4 = get_interleaved_ue_golomb(&gb);
1255  unsigned long buf_len = watermark_width *
1256  watermark_height * 4;
1257  int offset = get_bits_count(&gb) + 7 >> 3;
1258  uint8_t *buf;
1259 
 /* Reject dimensions whose RGBA byte count would overflow unsigned int. */
1260  if (watermark_height <= 0 ||
1261  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
1262  return AVERROR_INVALIDDATA;
1263 
1264  buf = av_malloc(buf_len);
1265  if (!buf)
1266  return AVERROR(ENOMEM);
1267 
1268  av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1269  watermark_width, watermark_height);
1270  av_log(avctx, AV_LOG_DEBUG,
1271  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1272  u1, u2, u3, u4, offset);
1273  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1274  size - offset) != Z_OK) {
1275  av_log(avctx, AV_LOG_ERROR,
1276  "could not uncompress watermark logo\n");
1277  av_free(buf);
1278  return -1;
1279  }
 /* NOTE(review): original line 1280 is missing from this extract
  * (presumably the av_crc() computation of s->watermark_key) -- confirm. */
1281 
1282  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1283  av_log(avctx, AV_LOG_DEBUG,
1284  "watermark key %#"PRIx32"\n", s->watermark_key);
1285  av_free(buf);
1286 #else
1287  av_log(avctx, AV_LOG_ERROR,
1288  "this svq3 file contains watermark which need zlib support compiled in\n");
1289  return AVERROR(ENOSYS);
1290 #endif
1291  }
1292  }
1293 
 /* Derive macroblock geometry from the (16-pixel-aligned) frame size. */
1294  s->mb_width = (avctx->width + 15) / 16;
1295  s->mb_height = (avctx->height + 15) / 16;
1296  s->mb_stride = s->mb_width + 1;
1297  s->mb_num = s->mb_width * s->mb_height;
1298  s->b_stride = 4 * s->mb_width;
1299  s->h_edge_pos = s->mb_width * 16;
1300  s->v_edge_pos = s->mb_height * 16;
1301 
1302  s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
1303  if (!s->intra4x4_pred_mode)
1304  return AVERROR(ENOMEM);
1305 
1306  s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
1307  sizeof(*s->mb2br_xy));
1308  if (!s->mb2br_xy)
1309  return AVERROR(ENOMEM);
1310 
1311  for (y = 0; y < s->mb_height; y++)
1312  for (x = 0; x < s->mb_width; x++) {
1313  const int mb_xy = x + y * s->mb_stride;
1314 
1315  s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
1316  }
1317 
 /* NOTE(review): original line 1318 is missing from this extract
  * (presumably the init_dequant4_coeff_table(s) call) -- confirm. */
1319 
1320  return 0;
1321 }
1322 
1323 static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
1324 {
1325  int i;
1326  for (i = 0; i < 2; i++) {
1327  av_freep(&pic->motion_val_buf[i]);
1328  }
1329  av_freep(&pic->mb_type_buf);
1330 
1331  av_frame_unref(pic->f);
1332 }
1333 
/* Allocate (on first use) the per-picture macroblock-type and motion-vector
 * tables, then obtain the actual frame buffer from the generic allocator.
 * Also lazily allocates the shared edge-emulation scratch buffer.
 * Returns 0 on success, a negative AVERROR on failure; on failure the
 * partially allocated picture side data is released via free_picture(). */
1334 static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
1335 {
1336  SVQ3Context *s = avctx->priv_data;
1337  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
1338  const int b4_stride = s->mb_width * 4 + 1;
1339  const int b4_array_size = b4_stride * s->mb_height * 4;
1340  int ret;
1341 
 /* Side-data tables are allocated once and reused across frames. */
1342  if (!pic->motion_val_buf[0]) {
1343  int i;
1344 
1345  pic->mb_type_buf = av_calloc(big_mb_num + s->mb_stride, sizeof(uint32_t));
1346  if (!pic->mb_type_buf)
1347  return AVERROR(ENOMEM);
 /* Skip the padding row/column so mb_type[0] is the first real MB. */
1348  pic->mb_type = pic->mb_type_buf + 2 * s->mb_stride + 1;
1349 
1350  for (i = 0; i < 2; i++) {
1351  pic->motion_val_buf[i] = av_calloc(b4_array_size + 4, 2 * sizeof(int16_t));
1352  if (!pic->motion_val_buf[i]) {
1353  ret = AVERROR(ENOMEM);
1354  goto fail;
1355  }
1356 
1357  pic->motion_val[i] = pic->motion_val_buf[i] + 4;
1358  }
1359  }
1360 
1361  ret = ff_get_buffer(avctx, pic->f,
1362  (s->pict_type != AV_PICTURE_TYPE_B) ?
 /* NOTE(review): original line 1363 is missing from this extract
  * (presumably "AV_GET_BUFFER_FLAG_REF : 0);") -- confirm. */
1364  if (ret < 0)
1365  goto fail;
1366 
1367  if (!s->edge_emu_buffer) {
 /* 17 lines: 16 pixels plus one row for sub-pel interpolation overreach. */
1368  s->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
1369  if (!s->edge_emu_buffer)
1370  return AVERROR(ENOMEM);
1371  }
1372 
1373  return 0;
1374 fail:
1375  free_picture(avctx, pic);
1376  return ret;
1377 }
1378 
/* Decode one SVQ3 access unit into *data (an AVFrame) and set *got_frame.
 * Handles the end-of-stream flush (empty packet), optional watermark
 * de-scrambling of the packet payload, slice-header parsing, reference
 * management for P/B frames, and the per-macroblock decode loop.
 * NOTE(review): several original lines are missing from this extract
 * (1428, 1474, 1476, 1481, 1493, 1498, 1505, 1547) -- marked below where
 * the break is visible; verify against the full source. */
1379 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1380  int *got_frame, AVPacket *avpkt)
1381 {
1382  SVQ3Context *s = avctx->priv_data;
1383  int buf_size = avpkt->size;
1384  int left;
1385  uint8_t *buf;
1386  int ret, m, i;
1387 
1388  /* special case for last picture */
1389  if (buf_size == 0) {
1390  if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
1391  ret = av_frame_ref(data, s->next_pic->f);
1392  if (ret < 0)
1393  return ret;
1394  s->last_frame_output = 1;
1395  *got_frame = 1;
1396  }
1397  return 0;
1398  }
1399 
1400  s->mb_x = s->mb_y = s->mb_xy = 0;
1401 
 /* Watermarked streams are de-scrambled in place, so work on a copy. */
1402  if (s->watermark_key) {
1403  av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1404  if (!s->buf)
1405  return AVERROR(ENOMEM);
1406  memcpy(s->buf, avpkt->data, buf_size);
1407  buf = s->buf;
1408  } else {
1409  buf = avpkt->data;
1410  }
1411 
1412  ret = init_get_bits(&s->gb, buf, 8 * buf_size);
1413  if (ret < 0)
1414  return ret;
1415 
1416  if (svq3_decode_slice_header(avctx))
1417  return -1;
1418 
1419  s->pict_type = s->slice_type;
1420 
 /* Non-B frames become the new future reference; rotate the old one out. */
1421  if (s->pict_type != AV_PICTURE_TYPE_B)
1422  FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);
1423 
1424  av_frame_unref(s->cur_pic->f);
1425 
1426  /* for skipping the frame */
1427  s->cur_pic->f->pict_type = s->pict_type;
 /* NOTE(review): original line 1428 is missing from this extract
  * (presumably setting cur_pic->f->key_frame) -- confirm. */
1429 
1430  ret = get_buffer(avctx, s->cur_pic);
1431  if (ret < 0)
1432  return ret;
1433 
 /* Precompute per-4x4-block byte offsets for luma (and the field variants
  * at +48) from the scan8 ordering and the frame linesize. */
1434  for (i = 0; i < 16; i++) {
1435  s->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1436  s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1437  }
1438  for (i = 0; i < 16; i++) {
1439  s->block_offset[16 + i] =
1440  s->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1441  s->block_offset[48 + 16 + i] =
1442  s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1443  }
1444 
 /* Synthesize grey reference frames when real ones are missing (e.g.
  * after a seek), so inter prediction has something to read from. */
1445  if (s->pict_type != AV_PICTURE_TYPE_I) {
1446  if (!s->last_pic->f->data[0]) {
1447  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1448  av_frame_unref(s->last_pic->f);
1449  ret = get_buffer(avctx, s->last_pic);
1450  if (ret < 0)
1451  return ret;
1452  memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1453  memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1454  s->last_pic->f->linesize[1]);
1455  memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1456  s->last_pic->f->linesize[2]);
1457  }
1458 
1459  if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1460  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1461  av_frame_unref(s->next_pic->f);
1462  ret = get_buffer(avctx, s->next_pic);
1463  if (ret < 0)
1464  return ret;
1465  memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1466  memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1467  s->next_pic->f->linesize[1]);
1468  memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1469  s->next_pic->f->linesize[2]);
1470  }
1471  }
1472 
1473  if (avctx->debug & FF_DEBUG_PICT_INFO)
 /* NOTE(review): original lines 1474 and 1476 are missing from this
  * extract (the av_log call head and its first argument) -- confirm. */
1475  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1477  s->halfpel_flag, s->thirdpel_flag,
1478  s->adaptive_quant, s->qscale, s->slice_num);
1479 
1480  if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
 /* NOTE(review): original line 1481 (a condition continuation) is
  * missing from this extract -- confirm. */
1482  avctx->skip_frame >= AVDISCARD_ALL)
1483  return 0;
1484 
1485  if (s->next_p_frame_damaged) {
1486  if (s->pict_type == AV_PICTURE_TYPE_B)
1487  return 0;
1488  else
1489  s->next_p_frame_damaged = 0;
1490  }
1491 
1492  if (s->pict_type == AV_PICTURE_TYPE_B) {
 /* NOTE(review): original line 1493 is missing from this extract
  * (presumably the frame_num_offset computation) -- confirm. */
1494 
1495  if (s->frame_num_offset < 0)
1496  s->frame_num_offset += 256;
1497  if (s->frame_num_offset == 0 ||
 /* NOTE(review): original line 1498 (condition continuation) is
  * missing from this extract -- confirm. */
1499  av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1500  return -1;
1501  }
1502  } else {
1503  s->prev_frame_num = s->frame_num;
1504  s->frame_num = s->slice_num;
 /* NOTE(review): original line 1505 is missing from this extract
  * (presumably the prev_frame_num_offset computation) -- confirm. */
1506 
1507  if (s->prev_frame_num_offset < 0)
1508  s->prev_frame_num_offset += 256;
1509  }
1510 
 /* Mark the ref cache usable, with the right column unavailable. */
1511  for (m = 0; m < 2; m++) {
1512  int i;
1513  for (i = 0; i < 4; i++) {
1514  int j;
1515  for (j = -1; j < 4; j++)
1516  s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1517  if (i < 3)
1518  s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1519  }
1520  }
1521 
1522  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1523  for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1524  unsigned mb_type;
1525  s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
1526 
 /* Near the end of the slice data, re-read a slice header if the
  * remaining bits are byte-aligned padding. */
1527  if ((get_bits_left(&s->gb_slice)) <= 7) {
1528  if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
1529  show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {
1530 
1531  if (svq3_decode_slice_header(avctx))
1532  return -1;
1533  }
1534  if (s->slice_type != s->pict_type) {
1535  avpriv_request_sample(avctx, "non constant slice type");
1536  }
1537  /* TODO: support s->mb_skip_run */
1538  }
1539 
1540  mb_type = get_interleaved_ue_golomb(&s->gb_slice);
1541 
 /* Remap the coded mb_type into the shared I/P/B numbering. */
1542  if (s->pict_type == AV_PICTURE_TYPE_I)
1543  mb_type += 8;
1544  else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1545  mb_type += 4;
1546  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
 /* NOTE(review): original line 1547 (the av_log call head) is
  * missing from this extract -- confirm. */
1548  "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1549  return -1;
1550  }
1551 
1552  if (mb_type != 0 || s->cbp)
1553  hl_decode_mb(s);
1554 
1555  if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
1556  s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
1557  (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1558  }
1559 
1560  ff_draw_horiz_band(avctx, s->cur_pic->f,
1561  s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1562  16 * s->mb_y, 16, PICT_FRAME, 0,
1563  s->low_delay);
1564  }
1565 
1566  left = buf_size*8 - get_bits_count(&s->gb_slice);
1567 
1568  if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
1569  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
1570  //av_hex_dump(stderr, buf+buf_size-8, 8);
1571  }
1572 
1573  if (left < 0) {
1574  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1575  return -1;
1576  }
1577 
 /* B frames and low-delay streams output the just-decoded picture;
  * otherwise the delayed past reference is output. */
1578  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
1579  ret = av_frame_ref(data, s->cur_pic->f);
1580  else if (s->last_pic->f->data[0])
1581  ret = av_frame_ref(data, s->last_pic->f);
1582  if (ret < 0)
1583  return ret;
1584 
1585  /* Do not output the last pic after seeking. */
1586  if (s->last_pic->f->data[0] || s->low_delay)
1587  *got_frame = 1;
1588 
1589  if (s->pict_type != AV_PICTURE_TYPE_B) {
1590  FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);
1591  } else {
1592  av_frame_unref(s->cur_pic->f);
1593  }
1594 
1595  return buf_size;
1596 }
1597 
/* Decoder teardown: release all three pictures' side data and frames,
 * the slice buffer, the mb2br mapping and the watermark packet copy.
 * NOTE(review): the signature line is missing from this extract (the
 * AVCodec table names it svq3_decode_end), as are original lines
 * 1609-1610 (presumably freeing intra4x4_pred_mode and edge_emu_buffer)
 * -- confirm against the full source. Always returns 0. */
1599 {
1600  SVQ3Context *s = avctx->priv_data;
1601 
1602  free_picture(avctx, s->cur_pic);
1603  free_picture(avctx, s->next_pic);
1604  free_picture(avctx, s->last_pic);
1605  av_frame_free(&s->cur_pic->f);
1606  av_frame_free(&s->next_pic->f);
1607  av_frame_free(&s->last_pic->f);
1608  av_freep(&s->slice_buf);
1611  av_freep(&s->mb2br_xy);
1612 
1613 
1614  av_freep(&s->buf);
1615  s->buf_size = 0;
1616 
1617  return 0;
1618 }
1619 
/* Codec registration table for the SVQ3 decoder.
 * NOTE(review): the opening line (original 1620, "AVCodec ff_svq3_decoder
 * = {") and several field lines (1626, 1628, 1630-1631: presumably .init,
 * .decode and additional capability flags) are missing from this extract
 * -- confirm against the full source. */
1621  .name = "svq3",
1622  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1623  .type = AVMEDIA_TYPE_VIDEO,
1624  .id = AV_CODEC_ID_SVQ3,
1625  .priv_data_size = sizeof(SVQ3Context),
1627  .close = svq3_decode_end,
1629  .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1632  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1633  AV_PIX_FMT_NONE},
1634  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1635 };
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
uint8_t pred_mode
Definition: h264data.h:35
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
discard all frames except keyframes
Definition: avcodec.h:235
void(* h264_chroma_dc_dequant_idct)(int16_t *block, int qmul)
Definition: h264dsp.h:104
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int cbp
Definition: svq3.c:108
static int shift(int a, int b)
Definition: sonic.c:82
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
HpelDSPContext hdsp
Definition: svq3.c:84
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:295
else temp
Definition: vf_mcdeint.c:256
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:106
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
int prev_frame_num
Definition: svq3.c:112
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: svq3.c:376
#define avpriv_request_sample(...)
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1161
int size
Definition: packet.h:364
int mb_xy
Definition: svq3.c:119
const uint8_t * buffer
Definition: get_bits.h:62
#define av_bswap16
Definition: bswap.h:31
int av_log2(unsigned v)
Definition: intmath.c:26
uint8_t * slice_buf
Definition: svq3.c:93
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
GLint GLenum type
Definition: opengl_enc.c:104
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:72
int v_edge_pos
Definition: svq3.c:104
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
const uint8_t ff_h264_quant_rem6[QP_MAX_NUM+1]
Definition: h264data.c:174
discard all
Definition: avcodec.h:236
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
uint8_t run
Definition: svq3.c:204
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
#define FULLPEL_MODE
Definition: svq3.c:148
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:51
AVCodec.
Definition: codec.h:190
#define MB_TYPE_INTRA4x4
Definition: mpegutils.h:51
if it could not because there are no more frames
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
int16_t mb[16 *48 *2]
Definition: svq3.c:140
Macro definitions for various function/variable attributes.
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:245
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:500
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2004
SVQ3Frame frames[3]
Definition: svq3.c:145
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
int has_watermark
Definition: svq3.c:97
int thirdpel_flag
Definition: svq3.c:96
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:52
int mb_num
Definition: svq3.c:121
const uint8_t ff_h264_dequant4_coeff_init[6][3]
Definition: h264data.c:152
static const uint8_t luma_dc_zigzag_scan[16]
Definition: svq3.c:169
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:613
#define av_cold
Definition: attributes.h:88
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
#define mb
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
#define DC_PRED8x8
Definition: h264pred.h:68
int block_offset[2 *(16 *3)]
Definition: svq3.c:144
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1612
uint32_t * mb_type_buf
Definition: svq3.c:76
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
static av_always_inline int svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C, int i, int list, int part_width)
Definition: svq3.c:355
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:456
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
#define MB_TYPE_16x16
Definition: mpegutils.h:54
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:87
const uint8_t ff_h264_chroma_dc_scan[4]
Definition: h264data.c:54
Context for storing H.264 prediction functions.
Definition: h264pred.h:92
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
Public header for CRC hash function implementation.
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
Definition: mem.h:112
#define height
uint8_t * data
Definition: packet.h:363
thirdpel DSP context
Definition: tpeldsp.h:42
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
const IMbInfo ff_h264_i_mb_type_info[26]
Definition: h264data.c:66
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
const uint8_t ff_h264_golomb_to_inter_cbp[48]
Definition: h264data.c:48
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx, int top_samples_available, int left_samples_available)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:131
thirdpel DSP functions
ptrdiff_t size
Definition: opengl_enc.c:100
static const uint8_t header[24]
Definition: sdr2.c:67
enum AVPictureType slice_type
Definition: svq3.c:115
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
#define A(x)
Definition: vp56_arith.h:28
#define av_log(a,...)
int prev_frame_num_offset
Definition: svq3.c:111
int low_delay
Definition: svq3.c:116
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:715
#define U(x)
Definition: vp56_arith.h:37
#define src
Definition: vp8dsp.c:254
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
#define HALFPEL_MODE
Definition: svq3.c:149
AVCodecContext * avctx
Definition: svq3.c:80
int8_t * intra4x4_pred_mode
Definition: svq3.c:130
uint8_t * edge_emu_buffer
Definition: svq3.c:136
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
#define B
Definition: huffyuvdsp.h:32
av_cold void ff_tpeldsp_init(TpelDSPContext *c)
Definition: tpeldsp.c:312
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
uint32_t * mb_type
Definition: svq3.c:76
int frame_num
Definition: svq3.c:109
int mb_x
Definition: svq3.c:118
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:303
GLsizei GLsizei * length
Definition: opengl_enc.c:114
unsigned int left_samples_available
Definition: svq3.c:134
const char * name
Name of the codec implementation.
Definition: codec.h:197
#define IS_SKIP(a)
Definition: mpegutils.h:81
int chroma_pred_mode
Definition: svq3.c:126
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
#define PREDICT_MODE
Definition: svq3.c:151
#define fail()
Definition: checkasm.h:123
unsigned int topright_samples_available
Definition: svq3.c:133
Sorenson Vector Quantizer #1 (SVQ1) video codec.
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
Definition: svq3.c:70
useful rectangle filling function
tpel_mc_func avg_tpel_pixels_tab[11]
Definition: tpeldsp.h:54
Half-pel DSP context.
Definition: hpeldsp.h:45
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
SVQ3Frame * cur_pic
Definition: svq3.c:88
Context for storing H.264 DSP functions.
Definition: h264dsp.h:42
uint32_t dequant4_coeff[QP_MAX_NUM+1][16]
Definition: svq3.c:143
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:391
int16_t(*[2] motion_val)[2]
Definition: svq3.c:74
#define FFMIN(a, b)
Definition: common.h:96
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
#define width
int width
picture width / height.
Definition: avcodec.h:699
uint8_t w
Definition: llviddspenc.c:38
int32_t
GetBitContext gb_slice
Definition: svq3.c:92
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
Definition: crc.c:392
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:1122
#define s(width, name)
Definition: cbs_vp9.c:257
tpel_mc_func put_tpel_pixels_tab[11]
Thirdpel motion compensation with rounding (a + b + 1) >> 1.
Definition: tpeldsp.h:53
H.264 / AVC / MPEG-4 part10 codec.
int b_stride
Definition: svq3.c:122
H264PredContext hpc
Definition: svq3.c:83
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:831
int last_frame_output
Definition: svq3.c:105
int next_p_frame_damaged
Definition: svq3.c:102
Full range content.
Definition: pixfmt.h:586
#define IS_INTRA16x16(a)
Definition: mpegutils.h:76
if(ret)
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static const int8_t mv[256][2]
Definition: 4xm.c:77
H264DSPContext h264dsp
Definition: svq3.c:82
Half-pel DSP functions.
AVCodec ff_svq3_decoder
Definition: svq3.c:1620
GetBitContext gb
Definition: svq3.c:91
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
int debug
debug
Definition: avcodec.h:1611
int intra16x16_pred_mode
Definition: svq3.c:127
main external API structure.
Definition: avcodec.h:526
const uint8_t ff_h264_chroma_qp[7][QP_MAX_NUM+1]
Definition: h264data.c:203
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff)*mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
#define QP_MAX_NUM
Definition: h264.h:27
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1872
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:650
int extradata_size
Definition: avcodec.h:628
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
int qscale
Definition: svq3.c:107
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
const uint8_t ff_h264_golomb_to_intra4x4_cbp[48]
Definition: h264data.c:42
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:538
int mb_height
Definition: svq3.c:120
enum AVPictureType pict_type
Definition: svq3.c:114
const uint8_t ff_h264_quant_div6[QP_MAX_NUM+1]
Definition: h264data.c:182
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
int index
Definition: gxfenc.c:89
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:666
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:425
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
static void init_dequant4_coeff_table(SVQ3Context *s)
Definition: svq3.c:1108
static av_cold int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1598
#define mid_pred
Definition: mathops.h:97
int8_t ref_cache[2][5 *8]
Definition: svq3.c:139
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:176
int mb_y
Definition: svq3.c:118
static const struct @140 svq3_dct_tables[2][16]
AVPictureType
Definition: avutil.h:272
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
#define IS_INTER(a)
Definition: mpegutils.h:79
int16_t(*[2] motion_val_buf)[2]
Definition: svq3.c:73
int slice_num
Definition: svq3.c:106
AVFrame * f
Definition: svq3.c:71
#define MB_TYPE_SKIP
Definition: mpegutils.h:62
uint8_t * buf
Definition: svq3.c:99
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:300
SVQ3Frame * last_pic
Definition: svq3.c:90
VideoDSPContext vdsp
Definition: svq3.c:86
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:566
static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1323
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
uint32_t * mb2br_xy
Definition: svq3.c:124
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
uint8_t level
Definition: svq3.c:205
Definition: vp9.h:48
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
Definition: crc.c:374
#define avg(a, b, c, d)
discard all non reference
Definition: avcodec.h:232
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
int
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:628
uint8_t non_zero_count_cache[15 *8]
Definition: svq3.c:142
uint8_t cbp
Definition: h264data.h:36
common internal api header.
static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1334
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
int mb_stride
Definition: svq3.c:121
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available, int left_samples_available, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:179
int16_t mb_luma_dc[3][16 *2]
Definition: svq3.c:141
int h_edge_pos
Definition: svq3.c:103
Bi-dir predicted.
Definition: avutil.h:276
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:220
#define stride
int frame_num_offset
Definition: svq3.c:110
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
#define IS_INTRA(x, y)
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:213
void * priv_data
Definition: avcodec.h:553
#define THIRDPEL_MODE
Definition: svq3.c:150
#define PICT_FRAME
Definition: mpegutils.h:39
unsigned int top_samples_available
Definition: svq3.c:132
#define IS_INTRA4x4(a)
Definition: mpegutils.h:75
#define av_free(p)
static void hl_decode_mb(SVQ3Context *s)
Definition: svq3.c:667
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:1014
#define PART_NOT_AVAILABLE
Definition: h264dec.h:397
int slice_size
Definition: svq3.c:94
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:386
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
TpelDSPContext tdsp
Definition: svq3.c:85
static const uint8_t svq3_scan[16]
Definition: svq3.c:162
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: svq3.c:129
int mb_width
Definition: svq3.c:120
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:67
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:188
static void svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:255
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1217
#define av_freep(p)
uint32_t watermark_key
Definition: svq3.c:98
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:854
#define av_always_inline
Definition: attributes.h:45
SVQ3Frame * next_pic
Definition: svq3.c:89
#define FFSWAP(type, a, b)
Definition: common.h:99
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:145
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
int buf_size
Definition: svq3.c:100
exp golomb vlc stuff
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: packet.h:340
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1379
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
mode
Use these values in ebur128_init (or&#39;ed).
Definition: ebur128.h:83
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
int i
Definition: input.c:407
Predicted.
Definition: avutil.h:275
int halfpel_flag
Definition: svq3.c:95
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
int adaptive_quant
Definition: svq3.c:101
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
int16_t mv_cache[2][5 *8][2]
Definition: svq3.c:138