svq3.c
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
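/*
 * Illustrative sketch (not part of the original file): one way a calling app
 * might hand the ImageDescription atom to this decoder. The variables
 * image_desc and image_desc_size are hypothetical and stand for the atom
 * payload with its leading 4-byte length field already stripped:
 *
 *     avctx->extradata = av_mallocz(image_desc_size + AV_INPUT_BUFFER_PADDING_SIZE);
 *     if (!avctx->extradata)
 *         return AVERROR(ENOMEM);
 *     memcpy(avctx->extradata, image_desc, image_desc_size); // starts with 'S','V','Q','3'
 *     avctx->extradata_size = image_desc_size;
 */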
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "internal.h"
47 #include "avcodec.h"
48 #include "mpegutils.h"
49 #include "h264dec.h"
50 #include "h264data.h"
51 #include "golomb.h"
52 #include "hpeldsp.h"
53 #include "mathops.h"
54 #include "rectangle.h"
55 #include "tpeldsp.h"
56 
57 #if CONFIG_ZLIB
58 #include <zlib.h>
59 #endif
60 
61 #include "svq1.h"
62 
63 /**
64  * @file
65  * svq3 decoder.
66  */
67 
68 typedef struct SVQ3Frame {
69     AVFrame *f;
70 
71     AVBufferRef *motion_val_buf[2];
72     int16_t (*motion_val[2])[2];
73 
74     AVBufferRef *mb_type_buf;
75     uint32_t *mb_type;
76 
77 
78     AVBufferRef *ref_index_buf[2];
79     int8_t *ref_index[2];
80 } SVQ3Frame;
81 
82 typedef struct SVQ3Context {
83     AVCodecContext *avctx;
84 
85     H264DSPContext h264dsp;
86     H264PredContext hpc;
87     HpelDSPContext hdsp;
88     TpelDSPContext tdsp;
89     VideoDSPContext vdsp;
90 
91     SVQ3Frame *cur_pic;
92     SVQ3Frame *next_pic;
93     SVQ3Frame *last_pic;
94     GetBitContext gb;
95     GetBitContext gb_slice;
96     uint8_t *slice_buf;
97     int slice_size;
98     int halfpel_flag;
99     int thirdpel_flag;
100     int has_watermark;
101     uint32_t watermark_key;
102     uint8_t *buf;
103     int buf_size;
104     int adaptive_quant;
105     int next_p_frame_damaged;
106     int h_edge_pos;
107     int v_edge_pos;
108     int last_frame_output;
109     int slice_num;
110     int qscale;
111     int cbp;
112     int frame_num;
113     int frame_num_offset;
114     int prev_frame_num_offset;
115     int prev_frame_num;
116 
117     enum AVPictureType pict_type;
118     int low_delay;
119 
120     int mb_x, mb_y;
121     int mb_xy;
122     int mb_width, mb_height;
123     int mb_stride, mb_num;
124     int b_stride;
125 
126     uint32_t *mb2br_xy;
127 
128     int chroma_pred_mode;
129     int intra16x16_pred_mode;
130 
131     int8_t intra4x4_pred_mode_cache[5 * 8];
132     int8_t *intra4x4_pred_mode;
133 
134     unsigned int top_samples_available;
135     unsigned int topright_samples_available;
136     unsigned int left_samples_available;
137 
138     uint8_t *edge_emu_buffer;
139 
140     DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
141     DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];
142     DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
143     DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
144     uint8_t non_zero_count_cache[15 * 8];
145     uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
146     int block_offset[2 * (16 * 3)];
147 } SVQ3Context;
148 
149 #define FULLPEL_MODE 1
150 #define HALFPEL_MODE 2
151 #define THIRDPEL_MODE 3
152 #define PREDICT_MODE 4
153 
154 /* dual scan (from some older H.264 draft)
155  * o-->o-->o   o
156  *         |  /|
157  * o   o   o / o
158  * | / |   |/  |
159  * o   o   o   o
160  *   /
161  * o-->o-->o-->o
162  */
163 static const uint8_t svq3_scan[16] = {
164  0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
165  2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
166  0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
167  0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
168 };
169 
170 static const uint8_t luma_dc_zigzag_scan[16] = {
171  0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
172  3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
173  1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
174  3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
175 };
176 
177 static const uint8_t svq3_pred_0[25][2] = {
178  { 0, 0 },
179  { 1, 0 }, { 0, 1 },
180  { 0, 2 }, { 1, 1 }, { 2, 0 },
181  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
182  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
183  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
184  { 2, 4 }, { 3, 3 }, { 4, 2 },
185  { 4, 3 }, { 3, 4 },
186  { 4, 4 }
187 };
188 
189 static const int8_t svq3_pred_1[6][6][5] = {
190  { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
191  { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
192  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
193  { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
194  { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
195  { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
196  { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
197  { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
198  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
199  { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
200  { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
201  { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
202 };
203 
204 static const struct {
205     uint8_t run;
206     uint8_t level;
207 } svq3_dct_tables[2][16] = {
208  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
209  { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
210  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
211  { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
212 };
213 
214 static const uint32_t svq3_dequant_coeff[32] = {
215  3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
216  9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
217  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
218  61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
219 };
220 
221 static int svq3_decode_end(AVCodecContext *avctx);
222 
223 static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
224 {
225  const int qmul = svq3_dequant_coeff[qp];
226 #define stride 16
227  int i;
228  int temp[16];
229  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
230 
231  for (i = 0; i < 4; i++) {
232  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
233  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
234  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
235  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
236 
237  temp[4 * i + 0] = z0 + z3;
238  temp[4 * i + 1] = z1 + z2;
239  temp[4 * i + 2] = z1 - z2;
240  temp[4 * i + 3] = z0 - z3;
241  }
242 
243  for (i = 0; i < 4; i++) {
244  const int offset = x_offset[i];
245  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
246  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
247  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
248  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
249 
250  output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
251  output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
252  output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
253  output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
254  }
255 }
256 #undef stride
257 
258 static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
259  int stride, int qp, int dc)
260 {
261  const int qmul = svq3_dequant_coeff[qp];
262  int i;
263 
264  if (dc) {
265  dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
266  : qmul * (block[0] >> 3) / 2);
267  block[0] = 0;
268  }
269 
270  for (i = 0; i < 4; i++) {
271  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
272  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
273  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
274  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
275 
276  block[0 + 4 * i] = z0 + z3;
277  block[1 + 4 * i] = z1 + z2;
278  block[2 + 4 * i] = z1 - z2;
279  block[3 + 4 * i] = z0 - z3;
280  }
281 
282  for (i = 0; i < 4; i++) {
283  const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
284  const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
285  const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
286  const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
287  const int rr = (dc + 0x80000);
288 
289  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
290  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
291  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
292  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
293  }
294 
295  memset(block, 0, 16 * sizeof(int16_t));
296 }
297 
298 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
299  int index, const int type)
300 {
301  static const uint8_t *const scan_patterns[4] = {
302         luma_dc_zigzag_scan, ff_zigzag_scan, svq3_scan, ff_h264_chroma_dc_scan
303     };
304 
305  int run, level, sign, limit;
306  unsigned vlc;
307  const int intra = 3 * type >> 2;
308  const uint8_t *const scan = scan_patterns[type];
309 
310  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
311  for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
312  if ((int32_t)vlc < 0)
313  return -1;
314 
315  sign = (vlc & 1) ? 0 : -1;
316  vlc = vlc + 1 >> 1;
317 
318  if (type == 3) {
319  if (vlc < 3) {
320  run = 0;
321  level = vlc;
322  } else if (vlc < 4) {
323  run = 1;
324  level = 1;
325  } else {
326  run = vlc & 0x3;
327  level = (vlc + 9 >> 2) - run;
328  }
329  } else {
330  if (vlc < 16U) {
331  run = svq3_dct_tables[intra][vlc].run;
332  level = svq3_dct_tables[intra][vlc].level;
333  } else if (intra) {
334  run = vlc & 0x7;
335  level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
336  } else {
337  run = vlc & 0xF;
338  level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
339  }
340  }
341 
342 
343  if ((index += run) >= limit)
344  return -1;
345 
346  block[scan[index]] = (level ^ sign) - sign;
347  }
348 
349  if (type != 2) {
350  break;
351  }
352  }
353 
354  return 0;
355 }
356 
357 static av_always_inline int
358 svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
359  int i, int list, int part_width)
360 {
361  const int topright_ref = s->ref_cache[list][i - 8 + part_width];
362 
363  if (topright_ref != PART_NOT_AVAILABLE) {
364  *C = s->mv_cache[list][i - 8 + part_width];
365  return topright_ref;
366  } else {
367  *C = s->mv_cache[list][i - 8 - 1];
368  return s->ref_cache[list][i - 8 - 1];
369  }
370 }
371 
372 /**
373  * Get the predicted MV.
374  * @param n the block index
375  * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
376  * @param mx the x component of the predicted motion vector
377  * @param my the y component of the predicted motion vector
378  */
379 static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
380                                               int part_width, int list,
381  int ref, int *const mx, int *const my)
382 {
383  const int index8 = scan8[n];
384  const int top_ref = s->ref_cache[list][index8 - 8];
385  const int left_ref = s->ref_cache[list][index8 - 1];
386  const int16_t *const A = s->mv_cache[list][index8 - 1];
387  const int16_t *const B = s->mv_cache[list][index8 - 8];
388  const int16_t *C;
389  int diagonal_ref, match_count;
390 
391 /* mv_cache
392  * B . . A T T T T
393  * U . . L . . , .
394  * U . . L . . . .
395  * U . . L . . , .
396  * . . . L . . . .
397  */
398 
399  diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
400  match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
401  if (match_count > 1) { //most common
402  *mx = mid_pred(A[0], B[0], C[0]);
403  *my = mid_pred(A[1], B[1], C[1]);
404  } else if (match_count == 1) {
405  if (left_ref == ref) {
406  *mx = A[0];
407  *my = A[1];
408  } else if (top_ref == ref) {
409  *mx = B[0];
410  *my = B[1];
411  } else {
412  *mx = C[0];
413  *my = C[1];
414  }
415  } else {
416  if (top_ref == PART_NOT_AVAILABLE &&
417  diagonal_ref == PART_NOT_AVAILABLE &&
418  left_ref != PART_NOT_AVAILABLE) {
419  *mx = A[0];
420  *my = A[1];
421  } else {
422  *mx = mid_pred(A[0], B[0], C[0]);
423  *my = mid_pred(A[1], B[1], C[1]);
424  }
425  }
426 }
427 
428 static inline void svq3_mc_dir_part(SVQ3Context *s,
429  int x, int y, int width, int height,
430  int mx, int my, int dxy,
431  int thirdpel, int dir, int avg)
432 {
433  const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
434  uint8_t *src, *dest;
435  int i, emu = 0;
436  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
437  int linesize = s->cur_pic->f->linesize[0];
438  int uvlinesize = s->cur_pic->f->linesize[1];
439 
440  mx += x;
441  my += y;
442 
443  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
444  my < 0 || my >= s->v_edge_pos - height - 1) {
445  emu = 1;
446  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
447  my = av_clip(my, -16, s->v_edge_pos - height + 15);
448  }
449 
450  /* form component predictions */
451  dest = s->cur_pic->f->data[0] + x + y * linesize;
452  src = pic->f->data[0] + mx + my * linesize;
453 
454  if (emu) {
455         s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
456                                  linesize, linesize,
457  width + 1, height + 1,
458  mx, my, s->h_edge_pos, s->v_edge_pos);
459  src = s->edge_emu_buffer;
460  }
461  if (thirdpel)
462  (avg ? s->tdsp.avg_tpel_pixels_tab
463  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
464  width, height);
465  else
466  (avg ? s->hdsp.avg_pixels_tab
467  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
468  height);
469 
470  if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
471  mx = mx + (mx < (int) x) >> 1;
472  my = my + (my < (int) y) >> 1;
473  width = width >> 1;
474  height = height >> 1;
475  blocksize++;
476 
477  for (i = 1; i < 3; i++) {
478  dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
479  src = pic->f->data[i] + mx + my * uvlinesize;
480 
481  if (emu) {
482                 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
483                                          uvlinesize, uvlinesize,
484  width + 1, height + 1,
485  mx, my, (s->h_edge_pos >> 1),
486  s->v_edge_pos >> 1);
487  src = s->edge_emu_buffer;
488  }
489  if (thirdpel)
490  (avg ? s->tdsp.avg_tpel_pixels_tab
491  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
492  uvlinesize,
493  width, height);
494  else
495  (avg ? s->hdsp.avg_pixels_tab
496  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
497  uvlinesize,
498  height);
499  }
500  }
501 }
502 
503 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
504  int dir, int avg)
505 {
506  int i, j, k, mx, my, dx, dy, x, y;
507  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
508  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
509  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
510  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
511  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
512 
513  for (i = 0; i < 16; i += part_height)
514  for (j = 0; j < 16; j += part_width) {
515  const int b_xy = (4 * s->mb_x + (j >> 2)) +
516  (4 * s->mb_y + (i >> 2)) * s->b_stride;
517  int dxy;
518  x = 16 * s->mb_x + j;
519  y = 16 * s->mb_y + i;
520  k = (j >> 2 & 1) + (i >> 1 & 2) +
521  (j >> 1 & 4) + (i & 8);
522 
523  if (mode != PREDICT_MODE) {
524  svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
525  } else {
526  mx = s->next_pic->motion_val[0][b_xy][0] << 1;
527  my = s->next_pic->motion_val[0][b_xy][1] << 1;
528 
529  if (dir == 0) {
530  mx = mx * s->frame_num_offset /
531  s->prev_frame_num_offset + 1 >> 1;
532  my = my * s->frame_num_offset /
533  s->prev_frame_num_offset + 1 >> 1;
534  } else {
535  mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
536  s->prev_frame_num_offset + 1 >> 1;
537  my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
538  s->prev_frame_num_offset + 1 >> 1;
539  }
540  }
541 
542  /* clip motion vector prediction to frame border */
543  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
544  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
545 
546  /* get (optional) motion vector differential */
547  if (mode == PREDICT_MODE) {
548  dx = dy = 0;
549  } else {
550                 dy = get_interleaved_se_golomb(&s->gb_slice);
551                 dx = get_interleaved_se_golomb(&s->gb_slice);
552 
553  if (dx == INVALID_VLC || dy == INVALID_VLC) {
554  av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
555  return -1;
556  }
557  }
558 
559  /* compute motion vector */
560  if (mode == THIRDPEL_MODE) {
561  int fx, fy;
562  mx = (mx + 1 >> 1) + dx;
563  my = (my + 1 >> 1) + dy;
564  fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
565  fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
566  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
567 
568  svq3_mc_dir_part(s, x, y, part_width, part_height,
569  fx, fy, dxy, 1, dir, avg);
570  mx += mx;
571  my += my;
572  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
573  mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
574  my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
575  dxy = (mx & 1) + 2 * (my & 1);
576 
577  svq3_mc_dir_part(s, x, y, part_width, part_height,
578  mx >> 1, my >> 1, dxy, 0, dir, avg);
579  mx *= 3;
580  my *= 3;
581  } else {
582  mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
583  my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
584 
585  svq3_mc_dir_part(s, x, y, part_width, part_height,
586  mx, my, 0, 0, dir, avg);
587  mx *= 6;
588  my *= 6;
589  }
590 
591  /* update mv_cache */
592  if (mode != PREDICT_MODE) {
593  int32_t mv = pack16to32(mx, my);
594 
595  if (part_height == 8 && i < 8) {
596  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);
597 
598  if (part_width == 8 && j < 8)
599  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
600  }
601  if (part_width == 8 && j < 8)
602  AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
603  if (part_width == 4 || part_height == 4)
604  AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
605  }
606 
607  /* write back motion vectors */
608  fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
609  part_width >> 2, part_height >> 2, s->b_stride,
610  pack16to32(mx, my), 4);
611  }
612 
613  return 0;
614 }
615 
616 static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s,
617                                                     int mb_type, const int *block_offset,
618  int linesize, uint8_t *dest_y)
619 {
620  int i;
621  if (!IS_INTRA4x4(mb_type)) {
622  for (i = 0; i < 16; i++)
623  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
624  uint8_t *const ptr = dest_y + block_offset[i];
625  svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
626  s->qscale, IS_INTRA(mb_type) ? 1 : 0);
627  }
628  }
629 }
630 
631 static av_always_inline int dctcoef_get(int16_t *mb, int index)
632 {
633  return AV_RN16A(mb + index);
634 }
635 
636 static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s,
637                                                        int mb_type,
638  const int *block_offset,
639  int linesize,
640  uint8_t *dest_y)
641 {
642  int i;
643  int qscale = s->qscale;
644 
645  if (IS_INTRA4x4(mb_type)) {
646  for (i = 0; i < 16; i++) {
647  uint8_t *const ptr = dest_y + block_offset[i];
648  const int dir = s->intra4x4_pred_mode_cache[scan8[i]];
649 
650  uint8_t *topright;
651  int nnz, tr;
652  if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
653  const int topright_avail = (s->topright_samples_available << i) & 0x8000;
654  av_assert2(s->mb_y || linesize <= block_offset[i]);
655  if (!topright_avail) {
656  tr = ptr[3 - linesize] * 0x01010101u;
657  topright = (uint8_t *)&tr;
658  } else
659  topright = ptr + 4 - linesize;
660  } else
661  topright = NULL;
662 
663  s->hpc.pred4x4[dir](ptr, topright, linesize);
664  nnz = s->non_zero_count_cache[scan8[i]];
665  if (nnz) {
666  svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
667  }
668  }
669  } else {
670  s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
671  svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
672  }
673 }
674 
675 static void hl_decode_mb(SVQ3Context *s)
676 {
677  const int mb_x = s->mb_x;
678  const int mb_y = s->mb_y;
679  const int mb_xy = s->mb_xy;
680  const int mb_type = s->cur_pic->mb_type[mb_xy];
681  uint8_t *dest_y, *dest_cb, *dest_cr;
682  int linesize, uvlinesize;
683  int i, j;
684  const int *block_offset = &s->block_offset[0];
685  const int block_h = 16 >> 1;
686 
687  linesize = s->cur_pic->f->linesize[0];
688  uvlinesize = s->cur_pic->f->linesize[1];
689 
690  dest_y = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
691  dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
692  dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;
693 
694  s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
695  s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);
696 
697  if (IS_INTRA(mb_type)) {
698  s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
699  s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);
700 
701  hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
702  }
703 
704  hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);
705 
706  if (s->cbp & 0x30) {
707  uint8_t *dest[2] = { dest_cb, dest_cr };
708  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
709  s->dequant4_coeff[4][0]);
710  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
711  s->dequant4_coeff[4][0]);
712  for (j = 1; j < 3; j++) {
713  for (i = j * 16; i < j * 16 + 4; i++)
714  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
715  uint8_t *const ptr = dest[j - 1] + block_offset[i];
716  svq3_add_idct_c(ptr, s->mb + i * 16,
717  uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
718  }
719  }
720  }
721 }
722 
723 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
724 {
725  int i, j, k, m, dir, mode;
726  int cbp = 0;
727  uint32_t vlc;
728  int8_t *top, *left;
729  const int mb_xy = s->mb_xy;
730  const int b_xy = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;
731 
732  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
733  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
734  s->topright_samples_available = 0xFFFF;
735 
736  if (mb_type == 0) { /* SKIP */
737  if (s->pict_type == AV_PICTURE_TYPE_P ||
738  s->next_pic->mb_type[mb_xy] == -1) {
739  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
740  0, 0, 0, 0, 0, 0);
741 
742  if (s->pict_type == AV_PICTURE_TYPE_B)
743  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
744  0, 0, 0, 0, 1, 1);
745 
746  mb_type = MB_TYPE_SKIP;
747  } else {
748  mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
749  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
750  return -1;
751  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
752  return -1;
753 
754  mb_type = MB_TYPE_16x16;
755  }
756  } else if (mb_type < 8) { /* INTER */
757  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
758  mode = THIRDPEL_MODE;
759  else if (s->halfpel_flag &&
760  s->thirdpel_flag == !get_bits1(&s->gb_slice))
761  mode = HALFPEL_MODE;
762  else
763  mode = FULLPEL_MODE;
764 
765  /* fill caches */
766  /* note ref_cache should contain here:
767  * ????????
768  * ???11111
769  * N??11111
770  * N??11111
771  * N??11111
772  */
773 
774  for (m = 0; m < 2; m++) {
775  if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
776  for (i = 0; i < 4; i++)
777  AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
778  s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
779  } else {
780  for (i = 0; i < 4; i++)
781  AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
782  }
783  if (s->mb_y > 0) {
784  memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
785  s->cur_pic->motion_val[m][b_xy - s->b_stride],
786  4 * 2 * sizeof(int16_t));
787  memset(&s->ref_cache[m][scan8[0] - 1 * 8],
788  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
789 
790  if (s->mb_x < s->mb_width - 1) {
791  AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
792  s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
793  s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
794  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
795  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
796  } else
797  s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
798  if (s->mb_x > 0) {
799  AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
800  s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
801  s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
802  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
803  } else
804  s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
805  } else
806  memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
807  PART_NOT_AVAILABLE, 8);
808 
809  if (s->pict_type != AV_PICTURE_TYPE_B)
810  break;
811  }
812 
813  /* decode motion vector(s) and form prediction(s) */
814  if (s->pict_type == AV_PICTURE_TYPE_P) {
815  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
816  return -1;
817  } else { /* AV_PICTURE_TYPE_B */
818  if (mb_type != 2) {
819  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
820  return -1;
821  } else {
822  for (i = 0; i < 4; i++)
823  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
824  0, 4 * 2 * sizeof(int16_t));
825  }
826  if (mb_type != 1) {
827  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
828  return -1;
829  } else {
830  for (i = 0; i < 4; i++)
831  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
832  0, 4 * 2 * sizeof(int16_t));
833  }
834  }
835 
836  mb_type = MB_TYPE_16x16;
837  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
838  int8_t *i4x4 = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
839  int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;
840 
841  memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
842 
843  if (mb_type == 8) {
844  if (s->mb_x > 0) {
845  for (i = 0; i < 4; i++)
846  s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
847  if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
848  s->left_samples_available = 0x5F5F;
849  }
850  if (s->mb_y > 0) {
851  s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
852  s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
853  s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
854  s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];
855 
856  if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
857  s->top_samples_available = 0x33FF;
858  }
859 
860  /* decode prediction codes for luma blocks */
861  for (i = 0; i < 16; i += 2) {
863 
864  if (vlc >= 25U) {
866  "luma prediction:%"PRIu32"\n", vlc);
867  return -1;
868  }
869 
870  left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
871  top = &s->intra4x4_pred_mode_cache[scan8[i] - 8];
872 
873  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
874  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
875 
876  if (left[1] == -1 || left[2] == -1) {
877  av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
878  return -1;
879  }
880  }
881  } else { /* mb_type == 33, DC_128_PRED block type */
882  for (i = 0; i < 4; i++)
883  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
884  }
885 
886  AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
887  i4x4[4] = i4x4_cache[7 + 8 * 3];
888  i4x4[5] = i4x4_cache[7 + 8 * 2];
889  i4x4[6] = i4x4_cache[7 + 8 * 1];
890 
891  if (mb_type == 8) {
892             ff_h264_check_intra4x4_pred_mode(s->intra4x4_pred_mode_cache,
893                                              s->avctx, s->top_samples_available,
894                                              s->left_samples_available);
895 
896  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
897  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
898  } else {
899  for (i = 0; i < 4; i++)
900  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
901 
902  s->top_samples_available = 0x33FF;
903  s->left_samples_available = 0x5F5F;
904  }
905 
906  mb_type = MB_TYPE_INTRA4x4;
907  } else { /* INTRA16x16 */
908  dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
909  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
910 
911         if ((s->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
912                                                                      s->left_samples_available, dir, 0)) < 0) {
913  av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
914  return s->intra16x16_pred_mode;
915  }
916 
917  cbp = ff_h264_i_mb_type_info[mb_type - 8].cbp;
918  mb_type = MB_TYPE_INTRA16x16;
919  }
920 
921  if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
922  for (i = 0; i < 4; i++)
923  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
924  0, 4 * 2 * sizeof(int16_t));
925  if (s->pict_type == AV_PICTURE_TYPE_B) {
926  for (i = 0; i < 4; i++)
927  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
928  0, 4 * 2 * sizeof(int16_t));
929  }
930  }
931  if (!IS_INTRA4x4(mb_type)) {
932  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
933  }
934  if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
935  memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
936  }
937 
938  if (!IS_INTRA16x16(mb_type) &&
939  (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
940  if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U){
941  av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
942  return -1;
943  }
944 
945         cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
946                                 : ff_h264_golomb_to_inter_cbp[vlc];
947  }
948  if (IS_INTRA16x16(mb_type) ||
949  (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
950         s->qscale += get_interleaved_se_golomb(&s->gb_slice);
951 
952  if (s->qscale > 31u) {
953  av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
954  return -1;
955  }
956  }
957  if (IS_INTRA16x16(mb_type)) {
958  AV_ZERO128(s->mb_luma_dc[0] + 0);
959  AV_ZERO128(s->mb_luma_dc[0] + 8);
960  if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
961             av_log(s->avctx, AV_LOG_ERROR,
962                    "error while decoding intra luma dc\n");
963  return -1;
964  }
965  }
966 
967  if (cbp) {
968  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
969  const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
970 
971  for (i = 0; i < 4; i++)
972  if ((cbp & (1 << i))) {
973  for (j = 0; j < 4; j++) {
974  k = index ? (1 * (j & 1) + 2 * (i & 1) +
975  2 * (j & 2) + 4 * (i & 2))
976  : (4 * i + j);
977  s->non_zero_count_cache[scan8[k]] = 1;
978 
979  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
980                         av_log(s->avctx, AV_LOG_ERROR,
981                                "error while decoding block\n");
982  return -1;
983  }
984  }
985  }
986 
987  if ((cbp & 0x30)) {
988  for (i = 1; i < 3; ++i)
989  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
990                 av_log(s->avctx, AV_LOG_ERROR,
991                        "error while decoding chroma dc block\n");
992  return -1;
993  }
994 
995  if ((cbp & 0x20)) {
996  for (i = 1; i < 3; i++) {
997  for (j = 0; j < 4; j++) {
998  k = 16 * i + j;
999  s->non_zero_count_cache[scan8[k]] = 1;
1000 
1001  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
1002                     av_log(s->avctx, AV_LOG_ERROR,
1003                            "error while decoding chroma ac block\n");
1004  return -1;
1005  }
1006  }
1007  }
1008  }
1009  }
1010  }
1011 
1012  s->cbp = cbp;
1013  s->cur_pic->mb_type[mb_xy] = mb_type;
1014 
1015  if (IS_INTRA(mb_type))
1016         s->chroma_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
1017                                                              s->left_samples_available, DC_PRED8x8, 1);
1018 
1019  return 0;
1020 }
1021 
1022 static int svq3_decode_slice_header(AVCodecContext *avctx)
1023 {
1024  SVQ3Context *s = avctx->priv_data;
1025  const int mb_xy = s->mb_xy;
1026  int i, header;
1027  unsigned slice_id;
1028 
1029  header = get_bits(&s->gb, 8);
1030 
1031  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1032  /* TODO: what? */
1033  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
1034  return -1;
1035  } else {
1036  int slice_bits, slice_bytes, slice_length;
1037  int length = header >> 5 & 3;
1038 
1039  slice_length = show_bits(&s->gb, 8 * length);
1040  slice_bits = slice_length * 8;
1041  slice_bytes = slice_length + length - 1;
1042 
1043  if (8LL*slice_bytes > get_bits_left(&s->gb)) {
1044  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
1045  return -1;
1046  }
1047 
1048  skip_bits(&s->gb, 8);
1049 
1049         av_fast_malloc(&s->slice_buf, &s->slice_size,
1050                        slice_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
1051         if (!s->slice_buf)
1052  return AVERROR(ENOMEM);
1053 
1054  memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
1055 
1056  init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);
1057 
1058  if (s->watermark_key) {
1059  uint32_t header = AV_RL32(&s->gb_slice.buffer[1]);
1060  AV_WL32(&s->gb_slice.buffer[1], header ^ s->watermark_key);
1061  }
1062  if (length > 0) {
1063  memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
1064  }
1065  skip_bits_long(&s->gb, slice_bytes * 8);
1066  }
1067 
1068  if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
1069  av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
1070  return -1;
1071  }
1072 
1073  s->pict_type = ff_h264_golomb_to_pict_type[slice_id];
1074 
1075  if ((header & 0x9F) == 2) {
1076  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
1077  get_bits(&s->gb_slice, i);
1078  } else {
1079  skip_bits1(&s->gb_slice);
1080  }
1081 
1082  s->slice_num = get_bits(&s->gb_slice, 8);
1083  s->qscale = get_bits(&s->gb_slice, 5);
1084  s->adaptive_quant = get_bits1(&s->gb_slice);
1085 
1086  /* unknown fields */
1087  skip_bits1(&s->gb_slice);
1088 
1089  if (s->has_watermark)
1090  skip_bits1(&s->gb_slice);
1091 
1092  skip_bits1(&s->gb_slice);
1093  skip_bits(&s->gb_slice, 2);
1094 
1095  if (skip_1stop_8data_bits(&s->gb_slice) < 0)
1096  return AVERROR_INVALIDDATA;
1097 
1098  /* reset intra predictors and invalidate motion vector references */
1099  if (s->mb_x > 0) {
1100  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
1101  -1, 4 * sizeof(int8_t));
1102  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
1103  -1, 8 * sizeof(int8_t) * s->mb_x);
1104  }
1105  if (s->mb_y > 0) {
1106  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
1107  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
1108 
1109  if (s->mb_x > 0)
1110  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
1111  }
1112 
1113  return 0;
1114 }
1115 
1116 static void init_dequant4_coeff_table(SVQ3Context *s)
1117 {
1118  int q, x;
1119  const int max_qp = 51;
1120 
1121  for (q = 0; q < max_qp + 1; q++) {
1122  int shift = ff_h264_quant_div6[q] + 2;
1123  int idx = ff_h264_quant_rem6[q];
1124  for (x = 0; x < 16; x++)
1125  s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
1126  ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
1127  }
1128 }
1129 
1130 static av_cold int svq3_decode_init(AVCodecContext *avctx)
1131 {
1132  SVQ3Context *s = avctx->priv_data;
1133  int m, x, y;
1134  unsigned char *extradata;
1135  unsigned char *extradata_end;
1136  unsigned int size;
1137  int marker_found = 0;
1138  int ret;
1139 
1140  s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
1141  s->last_pic = av_mallocz(sizeof(*s->last_pic));
1142  s->next_pic = av_mallocz(sizeof(*s->next_pic));
1143  if (!s->next_pic || !s->last_pic || !s->cur_pic) {
1144  ret = AVERROR(ENOMEM);
1145  goto fail;
1146  }
1147 
1148  s->cur_pic->f = av_frame_alloc();
1149  s->last_pic->f = av_frame_alloc();
1150  s->next_pic->f = av_frame_alloc();
1151  if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1152  return AVERROR(ENOMEM);
1153 
1154  ff_h264dsp_init(&s->h264dsp, 8, 1);
1155     ff_h264_pred_init(&s->hpc, AV_CODEC_ID_SVQ3, 8, 1);
1156     ff_videodsp_init(&s->vdsp, 8);
1157 
1158 
1159  avctx->bits_per_raw_sample = 8;
1160 
1161  ff_hpeldsp_init(&s->hdsp, avctx->flags);
1162  ff_tpeldsp_init(&s->tdsp);
1163 
1164  avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1165  avctx->color_range = AVCOL_RANGE_JPEG;
1166 
1167  s->avctx = avctx;
1168  s->halfpel_flag = 1;
1169  s->thirdpel_flag = 1;
1170  s->has_watermark = 0;
1171 
1172  /* prowl for the "SEQH" marker in the extradata */
1173  extradata = (unsigned char *)avctx->extradata;
1174  extradata_end = avctx->extradata + avctx->extradata_size;
1175  if (extradata) {
1176  for (m = 0; m + 8 < avctx->extradata_size; m++) {
1177  if (!memcmp(extradata, "SEQH", 4)) {
1178  marker_found = 1;
1179  break;
1180  }
1181  extradata++;
1182  }
1183  }
1184 
1185  /* if a match was found, parse the extra data */
1186  if (marker_found) {
1187  GetBitContext gb;
1188  int frame_size_code;
1189  int unk0, unk1, unk2, unk3, unk4;
1190 
1191  size = AV_RB32(&extradata[4]);
1192  if (size > extradata_end - extradata - 8) {
1193  ret = AVERROR_INVALIDDATA;
1194  goto fail;
1195  }
1196  init_get_bits(&gb, extradata + 8, size * 8);
1197 
1198  /* 'frame size code' and optional 'width, height' */
1199  frame_size_code = get_bits(&gb, 3);
1200  switch (frame_size_code) {
1201  case 0:
1202  avctx->width = 160;
1203  avctx->height = 120;
1204  break;
1205  case 1:
1206  avctx->width = 128;
1207  avctx->height = 96;
1208  break;
1209  case 2:
1210  avctx->width = 176;
1211  avctx->height = 144;
1212  break;
1213  case 3:
1214  avctx->width = 352;
1215  avctx->height = 288;
1216  break;
1217  case 4:
1218  avctx->width = 704;
1219  avctx->height = 576;
1220  break;
1221  case 5:
1222  avctx->width = 240;
1223  avctx->height = 180;
1224  break;
1225  case 6:
1226  avctx->width = 320;
1227  avctx->height = 240;
1228  break;
1229  case 7:
1230  avctx->width = get_bits(&gb, 12);
1231  avctx->height = get_bits(&gb, 12);
1232  break;
1233  }
1234 
1235  s->halfpel_flag = get_bits1(&gb);
1236  s->thirdpel_flag = get_bits1(&gb);
1237 
1238  /* unknown fields */
1239  unk0 = get_bits1(&gb);
1240  unk1 = get_bits1(&gb);
1241  unk2 = get_bits1(&gb);
1242  unk3 = get_bits1(&gb);
1243 
1244  s->low_delay = get_bits1(&gb);
1245 
1246  /* unknown field */
1247  unk4 = get_bits1(&gb);
1248 
1249  av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1250  unk0, unk1, unk2, unk3, unk4);
1251 
1252  if (skip_1stop_8data_bits(&gb) < 0) {
1253  ret = AVERROR_INVALIDDATA;
1254  goto fail;
1255  }
1256 
1257  s->has_watermark = get_bits1(&gb);
1258  avctx->has_b_frames = !s->low_delay;
1259  if (s->has_watermark) {
1260 #if CONFIG_ZLIB
1261  unsigned watermark_width = get_interleaved_ue_golomb(&gb);
1262  unsigned watermark_height = get_interleaved_ue_golomb(&gb);
1263  int u1 = get_interleaved_ue_golomb(&gb);
1264  int u2 = get_bits(&gb, 8);
1265  int u3 = get_bits(&gb, 2);
1266  int u4 = get_interleaved_ue_golomb(&gb);
1267  unsigned long buf_len = watermark_width *
1268  watermark_height * 4;
1269  int offset = get_bits_count(&gb) + 7 >> 3;
1270  uint8_t *buf;
1271 
1272  if (watermark_height <= 0 ||
1273  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
1274  ret = -1;
1275  goto fail;
1276  }
1277 
1278  buf = av_malloc(buf_len);
1279  if (!buf) {
1280  ret = AVERROR(ENOMEM);
1281  goto fail;
1282  }
1283  av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1284  watermark_width, watermark_height);
1285  av_log(avctx, AV_LOG_DEBUG,
1286  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1287  u1, u2, u3, u4, offset);
1288  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1289  size - offset) != Z_OK) {
1290  av_log(avctx, AV_LOG_ERROR,
1291  "could not uncompress watermark logo\n");
1292  av_free(buf);
1293  ret = -1;
1294  goto fail;
1295  }
1296  s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1297  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1298  av_log(avctx, AV_LOG_DEBUG,
1299  "watermark key %#"PRIx32"\n", s->watermark_key);
1300  av_free(buf);
1301 #else
1302  av_log(avctx, AV_LOG_ERROR,
1303  "this svq3 file contains watermark which need zlib support compiled in\n");
1304  ret = -1;
1305  goto fail;
1306 #endif
1307  }
1308  }
1309 
1310  s->mb_width = (avctx->width + 15) / 16;
1311  s->mb_height = (avctx->height + 15) / 16;
1312  s->mb_stride = s->mb_width + 1;
1313  s->mb_num = s->mb_width * s->mb_height;
1314  s->b_stride = 4 * s->mb_width;
1315  s->h_edge_pos = s->mb_width * 16;
1316  s->v_edge_pos = s->mb_height * 16;
1317 
1318  s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
1319  if (!s->intra4x4_pred_mode)
1320  return AVERROR(ENOMEM);
1321 
1322  s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
1323  sizeof(*s->mb2br_xy));
1324  if (!s->mb2br_xy)
1325  return AVERROR(ENOMEM);
1326 
1327  for (y = 0; y < s->mb_height; y++)
1328  for (x = 0; x < s->mb_width; x++) {
1329  const int mb_xy = x + y * s->mb_stride;
1330 
1331  s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
1332  }
1333 
1334     init_dequant4_coeff_table(s);
1335 
1336  return 0;
1337 fail:
1338  svq3_decode_end(avctx);
1339  return ret;
1340 }
1341 
1342 static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
1343 {
1344  int i;
1345  for (i = 0; i < 2; i++) {
1346  av_buffer_unref(&pic->motion_val_buf[i]);
1347  av_buffer_unref(&pic->ref_index_buf[i]);
1348  }
1349     av_buffer_unref(&pic->mb_type_buf);
1350 
1351  av_frame_unref(pic->f);
1352 }
1353 
1354 static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
1355 {
1356  SVQ3Context *s = avctx->priv_data;
1357  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
1358  const int mb_array_size = s->mb_stride * s->mb_height;
1359  const int b4_stride = s->mb_width * 4 + 1;
1360  const int b4_array_size = b4_stride * s->mb_height * 4;
1361  int ret;
1362 
1363  if (!pic->motion_val_buf[0]) {
1364  int i;
1365 
1366  pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) * sizeof(uint32_t));
1367  if (!pic->mb_type_buf)
1368  return AVERROR(ENOMEM);
1369  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
1370 
1371  for (i = 0; i < 2; i++) {
1372  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1373  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1374  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1375  ret = AVERROR(ENOMEM);
1376  goto fail;
1377  }
1378 
1379  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1380  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1381  }
1382  }
1383 
1384  ret = ff_get_buffer(avctx, pic->f,
1385  (s->pict_type != AV_PICTURE_TYPE_B) ?
1386                         AV_GET_BUFFER_FLAG_REF : 0);
1387     if (ret < 0)
1388  goto fail;
1389 
1390  if (!s->edge_emu_buffer) {
1391  s->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
1392  if (!s->edge_emu_buffer)
1393  return AVERROR(ENOMEM);
1394  }
1395 
1396  return 0;
1397 fail:
1398  free_picture(avctx, pic);
1399  return ret;
1400 }
1401 
1402 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1403  int *got_frame, AVPacket *avpkt)
1404 {
1405  SVQ3Context *s = avctx->priv_data;
1406  int buf_size = avpkt->size;
1407  int left;
1408  uint8_t *buf;
1409  int ret, m, i;
1410 
1411  /* special case for last picture */
1412  if (buf_size == 0) {
1413  if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
1414  ret = av_frame_ref(data, s->next_pic->f);
1415  if (ret < 0)
1416  return ret;
1417  s->last_frame_output = 1;
1418  *got_frame = 1;
1419  }
1420  return 0;
1421  }
1422 
1423  s->mb_x = s->mb_y = s->mb_xy = 0;
1424 
1425  if (s->watermark_key) {
1426  av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1427  if (!s->buf)
1428  return AVERROR(ENOMEM);
1429  memcpy(s->buf, avpkt->data, buf_size);
1430  buf = s->buf;
1431  } else {
1432  buf = avpkt->data;
1433  }
1434 
1435  ret = init_get_bits(&s->gb, buf, 8 * buf_size);
1436  if (ret < 0)
1437  return ret;
1438 
1439  if (svq3_decode_slice_header(avctx))
1440  return -1;
1441 
1442  if (s->pict_type != AV_PICTURE_TYPE_B)
1443  FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);
1444 
1445  av_frame_unref(s->cur_pic->f);
1446 
1447  /* for skipping the frame */
1448  s->cur_pic->f->pict_type = s->pict_type;
1449     s->cur_pic->f->key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
1450 
1451  ret = get_buffer(avctx, s->cur_pic);
1452  if (ret < 0)
1453  return ret;
1454 
1455  for (i = 0; i < 16; i++) {
1456  s->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1457  s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1458  }
1459  for (i = 0; i < 16; i++) {
1460  s->block_offset[16 + i] =
1461  s->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1462  s->block_offset[48 + 16 + i] =
1463  s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1464  }
1465 
1466  if (s->pict_type != AV_PICTURE_TYPE_I) {
1467  if (!s->last_pic->f->data[0]) {
1468  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1469  av_frame_unref(s->last_pic->f);
1470  ret = get_buffer(avctx, s->last_pic);
1471  if (ret < 0)
1472  return ret;
1473  memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1474  memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1475  s->last_pic->f->linesize[1]);
1476  memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1477  s->last_pic->f->linesize[2]);
1478  }
1479 
1480  if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1481  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1482  av_frame_unref(s->next_pic->f);
1483  ret = get_buffer(avctx, s->next_pic);
1484  if (ret < 0)
1485  return ret;
1486  memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1487  memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1488  s->next_pic->f->linesize[1]);
1489  memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1490  s->next_pic->f->linesize[2]);
1491  }
1492  }
1493 
1494  if (avctx->debug & FF_DEBUG_PICT_INFO)
1495         av_log(s->avctx, AV_LOG_DEBUG,
1496                "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1497                av_get_picture_type_char(s->pict_type),
1498                s->halfpel_flag, s->thirdpel_flag,
1499  s->adaptive_quant, s->qscale, s->slice_num);
1500 
1501  if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
1502         avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I ||
1503         avctx->skip_frame >= AVDISCARD_ALL)
1504  return 0;
1505 
1506  if (s->next_p_frame_damaged) {
1507  if (s->pict_type == AV_PICTURE_TYPE_B)
1508  return 0;
1509  else
1510  s->next_p_frame_damaged = 0;
1511  }
1512 
1513  if (s->pict_type == AV_PICTURE_TYPE_B) {
1514         s->frame_num_offset = s->slice_num - s->frame_num;
1515 
1516  if (s->frame_num_offset < 0)
1517  s->frame_num_offset += 256;
1518  if (s->frame_num_offset == 0 ||
1519             s->frame_num_offset >= s->prev_frame_num_offset) {
1520             av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1521  return -1;
1522  }
1523  } else {
1524  s->prev_frame_num = s->frame_num;
1525  s->frame_num = s->slice_num;
1526         s->prev_frame_num_offset = s->frame_num - s->prev_frame_num;
1527 
1528  if (s->prev_frame_num_offset < 0)
1529  s->prev_frame_num_offset += 256;
1530  }
1531 
1532  for (m = 0; m < 2; m++) {
1533  int i;
1534  for (i = 0; i < 4; i++) {
1535  int j;
1536  for (j = -1; j < 4; j++)
1537  s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1538  if (i < 3)
1539  s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1540  }
1541  }
1542 
1543  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1544  for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1545  unsigned mb_type;
1546  s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
1547 
1548  if ((get_bits_left(&s->gb_slice)) <= 7) {
1549  if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
1550  show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {
1551 
1552  if (svq3_decode_slice_header(avctx))
1553  return -1;
1554  }
1555  /* TODO: support s->mb_skip_run */
1556  }
1557 
1558  mb_type = get_interleaved_ue_golomb(&s->gb_slice);
1559 
1560  if (s->pict_type == AV_PICTURE_TYPE_I)
1561  mb_type += 8;
1562  else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1563  mb_type += 4;
1564  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1565                 av_log(s->avctx, AV_LOG_ERROR,
1566                        "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1567  return -1;
1568  }
1569 
1570  if (mb_type != 0 || s->cbp)
1571  hl_decode_mb(s);
1572 
1573  if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
1574  s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
1575  (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1576  }
1577 
1578  ff_draw_horiz_band(avctx, s->cur_pic->f,
1579  s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1580  16 * s->mb_y, 16, PICT_FRAME, 0,
1581  s->low_delay);
1582  }
1583 
1584  left = buf_size*8 - get_bits_count(&s->gb_slice);
1585 
1586  if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
1587  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
1588  //av_hex_dump(stderr, buf+buf_size-8, 8);
1589  }
1590 
1591  if (left < 0) {
1592  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1593  return -1;
1594  }
1595 
1596  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
1597  ret = av_frame_ref(data, s->cur_pic->f);
1598  else if (s->last_pic->f->data[0])
1599  ret = av_frame_ref(data, s->last_pic->f);
1600  if (ret < 0)
1601  return ret;
1602 
1603  /* Do not output the last pic after seeking. */
1604  if (s->last_pic->f->data[0] || s->low_delay)
1605  *got_frame = 1;
1606 
1607  if (s->pict_type != AV_PICTURE_TYPE_B) {
1608  FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);
1609  } else {
1610  av_frame_unref(s->cur_pic->f);
1611  }
1612 
1613  return buf_size;
1614 }
1615 
1616 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1617 {
1618  SVQ3Context *s = avctx->priv_data;
1619 
1620  free_picture(avctx, s->cur_pic);
1621  free_picture(avctx, s->next_pic);
1622  free_picture(avctx, s->last_pic);
1623  av_frame_free(&s->cur_pic->f);
1624  av_frame_free(&s->next_pic->f);
1625  av_frame_free(&s->last_pic->f);
1626  av_freep(&s->cur_pic);
1627  av_freep(&s->next_pic);
1628  av_freep(&s->last_pic);
1629  av_freep(&s->slice_buf);
1630     av_freep(&s->intra4x4_pred_mode);
1631     av_freep(&s->edge_emu_buffer);
1632     av_freep(&s->mb2br_xy);
1633 
1634 
1635  av_freep(&s->buf);
1636  s->buf_size = 0;
1637 
1638  return 0;
1639 }
1640 
1641 AVCodec ff_svq3_decoder = {
1642     .name           = "svq3",
1643     .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1644     .type           = AVMEDIA_TYPE_VIDEO,
1645     .id             = AV_CODEC_ID_SVQ3,
1646     .priv_data_size = sizeof(SVQ3Context),
1647     .init           = svq3_decode_init,
1648     .close          = svq3_decode_end,
1649     .decode         = svq3_decode_frame,
1650     .capabilities   = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1651                       AV_CODEC_CAP_DR1             |
1652                       AV_CODEC_CAP_DELAY,
1653     .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1654                                                      AV_PIX_FMT_NONE},
1655 };
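/*
 * Illustrative sketch (not part of the original file): driving this decoder
 * through the public libavcodec API. Error handling is omitted; "pkt" is a
 * hypothetical AVPacket demuxed from the QuickTime file and "avctx" already
 * carries the extradata described in the comment at the top of this file.
 *
 *     const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_SVQ3);
 *     AVFrame *frame = av_frame_alloc();
 *     avcodec_open2(avctx, codec, NULL);
 *     avcodec_send_packet(avctx, pkt);
 *     while (avcodec_receive_frame(avctx, frame) == 0) {
 *         // process frame->data[] (YUVJ420P planes)
 *     }
 *     avcodec_send_packet(avctx, NULL); // flush: the decoder sets AV_CODEC_CAP_DELAY
 */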
#define MB_TYPE_INTRA16x16
Definition: avcodec.h:1243
uint8_t pred_mode
Definition: h264data.h:35
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
#define MB_TYPE_SKIP
Definition: avcodec.h:1253
discard all frames except keyframes
Definition: avcodec.h:785
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
const char * s
Definition: avisynth_c.h:768
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int cbp
Definition: svq3.c:111
static int shift(int a, int b)
Definition: sonic.c:82
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
#define C
HpelDSPContext hdsp
Definition: svq3.c:87
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:247
uint16_t ff_svq1_packet_checksum(const uint8_t *data, const int length, int value)
Definition: svq13.c:60
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:298
else temp
Definition: vf_mcdeint.c:259
static void skip_bits_long(GetBitContext *s, int n)
Definition: get_bits.h:204
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
int prev_frame_num
Definition: svq3.c:115
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: svq3.c:379
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2396
int size
Definition: avcodec.h:1591
#define MB_TYPE_INTRA4x4
Definition: avcodec.h:1242
int mb_xy
Definition: svq3.c:121
Definition: vf_geq.c:46
const uint8_t * buffer
Definition: get_bits.h:56
int av_log2(unsigned v)
Definition: intmath.c:26
uint8_t * slice_buf
Definition: svq3.c:96
#define INVALID_VLC
Definition: golomb.h:38
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1887
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:120
int v_edge_pos
Definition: svq3.c:107
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:252
const uint8_t ff_h264_quant_rem6[QP_MAX_NUM+1]
Definition: h264data.c:174
discard all
Definition: avcodec.h:786
uint8_t run
Definition: svq3.c:205
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3060
#define FULLPEL_MODE
Definition: svq3.c:149
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:30
AVCodec.
Definition: avcodec.h:3573
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
#define AV_COPY32(d, s)
Definition: intreadwrite.h:586
int16_t mb[16 *48 *2]
Definition: svq3.c:142
Macro definitions for various function/variable attributes.
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:503
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3284
static int16_t block[64]
Definition: dct.c:113
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:983
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
int has_watermark
Definition: svq3.c:100
int thirdpel_flag
Definition: svq3.c:99
int mb_num
Definition: svq3.c:123
const uint8_t ff_h264_dequant4_coeff_init[6][3]
Definition: h264data.c:152
static const uint8_t luma_dc_zigzag_scan[16]
Definition: svq3.c:170
uint8_t
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:616
#define av_cold
Definition: attributes.h:82
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:140
#define mb
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:63
#define DC_PRED8x8
Definition: h264pred.h:68
mode
Definition: f_perms.c:27
int block_offset[2 *(16 *3)]
Definition: svq3.c:146
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2900
static av_always_inline int svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C, int i, int list, int part_width)
Definition: svq3.c:358
static av_always_inline int dctcoef_get(int16_t *mb, int index)
Definition: svq3.c:631
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:374
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1774
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:87
const uint8_t ff_h264_chroma_dc_scan[4]
Definition: h264data.c:54
Context for storing H.264 prediction functions.
Definition: h264pred.h:92
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
Definition: mem.h:101
#define height
uint8_t * data
Definition: avcodec.h:1590
thirdpel DSP context
Definition: tpeldsp.h:42
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:199
const IMbInfo ff_h264_i_mb_type_info[26]
Definition: h264data.c:66
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:91
AVBufferRef * ref_index_buf[2]
Definition: svq3.c:78
const uint8_t ff_h264_golomb_to_inter_cbp[48]
Definition: h264data.c:48
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx, int top_samples_available, int left_samples_available)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the available blocks.
Definition: h264_parse.c:113
thirdpel DSP functions
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:801
ptrdiff_t size
Definition: opengl_enc.c:101
static const uint8_t header[24]
Definition: sdr2.c:67
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:860
#define A(x)
Definition: vp56_arith.h:28
#define av_log(a,...)
int prev_frame_num_offset
Definition: svq3.c:114
int low_delay
Definition: svq3.c:118
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:723
#define U(x)
Definition: vp56_arith.h:37
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:568
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1981
#define HALFPEL_MODE
Definition: svq3.c:150
AVCodecContext * avctx
Definition: svq3.c:83
int8_t * intra4x4_pred_mode
Definition: svq3.c:132
#define AVERROR(e)
Definition: error.h:43
uint8_t * edge_emu_buffer
Definition: svq3.c:138
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
Definition: frame.c:153
av_cold void ff_tpeldsp_init(TpelDSPContext *c)
Definition: tpeldsp.c:312
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:176
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1754
int frame_num
Definition: svq3.c:112
int mb_x
Definition: svq3.c:120
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:222
GLsizei GLsizei * length
Definition: opengl_enc.c:115
unsigned int left_samples_available
Definition: svq3.c:136
const char * name
Name of the codec implementation.
Definition: avcodec.h:3580
#define IS_SKIP(a)
Definition: mpegutils.h:83
int chroma_pred_mode
Definition: svq3.c:128
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
#define PREDICT_MODE
Definition: svq3.c:152
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:226
#define fail()
Definition: checkasm.h:82
unsigned int topright_samples_available
Definition: svq3.c:135
Sorenson Vector Quantizer #1 (SVQ1) video codec.
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:499
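A hedged sketch of the usual pattern (names are illustrative): keep the pointer and its current size together so the buffer is only reallocated when it actually has to grow; the caller must still check the pointer afterwards.
#include <stdint.h>
#include "libavutil/mem.h"

static uint8_t *grow_scratch(uint8_t **buf, unsigned int *buf_size, size_t needed)
{
    av_fast_malloc(buf, buf_size, needed);  /* reuses *buf if it is already >= needed */
    return *buf;                            /* NULL here means the allocation failed */
}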
Definition: svq3.c:68
useful rectangle filling function
AVBufferRef * motion_val_buf[2]
Definition: svq3.c:71
tpel_mc_func avg_tpel_pixels_tab[11]
Definition: tpeldsp.h:54
Half-pel DSP context.
Definition: hpeldsp.h:45
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: avcodec.h:952
SVQ3Frame * cur_pic
Definition: svq3.c:91
Context for storing H.264 DSP functions.
Definition: h264dsp.h:41
uint32_t dequant4_coeff[QP_MAX_NUM+1][16]
Definition: svq3.c:145
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:258
int16_t(*[2] motion_val)[2]
Definition: svq3.c:72
#define FFMIN(a, b)
Definition: common.h:96
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range
Definition: pixfmt.h:74
#define width
int width
picture width / height.
Definition: avcodec.h:1846
int32_t
GetBitContext gb_slice
Definition: svq3.c:95
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:282
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:1130
tpel_mc_func put_tpel_pixels_tab[11]
Thirdpel motion compensation with rounding (a + b + 1) >> 1.
Definition: tpeldsp.h:53
H.264 / AVC / MPEG-4 part10 codec.
int b_stride
Definition: svq3.c:124
H264PredContext hpc
Definition: svq3.c:86
int n
Definition: avisynth_c.h:684
#define src
Definition: vp9dsp.c:530
int last_frame_output
Definition: svq3.c:108
int next_p_frame_damaged
Definition: svq3.c:105
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:459
#define IS_INTRA16x16(a)
Definition: mpegutils.h:78
static const int8_t mv[256][2]
Definition: 4xm.c:77
H264DSPContext h264dsp
Definition: svq3.c:85
Half-pel DSP functions.
AVCodec ff_svq3_decoder
Definition: svq3.c:1641
GetBitContext gb
Definition: svq3.c:94
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:215
int debug
debug
Definition: avcodec.h:2899
int intra16x16_pred_mode
Definition: svq3.c:129
main external API structure.
Definition: avcodec.h:1659
const uint8_t ff_h264_chroma_qp[7][QP_MAX_NUM+1]
Definition: h264data.c:203
uint8_t * data
The data buffer.
Definition: buffer.h:89
#define QP_MAX_NUM
Definition: h264.h:27
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: utils.c:928
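A hedged sketch of the decoder-side call (internal libavcodec API; names are illustrative): request a frame from the user's allocator, flagging that the decoder will keep it as a reference picture.
#include "libavcodec/avcodec.h"
#include "libavcodec/internal.h"

static int alloc_reference(AVCodecContext *avctx, AVFrame *frame)
{
    /* AV_GET_BUFFER_FLAG_REF: the decoder may reuse this frame later */
    return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
}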
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
void * buf
Definition: avisynth_c.h:690
GLint GLenum type
Definition: opengl_enc.c:105
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:656
int extradata_size
Definition: avcodec.h:1775
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
int qscale
Definition: svq3.c:110
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:82
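A small illustrative sketch: the returned AVBufferRef owns size bytes of zero-initialized memory and is released with av_buffer_unref().
#include "libavutil/buffer.h"
#include "libavutil/error.h"

static int buffer_example(int size)
{
    AVBufferRef *buf = av_buffer_allocz(size);  /* buf->data is size zeroed bytes */
    if (!buf)
        return AVERROR(ENOMEM);
    buf->data[0] = 0x42;                        /* writable: we hold the only reference */
    av_buffer_unref(&buf);                      /* frees the data and sets buf to NULL */
    return 0;
}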
AVBufferRef * mb_type_buf
Definition: svq3.c:74
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:299
const uint8_t ff_h264_golomb_to_intra4x4_cbp[48]
Definition: h264data.c:42
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:324
int mb_height
Definition: svq3.c:122
enum AVPictureType pict_type
Definition: svq3.c:117
const uint8_t ff_h264_quant_div6[QP_MAX_NUM+1]
Definition: h264data.c:182
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:292
int index
Definition: gxfenc.c:89
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:672
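A hedged sketch (the real helper also handles big-endian hosts): both 16-bit motion-vector components are packed into one 32-bit word so a whole vector can be stored with a single aligned write, H.264-cache style; names below are illustrative.
#include <stdint.h>
#include "libavutil/intreadwrite.h"
#include "libavcodec/h264dec.h"

/* store an (mx, my) motion vector with a single aligned 32-bit write */
static void store_mv(int16_t (*dst)[2], int mx, int my)
{
    AV_WN32A(*dst, pack16to32(mx, my));
}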
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:428
uint32_t * mb_type
Definition: svq3.c:75
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:406
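A hedged sketch of how the bit reader is driven inside libavcodec (the field layout is made up for illustration): initialize once over the input buffer, then pull bits with the get_bits*() helpers.
#include "libavcodec/get_bits.h"

static int bitreader_example(const uint8_t *data, int size)
{
    GetBitContext gb;
    int ret, flag, field;

    ret = init_get_bits(&gb, data, 8 * size);  /* bit_size is given in bits, not bytes */
    if (ret < 0)
        return ret;
    flag  = get_bits1(&gb);                    /* one flag bit */
    field = get_bits(&gb, 5);                  /* a 5-bit field */
    skip_bits(&gb, 2);                         /* two reserved bits */
    return get_bits_left(&gb) < 0 ? AVERROR_INVALIDDATA : (flag << 5 | field);
}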
static void init_dequant4_coeff_table(SVQ3Context *s)
Definition: svq3.c:1116
#define MB_TYPE_16x16
Definition: avcodec.h:1245
void(* h264_chroma_dc_dequant_idct)(int16_t *block, int qmul)
Definition: h264dsp.h:103
static int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1616
#define mid_pred
Definition: mathops.h:96
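Illustrative use (H.264/SVQ3-style median motion-vector prediction; the neighbour arrays are hypothetical): the predictor is simply the per-component median of the left, top and top-right neighbours.
#include "libavcodec/mathops.h"

/* median-predict one motion vector from neighbours A (left), B (top), C (top-right) */
static void predict_mv(int16_t *mx, int16_t *my,
                       const int16_t A[2], const int16_t B[2], const int16_t C[2])
{
    *mx = mid_pred(A[0], B[0], C[0]);
    *my = mid_pred(A[1], B[1], C[1]);
}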
int8_t ref_cache[2][5 *8]
Definition: svq3.c:141
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:177
int mb_y
Definition: svq3.c:120
AVPictureType
Definition: avutil.h:266
#define IS_INTER(a)
Definition: mpegutils.h:81
int slice_num
Definition: svq3.c:109
#define u(width,...)
AVFrame * f
Definition: svq3.c:69
uint8_t * buf
Definition: svq3.c:102
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:262
SVQ3Frame * last_pic
Definition: svq3.c:93
VideoDSPContext vdsp
Definition: svq3.c:89
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:484
static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1342
uint32_t * mb2br_xy
Definition: svq3.c:126
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
uint8_t level
Definition: svq3.c:206
Definition: vp9.h:84
#define AV_ZERO128(d)
Definition: intreadwrite.h:622
static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt)
Definition: ccaption_dec.c:752
A reference to a data buffer.
Definition: buffer.h:81
#define avg(a, b, c, d)
discard all non reference
Definition: avcodec.h:782
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:105
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:636
uint8_t non_zero_count_cache[15 *8]
Definition: svq3.c:144
uint8_t cbp
Definition: h264data.h:36
common internal api header.
if (ret < 0)
Definition: vf_mcdeint.c:282
static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1354
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
int mb_stride
Definition: svq3.c:123
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available, int left_samples_available, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the available blocks.
Definition: h264_parse.c:161
int16_t mb_luma_dc[3][16 *2]
Definition: svq3.c:143
int h_edge_pos
Definition: svq3.c:106
Bi-dir predicted.
Definition: avutil.h:270
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:223
#define stride
int frame_num_offset
Definition: svq3.c:113
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding.
Definition: avcodec.h:733
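A hedged sketch of why the padding matters when feeding a decoder: allocate the input data with the extra bytes and zero them, so optimized bitstream readers that fetch 32 or 64 bits at a time never read uninitialized memory.
#include <string.h>
#include "libavcodec/avcodec.h"
#include "libavutil/mem.h"

static uint8_t *copy_with_padding(const uint8_t *src, int size)
{
    uint8_t *buf = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return NULL;
    memcpy(buf, src, size);
    memset(buf + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);  /* zeroed tail for overreads */
    return buf;
}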
#define IS_INTRA(x, y)
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:214
void * priv_data
Definition: avcodec.h:1701
#define THIRDPEL_MODE
Definition: svq3.c:151
#define PICT_FRAME
Definition: mpegutils.h:39
unsigned int top_samples_available
Definition: svq3.c:134
#define IS_INTRA4x4(a)
Definition: mpegutils.h:77
#define av_free(p)
static void hl_decode_mb(SVQ3Context *s)
Definition: svq3.c:675
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:1022
#define PART_NOT_AVAILABLE
Definition: h264dec.h:396
int slice_size
Definition: svq3.c:97
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:253
#define AV_ZERO32(d)
Definition: intreadwrite.h:614
TpelDSPContext tdsp
Definition: svq3.c:88
static const uint8_t svq3_scan[16]
Definition: svq3.c:163
#define AV_RN16A(p)
Definition: intreadwrite.h:522
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: svq3.c:131
int mb_width
Definition: svq3.c:122
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:67
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:189
static void svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:258
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2452
#define av_freep(p)
uint32_t watermark_key
Definition: svq3.c:101
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:573
#define av_always_inline
Definition: attributes.h:39
SVQ3Frame * next_pic
Definition: svq3.c:92
#define FFSWAP(type, a, b)
Definition: common.h:99
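Illustrative only: the macro swaps two lvalues of the stated type; decoders commonly use the same idiom to rotate picture pointers between frames.
#include "libavutil/common.h"

static void swap_example(void)
{
    int a = 1, b = 2;
    FFSWAP(int, a, b);   /* now a == 2 and b == 1 */
}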
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:115
int buf_size
Definition: svq3.c:103
exp golomb vlc stuff
AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
This structure stores compressed data.
Definition: avcodec.h:1567
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1402
static const struct @104 svq3_dct_tables[2][16]
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1343
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:958
for (j = 16; j > 0; --j)
Predicted.
Definition: avutil.h:269
int halfpel_flag
Definition: svq3.c:98
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
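Illustrative: the macro writes the value byte by byte in little-endian order, regardless of host endianness.
#include <stdint.h>
#include "libavutil/intreadwrite.h"

static void wl32_example(void)
{
    uint8_t buf[4];
    AV_WL32(buf, 0x11223344);   /* buf now holds { 0x44, 0x33, 0x22, 0x11 } */
}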
int adaptive_quant
Definition: svq3.c:104
int8_t * ref_index[2]
Definition: svq3.c:79
int16_t mv_cache[2][5 *8][2]
Definition: svq3.c:140