/*
 * SVQ1 decoder
 * ported to MPlayer by Arpi <arpi@thot.banki.hu>
 * ported to libavcodec by Nick Kurshev <nickols_k@mail.ru>
 *
 * Copyright (c) 2002 The Xine project
 * Copyright (c) 2002 The FFmpeg project
 *
 * SVQ1 Encoder (c) 2004 Mike Melanson <melanson@pcisys.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sorenson Vector Quantizer #1 (SVQ1) video codec.
 * For more information about the SVQ1 algorithm, visit:
 * http://www.pcisys.net/~melanson/codecs/
 */

#include "avcodec.h"
#include "get_bits.h"
#include "h263.h"
#include "hpeldsp.h"
#include "internal.h"
#include "mathops.h"
#include "svq1.h"

static VLC svq1_block_type;
static VLC svq1_motion_component;
static VLC svq1_intra_multistage[6];
static VLC svq1_inter_multistage[6];
static VLC svq1_intra_mean;
static VLC svq1_inter_mean;

/* motion vector (prediction) */
typedef struct svq1_pmv_s {
    int x;
    int y;
} svq1_pmv;

typedef struct SVQ1Context {
    HpelDSPContext hdsp;
    GetBitContext gb;
    AVFrame *prev;

    uint8_t *pkt_swapped;
    int pkt_swapped_allocated;

    int width;
    int height;
    int frame_code;
    int nonref;             // 1 if the current frame won't be referenced
} SVQ1Context;

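/*
 * Byte table used as a rolling XOR key by svq1_parse_string() to
 * de-obfuscate the embedded message string carried in some keyframe headers.
 */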
static const uint8_t string_table[256] = {
    0x00, 0xD5, 0x7F, 0xAA, 0xFE, 0x2B, 0x81, 0x54,
    0x29, 0xFC, 0x56, 0x83, 0xD7, 0x02, 0xA8, 0x7D,
    0x52, 0x87, 0x2D, 0xF8, 0xAC, 0x79, 0xD3, 0x06,
    0x7B, 0xAE, 0x04, 0xD1, 0x85, 0x50, 0xFA, 0x2F,
    0xA4, 0x71, 0xDB, 0x0E, 0x5A, 0x8F, 0x25, 0xF0,
    0x8D, 0x58, 0xF2, 0x27, 0x73, 0xA6, 0x0C, 0xD9,
    0xF6, 0x23, 0x89, 0x5C, 0x08, 0xDD, 0x77, 0xA2,
    0xDF, 0x0A, 0xA0, 0x75, 0x21, 0xF4, 0x5E, 0x8B,
    0x9D, 0x48, 0xE2, 0x37, 0x63, 0xB6, 0x1C, 0xC9,
    0xB4, 0x61, 0xCB, 0x1E, 0x4A, 0x9F, 0x35, 0xE0,
    0xCF, 0x1A, 0xB0, 0x65, 0x31, 0xE4, 0x4E, 0x9B,
    0xE6, 0x33, 0x99, 0x4C, 0x18, 0xCD, 0x67, 0xB2,
    0x39, 0xEC, 0x46, 0x93, 0xC7, 0x12, 0xB8, 0x6D,
    0x10, 0xC5, 0x6F, 0xBA, 0xEE, 0x3B, 0x91, 0x44,
    0x6B, 0xBE, 0x14, 0xC1, 0x95, 0x40, 0xEA, 0x3F,
    0x42, 0x97, 0x3D, 0xE8, 0xBC, 0x69, 0xC3, 0x16,
    0xEF, 0x3A, 0x90, 0x45, 0x11, 0xC4, 0x6E, 0xBB,
    0xC6, 0x13, 0xB9, 0x6C, 0x38, 0xED, 0x47, 0x92,
    0xBD, 0x68, 0xC2, 0x17, 0x43, 0x96, 0x3C, 0xE9,
    0x94, 0x41, 0xEB, 0x3E, 0x6A, 0xBF, 0x15, 0xC0,
    0x4B, 0x9E, 0x34, 0xE1, 0xB5, 0x60, 0xCA, 0x1F,
    0x62, 0xB7, 0x1D, 0xC8, 0x9C, 0x49, 0xE3, 0x36,
    0x19, 0xCC, 0x66, 0xB3, 0xE7, 0x32, 0x98, 0x4D,
    0x30, 0xE5, 0x4F, 0x9A, 0xCE, 0x1B, 0xB1, 0x64,
    0x72, 0xA7, 0x0D, 0xD8, 0x8C, 0x59, 0xF3, 0x26,
    0x5B, 0x8E, 0x24, 0xF1, 0xA5, 0x70, 0xDA, 0x0F,
    0x20, 0xF5, 0x5F, 0x8A, 0xDE, 0x0B, 0xA1, 0x74,
    0x09, 0xDC, 0x76, 0xA3, 0xF7, 0x22, 0x88, 0x5D,
    0xD6, 0x03, 0xA9, 0x7C, 0x28, 0xFD, 0x57, 0x82,
    0xFF, 0x2A, 0x80, 0x55, 0x01, 0xD4, 0x7E, 0xAB,
    0x84, 0x51, 0xFB, 0x2E, 0x7A, 0xAF, 0x05, 0xD0,
    0xAD, 0x78, 0xD2, 0x07, 0x53, 0x86, 0x2C, 0xF9
};

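/*
 * Breadth-first traversal of the (sub)vector list: one bit per node decides
 * whether the current block is split in two (alternately horizontally and
 * vertically); the two halves are appended to the list as child nodes.
 */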
#define SVQ1_PROCESS_VECTOR()                                           \
    for (; level > 0; i++) {                                            \
        /* process next depth */                                        \
        if (i == m) {                                                   \
            m = n;                                                      \
            if (--level == 0)                                           \
                break;                                                  \
        }                                                               \
        /* divide block if next bit set */                              \
        if (!get_bits1(bitbuf))                                         \
            break;                                                      \
        /* add child nodes */                                           \
        list[n++] = list[i];                                            \
        list[n++] = list[i] + (((level & 1) ? pitch : 1) << ((level >> 1) + 1)); \
    }

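/*
 * Add the selected codebook stage vectors to the accumulated pixel values.
 * n1 holds the odd (high) bytes and n2 the even (low) bytes of four pixels,
 * each widened to 16 bits; the masks below saturate every byte to [0, 255].
 */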
#define SVQ1_ADD_CODEBOOK()                                             \
    /* add codebook entries to vector */                                \
    for (j = 0; j < stages; j++) {                                      \
        n3  = codebook[entries[j]] ^ 0x80808080;                        \
        n1 += (n3 & 0xFF00FF00) >> 8;                                   \
        n2 +=  n3 & 0x00FF00FF;                                         \
    }                                                                   \
                                                                        \
    /* clip to [0..255] */                                              \
    if (n1 & 0xFF00FF00) {                                              \
        n3  = (n1 >> 15  & 0x00010001 | 0x01000100) - 0x00010001;       \
        n1 += 0x7F007F00;                                               \
        n1 |= (~n1 >> 15 & 0x00010001 | 0x01000100) - 0x00010001;       \
        n1 &= n3 & 0x00FF00FF;                                          \
    }                                                                   \
                                                                        \
    if (n2 & 0xFF00FF00) {                                              \
        n3  = (n2 >> 15  & 0x00010001 | 0x01000100) - 0x00010001;       \
        n2 += 0x7F007F00;                                               \
        n2 |= (~n2 >> 15 & 0x00010001 | 0x01000100) - 0x00010001;       \
        n2 &= n3 & 0x00FF00FF;                                          \
    }

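/*
 * Read one 4-bit codebook index per stage for the current level and compute
 * the offsets into that level's codebook; n4 is the mean replicated into
 * both 16-bit halves so it can seed the packed accumulators n1/n2.
 */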
#define SVQ1_CALC_CODEBOOK_ENTRIES(cbook)                               \
    codebook = (const uint32_t *)cbook[level];                          \
    if (stages > 0)                                                     \
        bit_cache = get_bits(bitbuf, 4 * stages);                       \
    /* calculate codebook entries for this vector */                    \
    for (j = 0; j < stages; j++) {                                      \
        entries[j] = (((bit_cache >> (4 * (stages - j - 1))) & 0xF) +   \
                      16 * j) << (level + 1);                           \
    }                                                                   \
    mean -= stages * 128;                                               \
    n4    = (mean << 16) + mean;

static int svq1_decode_block_intra(GetBitContext *bitbuf, uint8_t *pixels,
                                   ptrdiff_t pitch)
{
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int stages;
    unsigned mean;
    unsigned x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i = 0, m = 1, n = 1, level = 5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst    = (uint32_t *)list[i];
        width  = 1 << ((4 + level) / 2);
        height = 1 << ((3 + level) / 2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_intra_multistage[level].table, 3, 3) - 1;

        if (stages == -1) {
            for (y = 0; y < height; y++)
                memset(&dst[y * (pitch / 4)], 0, width);
            continue;   /* skip vector */
        }

        if ((stages > 0 && level >= 4)) {
            ff_dlog(NULL,
                    "Error (svq1_decode_block_intra): invalid vector: stages=%i level=%i\n",
                    stages, level);
            return AVERROR_INVALIDDATA;  /* invalid vector */
        }
        av_assert0(stages >= 0);

        mean = get_vlc2(bitbuf, svq1_intra_mean.table, 8, 3);

        if (stages == 0) {
            for (y = 0; y < height; y++)
                memset(&dst[y * (pitch / 4)], mean, width);
        } else {
            SVQ1_CALC_CODEBOOK_ENTRIES(ff_svq1_intra_codebooks);

            for (y = 0; y < height; y++) {
                for (x = 0; x < width / 4; x++, codebook++) {
                    n1 = n4;
                    n2 = n4;
                    SVQ1_ADD_CODEBOOK()
                    /* store result */
                    dst[x] = n1 << 8 | n2;
                }
                dst += pitch / 4;
            }
        }
    }

    return 0;
}

static int svq1_decode_block_non_intra(GetBitContext *bitbuf, uint8_t *pixels,
                                       ptrdiff_t pitch)
{
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int stages;
    unsigned mean;
    int x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i = 0, m = 1, n = 1, level = 5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst    = (uint32_t *)list[i];
        width  = 1 << ((4 + level) / 2);
        height = 1 << ((3 + level) / 2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_inter_multistage[level].table, 3, 2) - 1;

        if (stages == -1)
            continue;   /* skip vector */

        if ((stages > 0 && level >= 4)) {
            ff_dlog(NULL,
                    "Error (svq1_decode_block_non_intra): invalid vector: stages=%i level=%i\n",
                    stages, level);
            return AVERROR_INVALIDDATA;  /* invalid vector */
        }
        av_assert0(stages >= 0);

        mean = get_vlc2(bitbuf, svq1_inter_mean.table, 9, 3) - 256;

        SVQ1_CALC_CODEBOOK_ENTRIES(ff_svq1_inter_codebooks);

        for (y = 0; y < height; y++) {
            for (x = 0; x < width / 4; x++, codebook++) {
                n3 = dst[x];
                /* add mean value to vector */
                n1 = n4 + ((n3 & 0xFF00FF00) >> 8);
                n2 = n4 + (n3 & 0x00FF00FF);
                SVQ1_ADD_CODEBOOK()
                /* store result */
                dst[x] = n1 << 8 | n2;
            }
            dst += pitch / 4;
        }
    }
    return 0;
}

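/*
 * Decode one motion vector: each component is a VLC-coded difference added
 * to the median of three neighbouring predictors, then wrapped to the
 * 6-bit signed range used for SVQ1 half-pel vectors.
 */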
static int svq1_decode_motion_vector(GetBitContext *bitbuf, svq1_pmv *mv,
                                     svq1_pmv **pmv)
{
    int diff;
    int i;

    for (i = 0; i < 2; i++) {
        /* get motion code */
        diff = get_vlc2(bitbuf, svq1_motion_component.table, 7, 2);
        if (diff < 0)
            return AVERROR_INVALIDDATA;
        else if (diff) {
            if (get_bits1(bitbuf))
                diff = -diff;
        }

        /* add median of motion vector predictors and clip result */
        if (i == 1)
            mv->y = sign_extend(diff + mid_pred(pmv[0]->y, pmv[1]->y, pmv[2]->y), 6);
        else
            mv->x = sign_extend(diff + mid_pred(pmv[0]->x, pmv[1]->x, pmv[2]->x), 6);
    }

    return 0;
}

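/* SKIP block: copy the co-located 16x16 block from the previous frame. */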
static void svq1_skip_block(uint8_t *current, uint8_t *previous,
                            ptrdiff_t pitch, int x, int y)
{
    uint8_t *src;
    uint8_t *dst;
    int i;

    src = &previous[x + y * pitch];
    dst = current;

    for (i = 0; i < 16; i++) {
        memcpy(dst, src, 16);
        src += pitch;
        dst += pitch;
    }
}

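/*
 * INTER block: decode a single motion vector for the 16x16 block and form
 * the half-pel motion-compensated prediction from the previous frame.
 */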
static int svq1_motion_inter_block(HpelDSPContext *hdsp, GetBitContext *bitbuf,
                                   uint8_t *current, uint8_t *previous,
                                   ptrdiff_t pitch, svq1_pmv *motion, int x, int y,
                                   int width, int height)
{
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv mv;
    svq1_pmv *pmv[3];
    int result;

    /* predict and decode motion vector */
    pmv[0] = &motion[0];
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[x / 8 + 2];
        pmv[2] = &motion[x / 8 + 4];
    }

    result = svq1_decode_motion_vector(bitbuf, &mv, pmv);
    if (result)
        return result;

    motion[0].x         =
    motion[x / 8 + 2].x =
    motion[x / 8 + 3].x = mv.x;
    motion[0].y         =
    motion[x / 8 + 2].y =
    motion[x / 8 + 3].y = mv.y;

    mv.x = av_clip(mv.x, -2 * x, 2 * (width  - x - 16));
    mv.y = av_clip(mv.y, -2 * y, 2 * (height - y - 16));

    src = &previous[(x + (mv.x >> 1)) + (y + (mv.y >> 1)) * pitch];
    dst = current;

    hdsp->put_pixels_tab[0][(mv.y & 1) << 1 | (mv.x & 1)](dst, src, pitch, 16);

    return 0;
}

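/*
 * INTER_4V block: decode four motion vectors, one per 8x8 quadrant, and
 * form the half-pel motion-compensated prediction for each quadrant.
 */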
static int svq1_motion_inter_4v_block(HpelDSPContext *hdsp, GetBitContext *bitbuf,
                                      uint8_t *current, uint8_t *previous,
                                      ptrdiff_t pitch, svq1_pmv *motion, int x, int y,
                                      int width, int height)
{
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv mv;
    svq1_pmv *pmv[4];
    int i, result;

    /* predict and decode motion vector (0) */
    pmv[0] = &motion[0];
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[(x / 8) + 2];
        pmv[2] = &motion[(x / 8) + 4];
    }

    result = svq1_decode_motion_vector(bitbuf, &mv, pmv);
    if (result)
        return result;

    /* predict and decode motion vector (1) */
    pmv[0] = &mv;
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[(x / 8) + 3];
    }
    result = svq1_decode_motion_vector(bitbuf, &motion[0], pmv);
    if (result)
        return result;

    /* predict and decode motion vector (2) */
    pmv[1] = &motion[0];
    pmv[2] = &motion[(x / 8) + 1];

    result = svq1_decode_motion_vector(bitbuf, &motion[(x / 8) + 2], pmv);
    if (result)
        return result;

    /* predict and decode motion vector (3) */
    pmv[2] = &motion[(x / 8) + 2];
    pmv[3] = &motion[(x / 8) + 3];

    result = svq1_decode_motion_vector(bitbuf, pmv[3], pmv);
    if (result)
        return result;

    /* form predictions */
    for (i = 0; i < 4; i++) {
        int mvx = pmv[i]->x + (i  & 1) * 16;
        int mvy = pmv[i]->y + (i >> 1) * 16;

        // FIXME: clipping or padding?
        mvx = av_clip(mvx, -2 * x, 2 * (width  - x - 8));
        mvy = av_clip(mvy, -2 * y, 2 * (height - y - 8));

        src = &previous[(x + (mvx >> 1)) + (y + (mvy >> 1)) * pitch];
        dst = current;

        hdsp->put_pixels_tab[1][((mvy & 1) << 1) | (mvx & 1)](dst, src, pitch, 8);

        /* select next block */
        if (i & 1)
            current += 8 * (pitch - 1);
        else
            current += 8;
    }

    return 0;
}

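/*
 * Decode one 16x16 macroblock of a delta (P) frame: read its block type and
 * dispatch to skip, single-vector inter, four-vector inter or intra decoding.
 */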
static int svq1_decode_delta_block(AVCodecContext *avctx, HpelDSPContext *hdsp,
                                   GetBitContext *bitbuf,
                                   uint8_t *current, uint8_t *previous,
                                   ptrdiff_t pitch, svq1_pmv *motion, int x, int y,
                                   int width, int height)
{
    uint32_t block_type;
    int result = 0;

    /* get block type */
    block_type = get_vlc2(bitbuf, svq1_block_type.table, 2, 2);

    /* reset motion vectors */
    if (block_type == SVQ1_BLOCK_SKIP || block_type == SVQ1_BLOCK_INTRA) {
        motion[0].x         =
        motion[0].y         =
        motion[x / 8 + 2].x =
        motion[x / 8 + 2].y =
        motion[x / 8 + 3].x =
        motion[x / 8 + 3].y = 0;
    }

    switch (block_type) {
    case SVQ1_BLOCK_SKIP:
        svq1_skip_block(current, previous, pitch, x, y);
        break;

    case SVQ1_BLOCK_INTER:
        result = svq1_motion_inter_block(hdsp, bitbuf, current, previous,
                                         pitch, motion, x, y, width, height);

        if (result != 0) {
            ff_dlog(avctx, "Error in svq1_motion_inter_block %i\n", result);
            break;
        }
        result = svq1_decode_block_non_intra(bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTER_4V:
        result = svq1_motion_inter_4v_block(hdsp, bitbuf, current, previous,
                                            pitch, motion, x, y, width, height);

        if (result != 0) {
            ff_dlog(avctx, "Error in svq1_motion_inter_4v_block %i\n", result);
            break;
        }
        result = svq1_decode_block_non_intra(bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTRA:
        result = svq1_decode_block_intra(bitbuf, current, pitch);
        break;
    }

    return result;
}

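/*
 * Decode the length-prefixed message string embedded in some keyframe
 * headers; each byte is XORed with a running key taken from string_table[].
 */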
static void svq1_parse_string(GetBitContext *bitbuf, uint8_t out[257])
{
    uint8_t seed;
    int i;

    out[0] = get_bits(bitbuf, 8);
    seed   = string_table[out[0]];

    for (i = 1; i <= out[0]; i++) {
        out[i] = get_bits(bitbuf, 8) ^ seed;
        seed   = string_table[out[i] ^ seed];
    }
    out[i] = 0;
}

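/*
 * Parse the picture header: temporal reference, frame type, an optional
 * checksum and embedded message on keyframes, the coded frame size and a
 * few unknown/reserved fields.
 */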
static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame)
{
    SVQ1Context *s = avctx->priv_data;
    GetBitContext *bitbuf = &s->gb;
    int frame_size_code;
    int width  = s->width;
    int height = s->height;

    skip_bits(bitbuf, 8); /* temporal_reference */

    /* frame type */
    s->nonref = 0;
    switch (get_bits(bitbuf, 2)) {
    case 0:
        frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case 2:
        s->nonref = 1;
    case 1:
        frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Invalid frame type.\n");
        return AVERROR_INVALIDDATA;
    }

    if (frame->pict_type == AV_PICTURE_TYPE_I) {
        /* unknown fields */
        if (s->frame_code == 0x50 || s->frame_code == 0x60) {
            int csum = get_bits(bitbuf, 16);

            csum = ff_svq1_packet_checksum(bitbuf->buffer,
                                           bitbuf->size_in_bits >> 3,
                                           csum);

            ff_dlog(avctx, "%s checksum (%02x) for packet data\n",
                    (csum == 0) ? "correct" : "incorrect", csum);
        }

        if ((s->frame_code ^ 0x10) >= 0x50) {
            uint8_t msg[257];

            svq1_parse_string(bitbuf, msg);

            av_log(avctx, AV_LOG_INFO,
                   "embedded message:\n%s\n", ((char *)msg) + 1);
        }

        skip_bits(bitbuf, 2);
        skip_bits(bitbuf, 2);
        skip_bits1(bitbuf);

        /* load frame size */
        frame_size_code = get_bits(bitbuf, 3);

        if (frame_size_code == 7) {
            /* load width, height (12 bits each) */
            width  = get_bits(bitbuf, 12);
            height = get_bits(bitbuf, 12);

            if (!width || !height)
                return AVERROR_INVALIDDATA;
        } else {
            /* get width, height from table */
            width  = ff_svq1_frame_size_table[frame_size_code][0];
            height = ff_svq1_frame_size_table[frame_size_code][1];
        }
    }

    /* unknown fields */
    if (get_bits1(bitbuf)) {
        skip_bits1(bitbuf);    /* use packet checksum if (1) */
        skip_bits1(bitbuf);    /* component checksums after image data if (1) */

        if (get_bits(bitbuf, 2) != 0)
            return AVERROR_INVALIDDATA;
    }

    if (get_bits1(bitbuf)) {
        skip_bits1(bitbuf);
        skip_bits(bitbuf, 4);
        skip_bits1(bitbuf);
        skip_bits(bitbuf, 2);

        if (skip_1stop_8data_bits(bitbuf) < 0)
            return AVERROR_INVALIDDATA;
    }

    s->width  = width;
    s->height = height;
    return 0;
}

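/*
 * Top-level decode callback: parse the frame header, allocate the output
 * frame, decode the Y, U and V planes macroblock by macroblock and, unless
 * the frame is marked non-referenced, keep it as the prediction source for
 * the next frame.
 */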
static int svq1_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    SVQ1Context *s     = avctx->priv_data;
    AVFrame *cur       = data;
    uint8_t *current;
    int result, i, x, y, width, height;
    svq1_pmv *pmv;
    int ret;

    /* initialize bit buffer */
    ret = init_get_bits8(&s->gb, buf, buf_size);
    if (ret < 0)
        return ret;

    /* decode frame header */
    s->frame_code = get_bits(&s->gb, 22);

    if ((s->frame_code & ~0x70) || !(s->frame_code & 0x60))
        return AVERROR_INVALIDDATA;

    /* swap some header bytes (why?) */
    if (s->frame_code != 0x20) {
        uint32_t *src;

        if (buf_size < 9 * 4) {
            av_log(avctx, AV_LOG_ERROR, "Input packet too small\n");
            return AVERROR_INVALIDDATA;
        }

        av_fast_padded_malloc(&s->pkt_swapped,
                              &s->pkt_swapped_allocated,
                              buf_size);
        if (!s->pkt_swapped)
            return AVERROR(ENOMEM);

        memcpy(s->pkt_swapped, buf, buf_size);
        buf = s->pkt_swapped;
        init_get_bits(&s->gb, buf, buf_size * 8);
        skip_bits(&s->gb, 22);

        src = (uint32_t *)(s->pkt_swapped + 4);

        for (i = 0; i < 4; i++)
            src[i] = ((src[i] << 16) | (src[i] >> 16)) ^ src[7 - i];
    }

    result = svq1_decode_frame_header(avctx, cur);
    if (result != 0) {
        ff_dlog(avctx, "Error in svq1_decode_frame_header %i\n", result);
        return result;
    }

    result = ff_set_dimensions(avctx, s->width, s->height);
    if (result < 0)
        return result;

    if ((avctx->skip_frame >= AVDISCARD_NONREF && s->nonref) ||
        (avctx->skip_frame >= AVDISCARD_NONKEY &&
         cur->pict_type != AV_PICTURE_TYPE_I) ||
        avctx->skip_frame >= AVDISCARD_ALL)
        return buf_size;

    result = ff_get_buffer(avctx, cur, s->nonref ? 0 : AV_GET_BUFFER_FLAG_REF);
    if (result < 0)
        return result;

    pmv = av_malloc((FFALIGN(s->width, 16) / 8 + 3) * sizeof(*pmv));
    if (!pmv)
        return AVERROR(ENOMEM);

    /* decode y, u and v components */
    for (i = 0; i < 3; i++) {
        int linesize = cur->linesize[i];
        if (i == 0) {
            width  = FFALIGN(s->width,  16);
            height = FFALIGN(s->height, 16);
        } else {
            if (avctx->flags & AV_CODEC_FLAG_GRAY)
                break;
            width  = FFALIGN(s->width  / 4, 16);
            height = FFALIGN(s->height / 4, 16);
        }

        current = cur->data[i];

        if (cur->pict_type == AV_PICTURE_TYPE_I) {
            /* keyframe */
            for (y = 0; y < height; y += 16) {
                for (x = 0; x < width; x += 16) {
                    result = svq1_decode_block_intra(&s->gb, &current[x],
                                                     linesize);
                    if (result) {
                        av_log(avctx, AV_LOG_ERROR,
                               "Error in svq1_decode_block %i (keyframe)\n",
                               result);
                        goto err;
                    }
                }
                current += 16 * linesize;
            }
        } else {
            /* delta frame */
            uint8_t *previous = s->prev->data[i];
            if (!previous ||
                s->prev->width != s->width || s->prev->height != s->height) {
                av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
                result = AVERROR_INVALIDDATA;
                goto err;
            }

            memset(pmv, 0, ((width / 8) + 3) * sizeof(svq1_pmv));

            for (y = 0; y < height; y += 16) {
                for (x = 0; x < width; x += 16) {
                    result = svq1_decode_delta_block(avctx, &s->hdsp,
                                                     &s->gb, &current[x],
                                                     previous, linesize,
                                                     pmv, x, y, width, height);
                    if (result != 0) {
                        ff_dlog(avctx,
                                "Error in svq1_decode_delta_block %i\n",
                                result);
                        goto err;
                    }
                }

                pmv[0].x =
                pmv[0].y = 0;

                current += 16 * linesize;
            }
        }
    }

    if (!s->nonref) {
        av_frame_unref(s->prev);
        result = av_frame_ref(s->prev, cur);
        if (result < 0)
            goto err;
    }

    *got_frame = 1;
    result     = buf_size;

err:
    av_free(pmv);
    return result;
}

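/*
 * One-time decoder setup: allocate the reference frame, round the coded
 * dimensions, initialise the half-pel DSP helpers and build the static
 * VLC tables shared by all decoder instances.
 */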
static av_cold int svq1_decode_init(AVCodecContext *avctx)
{
    SVQ1Context *s = avctx->priv_data;
    int i;
    int offset = 0;

    s->prev = av_frame_alloc();
    if (!s->prev)
        return AVERROR(ENOMEM);

    s->width  = avctx->width  + 3 & ~3;
    s->height = avctx->height + 3 & ~3;
    avctx->pix_fmt = AV_PIX_FMT_YUV410P;

    ff_hpeldsp_init(&s->hdsp, avctx->flags);

    INIT_VLC_STATIC(&svq1_block_type, 2, 4,
                    &ff_svq1_block_type_vlc[0][1], 2, 1,
                    &ff_svq1_block_type_vlc[0][0], 2, 1, 6);

    INIT_VLC_STATIC(&svq1_motion_component, 7, 33,
                    &ff_mvtab[0][1], 2, 1,
                    &ff_mvtab[0][0], 2, 1, 176);

    for (i = 0; i < 6; i++) {
        static const uint8_t sizes[2][6] = { { 14, 10, 14, 18, 16, 18 },
                                             { 10, 10, 14, 14, 14, 16 } };
        static VLC_TYPE table[168][2];
        svq1_intra_multistage[i].table           = &table[offset];
        svq1_intra_multistage[i].table_allocated = sizes[0][i];
        offset                                  += sizes[0][i];
        init_vlc(&svq1_intra_multistage[i], 3, 8,
                 &ff_svq1_intra_multistage_vlc[i][0][1], 2, 1,
                 &ff_svq1_intra_multistage_vlc[i][0][0], 2, 1,
                 INIT_VLC_USE_NEW_STATIC);
        svq1_inter_multistage[i].table           = &table[offset];
        svq1_inter_multistage[i].table_allocated = sizes[1][i];
        offset                                  += sizes[1][i];
        init_vlc(&svq1_inter_multistage[i], 3, 8,
                 &ff_svq1_inter_multistage_vlc[i][0][1], 2, 1,
                 &ff_svq1_inter_multistage_vlc[i][0][0], 2, 1,
                 INIT_VLC_USE_NEW_STATIC);
    }

    INIT_VLC_STATIC(&svq1_intra_mean, 8, 256,
                    &ff_svq1_intra_mean_vlc[0][1], 4, 2,
                    &ff_svq1_intra_mean_vlc[0][0], 4, 2, 632);

    INIT_VLC_STATIC(&svq1_inter_mean, 9, 512,
                    &ff_svq1_inter_mean_vlc[0][1], 4, 2,
                    &ff_svq1_inter_mean_vlc[0][0], 4, 2, 1434);

    return 0;
}

static av_cold int svq1_decode_end(AVCodecContext *avctx)
{
    SVQ1Context *s = avctx->priv_data;

    av_frame_free(&s->prev);
    av_freep(&s->pkt_swapped);
    s->pkt_swapped_allocated = 0;

    return 0;
}

static void svq1_flush(AVCodecContext *avctx)
{
    SVQ1Context *s = avctx->priv_data;

    av_frame_unref(s->prev);
}

AVCodec ff_svq1_decoder = {
    .name           = "svq1",
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ1,
    .priv_data_size = sizeof(SVQ1Context),
    .init           = svq1_decode_init,
    .close          = svq1_decode_end,
    .decode         = svq1_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
    .flush          = svq1_flush,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
                                                     AV_PIX_FMT_NONE },
};
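
#if 0
/*
 * Minimal usage sketch (illustration only, never compiled): how a caller
 * would reach this decoder through the public libavcodec API.  The function
 * name and the pkt variable are placeholders, error handling is reduced to
 * the bare minimum, and a real application would also flush the decoder and
 * handle AVERROR(EAGAIN) from avcodec_receive_frame().
 */
#include <libavcodec/avcodec.h>

static int decode_one_svq1_packet(const uint8_t *data, int size, AVFrame *frm)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_SVQ1);
    AVCodecContext *ctx  = avcodec_alloc_context3(codec);
    AVPacket *pkt        = av_packet_alloc();
    int ret;

    if (!codec || !ctx || !pkt)
        return AVERROR(ENOMEM);
    if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)
        goto end;

    pkt->data = (uint8_t *)data;   /* the caller keeps ownership of the buffer */
    pkt->size = size;

    if ((ret = avcodec_send_packet(ctx, pkt)) >= 0)
        ret = avcodec_receive_frame(ctx, frm); /* YUV410P frame on success */

end:
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    return ret;
}
#endif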