FFmpeg
huffyuvdec.c
1 /*
2  * huffyuv decoder
3  *
4  * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7  * the algorithm used
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  *
25  * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
26  */
27 
28 /**
29  * @file
30  * huffyuv decoder
31  */
32 
33 #define UNCHECKED_BITSTREAM_READER 1
34 
35 #include "avcodec.h"
36 #include "get_bits.h"
37 #include "huffyuv.h"
38 #include "huffyuvdsp.h"
39 #include "lossless_videodsp.h"
40 #include "thread.h"
41 #include "libavutil/imgutils.h"
42 #include "libavutil/pixdesc.h"
43 
44 #define classic_shift_luma_table_size 42
45 static const unsigned char classic_shift_luma[classic_shift_luma_table_size + AV_INPUT_BUFFER_PADDING_SIZE] = {
46  34, 36, 35, 69, 135, 232, 9, 16, 10, 24, 11, 23, 12, 16, 13, 10,
47  14, 8, 15, 8, 16, 8, 17, 20, 16, 10, 207, 206, 205, 236, 11, 8,
48  10, 21, 9, 23, 8, 8, 199, 70, 69, 68, 0,
49  0,0,0,0,0,0,0,0,
50 };
51 
52 #define classic_shift_chroma_table_size 59
53 static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + AV_INPUT_BUFFER_PADDING_SIZE] = {
54  66, 36, 37, 38, 39, 40, 41, 75, 76, 77, 110, 239, 144, 81, 82, 83,
55  84, 85, 118, 183, 56, 57, 88, 89, 56, 89, 154, 57, 58, 57, 26, 141,
56  57, 56, 58, 57, 58, 57, 184, 119, 214, 245, 116, 83, 82, 49, 80, 79,
57  78, 77, 44, 75, 41, 40, 39, 38, 37, 36, 34, 0,
58  0,0,0,0,0,0,0,0,
59 };
60 
61 static const unsigned char classic_add_luma[256] = {
62  3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
63  73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
64  68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
65  35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
66  37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
67  35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
68  27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
69  15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
70  12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
71  12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
72  18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
73  28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
74  28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
75  62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
76  54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
77  46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
78 };
79 
80 static const unsigned char classic_add_chroma[256] = {
81  3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
82  7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
83  11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
84  43, 45, 76, 81, 46, 82, 75, 55, 56, 144, 58, 80, 60, 74, 147, 63,
85  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
86  80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
87  17, 14, 5, 6, 100, 54, 47, 50, 51, 53, 106, 107, 108, 109, 110, 111,
88  112, 113, 114, 115, 4, 117, 118, 92, 94, 121, 122, 3, 124, 103, 2, 1,
89  0, 129, 130, 131, 120, 119, 126, 125, 136, 137, 138, 139, 140, 141, 142, 134,
90  135, 132, 133, 104, 64, 101, 62, 57, 102, 95, 93, 59, 61, 28, 97, 96,
91  52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
92  19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
93  7, 128, 127, 105, 123, 116, 35, 34, 33, 145, 31, 79, 42, 146, 78, 26,
94  83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
95  14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
96  6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
97 };
98 
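/* Code-length tables are stored run-length coded: each group read below is a
 * 3-bit repeat count followed by a 5-bit length, and a repeat count of 0
 * means an explicit 8-bit repeat count follows. */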
99 static int read_len_table(uint8_t *dst, GetBitContext *gb, int n)
100 {
101  int i, val, repeat;
102 
103  for (i = 0; i < n;) {
104  repeat = get_bits(gb, 3);
105  val = get_bits(gb, 5);
106  if (repeat == 0)
107  repeat = get_bits(gb, 8);
108  if (i + repeat > n || get_bits_left(gb) < 0) {
109  av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
110  return AVERROR_INVALIDDATA;
111  }
112  while (repeat--)
113  dst[i++] = val;
114  }
115  return 0;
116 }
117 
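/* Build the "joint" VLC tables (vlc[4..7]): whenever two consecutive codes
 * fit together within VLC_BITS, the pair is stored as a single code whose
 * symbol packs both sample values (or, on the RGB path, indexes pix_bgr_map
 * with a whole B,G,R triplet), so the common case decodes two or three
 * samples per table lookup. Codes that do not fit are decoded one at a time
 * from the per-plane tables vlc[0..3]. */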
118 static int generate_joint_tables(HYuvContext *s)
119 {
120  int ret;
121  uint16_t *symbols = av_mallocz(5 << VLC_BITS);
122  uint16_t *bits;
123  uint8_t *len;
124  if (!symbols)
125  return AVERROR(ENOMEM);
126  bits = symbols + (1 << VLC_BITS);
127  len = (uint8_t *)(bits + (1 << VLC_BITS));
128 
129  if (s->bitstream_bpp < 24 || s->version > 2) {
130  int p, i, y, u;
131  for (p = 0; p < 4; p++) {
132  int p0 = s->version > 2 ? p : 0;
133  for (i = y = 0; y < s->vlc_n; y++) {
134  int len0 = s->len[p0][y];
135  int limit = VLC_BITS - len0;
136  if (limit <= 0 || !len0)
137  continue;
138  if ((sign_extend(y, 8) & (s->vlc_n-1)) != y)
139  continue;
140  for (u = 0; u < s->vlc_n; u++) {
141  int len1 = s->len[p][u];
142  if (len1 > limit || !len1)
143  continue;
144  if ((sign_extend(u, 8) & (s->vlc_n-1)) != u)
145  continue;
146  av_assert0(i < (1 << VLC_BITS));
147  len[i] = len0 + len1;
148  bits[i] = (s->bits[p0][y] << len1) + s->bits[p][u];
149  symbols[i] = (y << 8) + (u & 0xFF);
150  i++;
151  }
152  }
153  ff_free_vlc(&s->vlc[4 + p]);
154  if ((ret = ff_init_vlc_sparse(&s->vlc[4 + p], VLC_BITS, i, len, 1, 1,
155  bits, 2, 2, symbols, 2, 2, 0)) < 0)
156  goto out;
157  }
158  } else {
159  uint8_t (*map)[4] = (uint8_t(*)[4]) s->pix_bgr_map;
160  int i, b, g, r, code;
161  int p0 = s->decorrelate;
162  int p1 = !s->decorrelate;
163  /* Restrict the range to +/-16 because that's pretty much guaranteed
164  * to cover all the combinations that fit in 11 bits total, and it
165  * does not matter if we miss a few rare codes. */
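 /* Each accepted combination must also satisfy
  * len0 + len1 + len2 <= VLC_BITS (that is what the limit0/limit1 checks
  * below enforce), so values outside +/-16, which get long codes, would
  * rarely fit into the joint table anyway. */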
166  for (i = 0, g = -16; g < 16; g++) {
167  int len0 = s->len[p0][g & 255];
168  int limit0 = VLC_BITS - len0;
169  if (limit0 < 2 || !len0)
170  continue;
171  for (b = -16; b < 16; b++) {
172  int len1 = s->len[p1][b & 255];
173  int limit1 = limit0 - len1;
174  if (limit1 < 1 || !len1)
175  continue;
176  code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
177  for (r = -16; r < 16; r++) {
178  int len2 = s->len[2][r & 255];
179  if (len2 > limit1 || !len2)
180  continue;
181  av_assert0(i < (1 << VLC_BITS));
182  len[i] = len0 + len1 + len2;
183  bits[i] = (code << len2) + s->bits[2][r & 255];
184  if (s->decorrelate) {
185  map[i][G] = g;
186  map[i][B] = g + b;
187  map[i][R] = g + r;
188  } else {
189  map[i][B] = g;
190  map[i][G] = b;
191  map[i][R] = r;
192  }
193  i++;
194  }
195  }
196  }
197  ff_free_vlc(&s->vlc[4]);
198  if ((ret = init_vlc(&s->vlc[4], VLC_BITS, i, len, 1, 1,
199  bits, 2, 2, 0)) < 0)
200  goto out;
201  }
202  ret = 0;
203 out:
204  av_freep(&symbols);
205  return ret;
206 }
207 
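/* Parse the per-plane code-length tables (from extradata at init time, or
 * from the start of each frame when adaptive context is enabled), derive
 * the code words with ff_huffyuv_generate_bits_table(), rebuild the plain
 * and joint VLCs, and return the number of bytes consumed. */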
208 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
209 {
210  GetBitContext gb;
211  int i, ret;
212  int count = 3;
213 
214  if ((ret = init_get_bits(&gb, src, length * 8)) < 0)
215  return ret;
216 
217  if (s->version > 2)
218  count = 1 + s->alpha + 2*s->chroma;
219 
220  for (i = 0; i < count; i++) {
221  if ((ret = read_len_table(s->len[i], &gb, s->vlc_n)) < 0)
222  return ret;
223  if ((ret = ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n)) < 0)
224  return ret;
225  ff_free_vlc(&s->vlc[i]);
226  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, s->vlc_n, s->len[i], 1, 1,
227  s->bits[i], 4, 4, 0)) < 0)
228  return ret;
229  }
230 
231  if ((ret = generate_joint_tables(s)) < 0)
232  return ret;
233 
234  return (get_bits_count(&gb) + 7) / 8;
235 }
236 
237 static int read_old_huffman_tables(HYuvContext *s)
238 {
239  GetBitContext gb;
240  int i, ret;
241 
242  init_get_bits(&gb, classic_shift_luma,
243  classic_shift_luma_table_size * 8);
244  if ((ret = read_len_table(s->len[0], &gb, 256)) < 0)
245  return ret;
246 
247  init_get_bits(&gb, classic_shift_chroma,
248  classic_shift_chroma_table_size * 8);
249  if ((ret = read_len_table(s->len[1], &gb, 256)) < 0)
250  return ret;
251 
252  for (i = 0; i < 256; i++)
253  s->bits[0][i] = classic_add_luma[i];
254  for (i = 0; i < 256; i++)
255  s->bits[1][i] = classic_add_chroma[i];
256 
257  if (s->bitstream_bpp >= 24) {
258  memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
259  memcpy(s->len[1], s->len[0], 256 * sizeof(uint8_t));
260  }
261  memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
262  memcpy(s->len[2], s->len[1], 256 * sizeof(uint8_t));
263 
264  for (i = 0; i < 4; i++) {
265  ff_free_vlc(&s->vlc[i]);
266  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
267  s->bits[i], 4, 4, 0)) < 0)
268  return ret;
269  }
270 
271  if ((ret = generate_joint_tables(s)) < 0)
272  return ret;
273 
274  return 0;
275 }
276 
277 static av_cold int decode_end(AVCodecContext *avctx)
278 {
279  HYuvContext *s = avctx->priv_data;
280  int i;
281 
282  ff_huffyuv_common_end(s);
283  av_freep(&s->bitstream_buffer);
284 
285  for (i = 0; i < 8; i++)
286  ff_free_vlc(&s->vlc[i]);
287 
288  return 0;
289 }
290 
291 static av_cold int decode_init(AVCodecContext *avctx)
292 {
293  HYuvContext *s = avctx->priv_data;
294  int ret;
295 
296  ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
297  if (ret < 0)
298  return ret;
299 
300  ff_huffyuvdsp_init(&s->hdsp, avctx->pix_fmt);
301  ff_llviddsp_init(&s->llviddsp);
302  memset(s->vlc, 0, 4 * sizeof(VLC));
303 
304  s->interlaced = avctx->height > 288;
305  s->bgr32 = 1;
306 
307  if (avctx->extradata_size) {
308  if ((avctx->bits_per_coded_sample & 7) &&
309  avctx->bits_per_coded_sample != 12)
310  s->version = 1; // do such files exist at all?
311  else if (avctx->extradata_size > 3 && avctx->extradata[3] == 0)
312  s->version = 2;
313  else
314  s->version = 3;
315  } else
316  s->version = 0;
317 
318  s->bps = 8;
319  s->n = 1<<s->bps;
320  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
321  s->chroma = 1;
322  if (s->version >= 2) {
323  int method, interlace;
324 
325  if (avctx->extradata_size < 4)
326  return AVERROR_INVALIDDATA;
327 
328  method = avctx->extradata[0];
329  s->decorrelate = method & 64 ? 1 : 0;
330  s->predictor = method & 63;
331  if (s->version == 2) {
332  s->bitstream_bpp = avctx->extradata[1];
333  if (s->bitstream_bpp == 0)
334  s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
335  } else {
336  s->bps = (avctx->extradata[1] >> 4) + 1;
337  s->n = 1<<s->bps;
338  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
339  s->chroma_h_shift = avctx->extradata[1] & 3;
340  s->chroma_v_shift = (avctx->extradata[1] >> 2) & 3;
341  s->yuv = !!(avctx->extradata[2] & 1);
342  s->chroma= !!(avctx->extradata[2] & 3);
343  s->alpha = !!(avctx->extradata[2] & 4);
344  }
345  interlace = (avctx->extradata[2] & 0x30) >> 4;
346  s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
347  s->context = avctx->extradata[2] & 0x40 ? 1 : 0;
348 
349  if ((ret = read_huffman_tables(s, avctx->extradata + 4,
350  avctx->extradata_size - 4)) < 0)
351  goto error;
352  } else {
353  switch (avctx->bits_per_coded_sample & 7) {
354  case 1:
355  s->predictor = LEFT;
356  s->decorrelate = 0;
357  break;
358  case 2:
359  s->predictor = LEFT;
360  s->decorrelate = 1;
361  break;
362  case 3:
363  s->predictor = PLANE;
364  s->decorrelate = avctx->bits_per_coded_sample >= 24;
365  break;
366  case 4:
367  s->predictor = MEDIAN;
368  s->decorrelate = 0;
369  break;
370  default:
371  s->predictor = LEFT; // OLD
372  s->decorrelate = 0;
373  break;
374  }
375  s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
376  s->context = 0;
377 
378  if ((ret = read_old_huffman_tables(s)) < 0)
379  goto error;
380  }
381 
382  if (s->version <= 2) {
383  switch (s->bitstream_bpp) {
384  case 12:
385  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
386  s->yuv = 1;
387  break;
388  case 16:
389  if (s->yuy2)
390  avctx->pix_fmt = AV_PIX_FMT_YUYV422;
391  else
392  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
393  s->yuv = 1;
394  break;
395  case 24:
396  if (s->bgr32)
397  avctx->pix_fmt = AV_PIX_FMT_0RGB32;
398  else
399  avctx->pix_fmt = AV_PIX_FMT_BGR24;
400  break;
401  case 32:
402  av_assert0(s->bgr32);
403  avctx->pix_fmt = AV_PIX_FMT_RGB32;
404  s->alpha = 1;
405  break;
406  default:
407  ret = AVERROR_INVALIDDATA;
408  goto error;
409  }
410  av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
411  &s->chroma_h_shift,
412  &s->chroma_v_shift);
413  } else {
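 /* Version 3: pick the pixel format from a key packing the stream layout as
  * chroma<<10 | yuv<<9 | alpha<<8 | (bps-1)<<4 | chroma_v_shift<<2 |
  * chroma_h_shift. For example 0x695 = chroma+yuv, 10 bits per sample,
  * chroma shifted by 1 both horizontally and vertically -> YUV420P10. */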
414  switch ( (s->chroma<<10) | (s->yuv<<9) | (s->alpha<<8) | ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2)) {
415  case 0x070:
416  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
417  break;
418  case 0x0F0:
419  avctx->pix_fmt = AV_PIX_FMT_GRAY16;
420  break;
421  case 0x170:
422  avctx->pix_fmt = AV_PIX_FMT_GRAY8A;
423  break;
424  case 0x470:
425  avctx->pix_fmt = AV_PIX_FMT_GBRP;
426  break;
427  case 0x480:
428  avctx->pix_fmt = AV_PIX_FMT_GBRP9;
429  break;
430  case 0x490:
431  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
432  break;
433  case 0x4B0:
434  avctx->pix_fmt = AV_PIX_FMT_GBRP12;
435  break;
436  case 0x4D0:
437  avctx->pix_fmt = AV_PIX_FMT_GBRP14;
438  break;
439  case 0x4F0:
440  avctx->pix_fmt = AV_PIX_FMT_GBRP16;
441  break;
442  case 0x570:
443  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
444  break;
445  case 0x670:
446  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
447  break;
448  case 0x680:
449  avctx->pix_fmt = AV_PIX_FMT_YUV444P9;
450  break;
451  case 0x690:
452  avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
453  break;
454  case 0x6B0:
455  avctx->pix_fmt = AV_PIX_FMT_YUV444P12;
456  break;
457  case 0x6D0:
458  avctx->pix_fmt = AV_PIX_FMT_YUV444P14;
459  break;
460  case 0x6F0:
461  avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
462  break;
463  case 0x671:
464  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
465  break;
466  case 0x681:
467  avctx->pix_fmt = AV_PIX_FMT_YUV422P9;
468  break;
469  case 0x691:
470  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
471  break;
472  case 0x6B1:
473  avctx->pix_fmt = AV_PIX_FMT_YUV422P12;
474  break;
475  case 0x6D1:
476  avctx->pix_fmt = AV_PIX_FMT_YUV422P14;
477  break;
478  case 0x6F1:
479  avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
480  break;
481  case 0x672:
482  avctx->pix_fmt = AV_PIX_FMT_YUV411P;
483  break;
484  case 0x674:
485  avctx->pix_fmt = AV_PIX_FMT_YUV440P;
486  break;
487  case 0x675:
488  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
489  break;
490  case 0x685:
491  avctx->pix_fmt = AV_PIX_FMT_YUV420P9;
492  break;
493  case 0x695:
494  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
495  break;
496  case 0x6B5:
497  avctx->pix_fmt = AV_PIX_FMT_YUV420P12;
498  break;
499  case 0x6D5:
500  avctx->pix_fmt = AV_PIX_FMT_YUV420P14;
501  break;
502  case 0x6F5:
503  avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
504  break;
505  case 0x67A:
506  avctx->pix_fmt = AV_PIX_FMT_YUV410P;
507  break;
508  case 0x770:
509  avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
510  break;
511  case 0x780:
512  avctx->pix_fmt = AV_PIX_FMT_YUVA444P9;
513  break;
514  case 0x790:
515  avctx->pix_fmt = AV_PIX_FMT_YUVA444P10;
516  break;
517  case 0x7F0:
518  avctx->pix_fmt = AV_PIX_FMT_YUVA444P16;
519  break;
520  case 0x771:
521  avctx->pix_fmt = AV_PIX_FMT_YUVA422P;
522  break;
523  case 0x781:
524  avctx->pix_fmt = AV_PIX_FMT_YUVA422P9;
525  break;
526  case 0x791:
527  avctx->pix_fmt = AV_PIX_FMT_YUVA422P10;
528  break;
529  case 0x7F1:
530  avctx->pix_fmt = AV_PIX_FMT_YUVA422P16;
531  break;
532  case 0x775:
533  avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
534  break;
535  case 0x785:
536  avctx->pix_fmt = AV_PIX_FMT_YUVA420P9;
537  break;
538  case 0x795:
539  avctx->pix_fmt = AV_PIX_FMT_YUVA420P10;
540  break;
541  case 0x7F5:
542  avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
543  break;
544  default:
545  ret = AVERROR_INVALIDDATA;
546  goto error;
547  }
548  }
549 
550  ff_huffyuv_common_init(avctx);
551 
552  if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P || avctx->pix_fmt == AV_PIX_FMT_YUV420P) && avctx->width & 1) {
553  av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
554  ret = AVERROR_INVALIDDATA;
555  goto error;
556  }
557  if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P &&
558  avctx->width % 4) {
559  av_log(avctx, AV_LOG_ERROR, "width must be a multiple of 4 "
560  "for this combination of colorspace and predictor type.\n");
561  ret = AVERROR_INVALIDDATA;
562  goto error;
563  }
564 
565  if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
567  goto error;
568  }
569 
570  return 0;
571  error:
572  decode_end(avctx);
573  return ret;
574 }
575 
576 #if HAVE_THREADS
577 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
578 {
579  HYuvContext *s = avctx->priv_data;
580  int i, ret;
581 
582  s->avctx = avctx;
583 
584  if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
586  return ret;
587  }
588 
589  for (i = 0; i < 8; i++)
590  s->vlc[i].table = NULL;
591 
592  if (s->version >= 2) {
593  if ((ret = read_huffman_tables(s, avctx->extradata + 4,
594  avctx->extradata_size)) < 0)
595  return ret;
596  } else {
597  if ((ret = read_old_huffman_tables(s)) < 0)
598  return ret;
599  }
600 
601  return 0;
602 }
603 #endif
604 
605 /** Subset of GET_VLC for use in hand-rolled VLC code */
606 #define VLC_INTERN(dst, table, gb, name, bits, max_depth) \
607  code = table[index][0]; \
608  n = table[index][1]; \
609  if (max_depth > 1 && n < 0) { \
610  LAST_SKIP_BITS(name, gb, bits); \
611  UPDATE_CACHE(name, gb); \
612  \
613  nb_bits = -n; \
614  index = SHOW_UBITS(name, gb, nb_bits) + code; \
615  code = table[index][0]; \
616  n = table[index][1]; \
617  if (max_depth > 2 && n < 0) { \
618  LAST_SKIP_BITS(name, gb, nb_bits); \
619  UPDATE_CACHE(name, gb); \
620  \
621  nb_bits = -n; \
622  index = SHOW_UBITS(name, gb, nb_bits) + code; \
623  code = table[index][0]; \
624  n = table[index][1]; \
625  } \
626  } \
627  dst = code; \
628  LAST_SKIP_BITS(name, gb, n)
629 
630 
631 #define GET_VLC_DUAL(dst0, dst1, name, gb, dtable, table1, table2, \
632  bits, max_depth, OP) \
633  do { \
634  unsigned int index = SHOW_UBITS(name, gb, bits); \
635  int code, n = dtable[index][1]; \
636  \
637  if (n<=0) { \
638  int nb_bits; \
639  VLC_INTERN(dst0, table1, gb, name, bits, max_depth); \
640  \
641  UPDATE_CACHE(re, gb); \
642  index = SHOW_UBITS(name, gb, bits); \
643  VLC_INTERN(dst1, table2, gb, name, bits, max_depth); \
644  } else { \
645  code = dtable[index][0]; \
646  OP(dst0, dst1, code); \
647  LAST_SKIP_BITS(name, gb, n); \
648  } \
649  } while (0)
650 
651 #define OP8bits(dst0, dst1, code) dst0 = code>>8; dst1 = code
652 
653 #define READ_2PIX(dst0, dst1, plane1) \
654  UPDATE_CACHE(re, &s->gb); \
655  GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane1].table, \
656  s->vlc[0].table, s->vlc[plane1].table, VLC_BITS, 3, OP8bits)
657 
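/* Decode one line of 4:2:2 data: count luma samples into temp[0] and
 * count/2 samples each into temp[1]/temp[2], using the joint Y+U and Y+V
 * tables. When the bitstream could run out (count >= icount) the loop is
 * bounds-checked and any missing samples are zero-filled. */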
658 static void decode_422_bitstream(HYuvContext *s, int count)
659 {
660  int i, icount;
661  OPEN_READER(re, &s->gb);
662  count /= 2;
663 
664  icount = get_bits_left(&s->gb) / (32 * 4);
665  if (count >= icount) {
666  for (i = 0; i < icount; i++) {
667  READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
668  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
669  }
670  for (; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
671  READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
672  if (BITS_LEFT(re, &s->gb) <= 0) break;
673  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
674  }
675  for (; i < count; i++)
676  s->temp[0][2 * i ] = s->temp[1][i] =
677  s->temp[0][2 * i + 1] = s->temp[2][i] = 0;
678  } else {
679  for (i = 0; i < count; i++) {
680  READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
681  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
682  }
683  }
684  CLOSE_READER(re, &s->gb);
685 }
686 
687 #define READ_2PIX_PLANE(dst0, dst1, plane, OP) \
688  UPDATE_CACHE(re, &s->gb); \
689  GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane].table, \
690  s->vlc[plane].table, s->vlc[plane].table, VLC_BITS, 3, OP)
691 
692 #define OP14bits(dst0, dst1, code) dst0 = code>>8; dst1 = sign_extend(code, 8)
693 
694 /* TODO instead of restarting the read when the code isn't in the first level
695  * of the joint table, jump into the 2nd level of the individual table. */
696 #define READ_2PIX_PLANE16(dst0, dst1, plane){\
697  dst0 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
698  dst0 += get_bits(&s->gb, 2);\
699  dst1 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
700  dst1 += get_bits(&s->gb, 2);\
701 }
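/* Above 14 bits per sample the length tables no longer cover every value
 * (vlc_n is capped via MAX_VLC_N), so each sample is read as a VLC for its
 * high bits plus 2 raw low bits, as READ_2PIX_PLANE16 does above; the joint
 * two-sample tables are not used on that path. */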
702 static void decode_plane_bitstream(HYuvContext *s, int width, int plane)
703 {
704  int i, count = width/2;
705 
706  if (s->bps <= 8) {
707  OPEN_READER(re, &s->gb);
708  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
709  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
710  READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
711  }
712  } else {
713  for(i=0; i<count; i++){
714  READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
715  }
716  }
717  if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
718  unsigned int index;
719  int nb_bits, code, n;
720  UPDATE_CACHE(re, &s->gb);
721  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
722  VLC_INTERN(s->temp[0][width-1], s->vlc[plane].table,
723  &s->gb, re, VLC_BITS, 3);
724  }
725  CLOSE_READER(re, &s->gb);
726  } else if (s->bps <= 14) {
727  OPEN_READER(re, &s->gb);
728  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
729  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
730  READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
731  }
732  } else {
733  for(i=0; i<count; i++){
734  READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
735  }
736  }
737  if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
738  unsigned int index;
739  int nb_bits, code, n;
740  UPDATE_CACHE(re, &s->gb);
741  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
742  VLC_INTERN(s->temp16[0][width-1], s->vlc[plane].table,
743  &s->gb, re, VLC_BITS, 3);
744  }
745  CLOSE_READER(re, &s->gb);
746  } else {
747  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
748  for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
749  READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
750  }
751  } else {
752  for(i=0; i<count; i++){
753  READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
754  }
755  }
756  if( width&1 && get_bits_left(&s->gb)>0 ) {
757  int dst = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;
758  s->temp16[0][width-1] = dst + get_bits(&s->gb, 2);
759  }
760  }
761 }
762 
763 static void decode_gray_bitstream(HYuvContext *s, int count)
764 {
765  int i;
766  OPEN_READER(re, &s->gb);
767  count /= 2;
768 
769  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
770  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
771  READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
772  }
773  } else {
774  for (i = 0; i < count; i++) {
775  READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
776  }
777  }
778  CLOSE_READER(re, &s->gb);
779 }
780 
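/* RGB path: the fast case hits the joint table vlc[4], where one code yields
 * a ready-made B,G,R triplet from pix_bgr_map; otherwise the components are
 * decoded one VLC at a time, adding green to blue and red when the stream is
 * decorrelated. Alpha is decoded from vlc[2] or forced to 0. */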
781 static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
782  int decorrelate, int alpha)
783 {
784  int i;
785  OPEN_READER(re, &s->gb);
786 
787  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
788  unsigned int index;
789  int code, n, nb_bits;
790 
791  UPDATE_CACHE(re, &s->gb);
792  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
793  n = s->vlc[4].table[index][1];
794 
795  if (n>0) {
796  code = s->vlc[4].table[index][0];
797  *(uint32_t *) &s->temp[0][4 * i] = s->pix_bgr_map[code];
798  LAST_SKIP_BITS(re, &s->gb, n);
799  } else {
800  if (decorrelate) {
801  VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
802  &s->gb, re, VLC_BITS, 3);
803 
804  UPDATE_CACHE(re, &s->gb);
805  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
806  VLC_INTERN(code, s->vlc[0].table, &s->gb, re, VLC_BITS, 3);
807  s->temp[0][4 * i + B] = code + s->temp[0][4 * i + G];
808 
809  UPDATE_CACHE(re, &s->gb);
810  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
811  VLC_INTERN(code, s->vlc[2].table, &s->gb, re, VLC_BITS, 3);
812  s->temp[0][4 * i + R] = code + s->temp[0][4 * i + G];
813  } else {
814  VLC_INTERN(s->temp[0][4 * i + B], s->vlc[0].table,
815  &s->gb, re, VLC_BITS, 3);
816 
817  UPDATE_CACHE(re, &s->gb);
818  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
819  VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
820  &s->gb, re, VLC_BITS, 3);
821 
822  UPDATE_CACHE(re, &s->gb);
823  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
824  VLC_INTERN(s->temp[0][4 * i + R], s->vlc[2].table,
825  &s->gb, re, VLC_BITS, 3);
826  }
827  }
828  if (alpha) {
829  UPDATE_CACHE(re, &s->gb);
830  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
831  VLC_INTERN(s->temp[0][4 * i + A], s->vlc[2].table,
832  &s->gb, re, VLC_BITS, 3);
833  } else
834  s->temp[0][4 * i + A] = 0;
835  }
836  CLOSE_READER(re, &s->gb);
837 }
838 
839 static void decode_bgr_bitstream(HYuvContext *s, int count)
840 {
841  if (s->decorrelate) {
842  if (s->bitstream_bpp == 24)
843  decode_bgr_1(s, count, 1, 0);
844  else
845  decode_bgr_1(s, count, 1, 1);
846  } else {
847  if (s->bitstream_bpp == 24)
848  decode_bgr_1(s, count, 0, 0);
849  else
850  decode_bgr_1(s, count, 0, 1);
851  }
852 }
853 
854 static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
855 {
856  int h, cy, i;
857  int offset[AV_NUM_DATA_POINTERS];
858 
859  if (!s->avctx->draw_horiz_band)
860  return;
861 
862  h = y - s->last_slice_end;
863  y -= h;
864 
865  if (s->bitstream_bpp == 12)
866  cy = y >> 1;
867  else
868  cy = y;
869 
870  offset[0] = frame->linesize[0] * y;
871  offset[1] = frame->linesize[1] * cy;
872  offset[2] = frame->linesize[2] * cy;
873  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
874  offset[i] = 0;
875  emms_c();
876 
877  s->avctx->draw_horiz_band(s->avctx, frame, offset, y, 3, h);
878 
879  s->last_slice_end = y + h;
880 }
881 
882 static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
883 {
884  if (s->bps <= 8) {
885  return s->llviddsp.add_left_pred(dst, src, w, acc);
886  } else {
887  return s->llviddsp.add_left_pred_int16(( uint16_t *)dst, (const uint16_t *)src, s->n-1, w, acc);
888  }
889 }
890 
891 static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
892 {
893  if (s->bps <= 8) {
894  s->llviddsp.add_bytes(dst, src, w);
895  } else {
896  s->hdsp.add_int16((uint16_t*)dst, (const uint16_t*)src, s->n - 1, w);
897  }
898 }
899 
900 static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
901 {
902  if (s->bps <= 8) {
903  s->llviddsp.add_median_pred(dst, src, diff, w, left, left_top);
904  } else {
905  s->hdsp.add_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src, (const uint16_t *)diff, s->n-1, w, left, left_top);
906  }
907 }
908 
909 static int decode_slice(AVCodecContext *avctx, AVFrame *p, int height,
910  int buf_size, int y_offset, int table_size)
911 {
912  HYuvContext *s = avctx->priv_data;
913  int fake_ystride, fake_ustride, fake_vstride;
914  const int width = s->width;
915  const int width2 = s->width >> 1;
916  int ret;
917 
918  if ((ret = init_get_bits8(&s->gb, s->bitstream_buffer + table_size, buf_size - table_size)) < 0)
919  return ret;
920 
921  fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
922  fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
923  fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];
924 
925  if (s->version > 2) {
926  int plane;
927  for(plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
928  int left, lefttop, y;
929  int w = width;
930  int h = height;
931  int fake_stride = fake_ystride;
932 
933  if (s->chroma && (plane == 1 || plane == 2)) {
934  w >>= s->chroma_h_shift;
935  h >>= s->chroma_v_shift;
936  fake_stride = plane == 1 ? fake_ustride : fake_vstride;
937  }
938 
939  switch (s->predictor) {
940  case LEFT:
941  case PLANE:
942  decode_plane_bitstream(s, w, plane);
943  left = left_prediction(s, p->data[plane], s->temp[0], w, 0);
944 
945  for (y = 1; y < h; y++) {
946  uint8_t *dst = p->data[plane] + p->linesize[plane]*y;
947 
948  decode_plane_bitstream(s, w, plane);
949  left = left_prediction(s, dst, s->temp[0], w, left);
950  if (s->predictor == PLANE) {
951  if (y > s->interlaced) {
952  add_bytes(s, dst, dst - fake_stride, w);
953  }
954  }
955  }
956 
957  break;
958  case MEDIAN:
959  decode_plane_bitstream(s, w, plane);
960  left= left_prediction(s, p->data[plane], s->temp[0], w, 0);
961 
962  y = 1;
963 
964  /* second line is left predicted for interlaced case */
965  if (s->interlaced) {
966  decode_plane_bitstream(s, w, plane);
967  left = left_prediction(s, p->data[plane] + p->linesize[plane], s->temp[0], w, left);
968  y++;
969  }
970 
971  lefttop = p->data[plane][0];
972  decode_plane_bitstream(s, w, plane);
973  add_median_prediction(s, p->data[plane] + fake_stride, p->data[plane], s->temp[0], w, &left, &lefttop);
974  y++;
975 
976  for (; y<h; y++) {
977  uint8_t *dst;
978 
979  decode_plane_bitstream(s, w, plane);
980 
981  dst = p->data[plane] + p->linesize[plane] * y;
982 
983  add_median_prediction(s, dst, dst - fake_stride, s->temp[0], w, &left, &lefttop);
984  }
985 
986  break;
987  }
988  }
989  draw_slice(s, p, height);
990  } else if (s->bitstream_bpp < 24) {
991  int y, cy;
992  int lefty, leftu, leftv;
993  int lefttopy, lefttopu, lefttopv;
994 
995  if (s->yuy2) {
996  p->data[0][3] = get_bits(&s->gb, 8);
997  p->data[0][2] = get_bits(&s->gb, 8);
998  p->data[0][1] = get_bits(&s->gb, 8);
999  p->data[0][0] = get_bits(&s->gb, 8);
1000 
1001  av_log(avctx, AV_LOG_ERROR,
1002  "YUY2 output is not implemented yet\n");
1003  return AVERROR_PATCHWELCOME;
1004  } else {
1005  leftv =
1006  p->data[2][0 + y_offset * p->linesize[2]] = get_bits(&s->gb, 8);
1007  lefty =
1008  p->data[0][1 + y_offset * p->linesize[0]] = get_bits(&s->gb, 8);
1009  leftu =
1010  p->data[1][0 + y_offset * p->linesize[1]] = get_bits(&s->gb, 8);
1011  p->data[0][0 + y_offset * p->linesize[0]] = get_bits(&s->gb, 8);
1012 
1013  switch (s->predictor) {
1014  case LEFT:
1015  case PLANE:
1016  decode_422_bitstream(s, width - 2);
1017  lefty = s->llviddsp.add_left_pred(p->data[0] + p->linesize[0] * y_offset + 2, s->temp[0],
1018  width - 2, lefty);
1019  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1020  leftu = s->llviddsp.add_left_pred(p->data[1] + p->linesize[1] * y_offset + 1, s->temp[1], width2 - 1, leftu);
1021  leftv = s->llviddsp.add_left_pred(p->data[2] + p->linesize[2] * y_offset + 1, s->temp[2], width2 - 1, leftv);
1022  }
1023 
1024  for (cy = y = 1; y < height; y++, cy++) {
1025  uint8_t *ydst, *udst, *vdst;
1026 
1027  if (s->bitstream_bpp == 12) {
1028  decode_gray_bitstream(s, width);
1029 
1030  ydst = p->data[0] + p->linesize[0] * (y + y_offset);
1031 
1032  lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
1033  width, lefty);
1034  if (s->predictor == PLANE) {
1035  if (y > s->interlaced)
1036  s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
1037  }
1038  y++;
1039  if (y >= height)
1040  break;
1041  }
1042 
1043  draw_slice(s, p, y);
1044 
1045  ydst = p->data[0] + p->linesize[0] * (y + y_offset);
1046  udst = p->data[1] + p->linesize[1] * (cy + y_offset);
1047  vdst = p->data[2] + p->linesize[2] * (cy + y_offset);
1048 
1049  decode_422_bitstream(s, width);
1050  lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
1051  width, lefty);
1052  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1053  leftu = s->llviddsp.add_left_pred(udst, s->temp[1], width2, leftu);
1054  leftv = s->llviddsp.add_left_pred(vdst, s->temp[2], width2, leftv);
1055  }
1056  if (s->predictor == PLANE) {
1057  if (cy > s->interlaced) {
1058  s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
1059  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1060  s->llviddsp.add_bytes(udst, udst - fake_ustride, width2);
1061  s->llviddsp.add_bytes(vdst, vdst - fake_vstride, width2);
1062  }
1063  }
1064  }
1065  }
1066  draw_slice(s, p, height);
1067 
1068  break;
1069  case MEDIAN:
1070  /* first line except first 2 pixels is left predicted */
1071  decode_422_bitstream(s, width - 2);
1072  lefty = s->llviddsp.add_left_pred(p->data[0] + 2, s->temp[0],
1073  width - 2, lefty);
1074  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1075  leftu = s->llviddsp.add_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1076  leftv = s->llviddsp.add_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1077  }
1078 
1079  cy = y = 1;
1080 
1081  /* second line is left predicted for interlaced case */
1082  if (s->interlaced) {
1083  decode_422_bitstream(s, width);
1084  lefty = s->llviddsp.add_left_pred(p->data[0] + p->linesize[0],
1085  s->temp[0], width, lefty);
1086  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1087  leftu = s->llviddsp.add_left_pred(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1088  leftv = s->llviddsp.add_left_pred(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1089  }
1090  y++;
1091  cy++;
1092  }
1093 
1094  /* next 4 pixels are left predicted too */
1095  decode_422_bitstream(s, 4);
1096  lefty = s->llviddsp.add_left_pred(p->data[0] + fake_ystride,
1097  s->temp[0], 4, lefty);
1098  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1099  leftu = s->llviddsp.add_left_pred(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1100  leftv = s->llviddsp.add_left_pred(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1101  }
1102 
1103  /* next line except the first 4 pixels is median predicted */
1104  lefttopy = p->data[0][3];
1105  decode_422_bitstream(s, width - 4);
1106  s->llviddsp.add_median_pred(p->data[0] + fake_ystride + 4,
1107  p->data[0] + 4, s->temp[0],
1108  width - 4, &lefty, &lefttopy);
1109  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1110  lefttopu = p->data[1][1];
1111  lefttopv = p->data[2][1];
1112  s->llviddsp.add_median_pred(p->data[1] + fake_ustride + 2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
1113  s->llviddsp.add_median_pred(p->data[2] + fake_vstride + 2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
1114  }
1115  y++;
1116  cy++;
1117 
1118  for (; y < height; y++, cy++) {
1119  uint8_t *ydst, *udst, *vdst;
1120 
1121  if (s->bitstream_bpp == 12) {
1122  while (2 * cy > y) {
1123  decode_gray_bitstream(s, width);
1124  ydst = p->data[0] + p->linesize[0] * y;
1125  s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
1126  s->temp[0], width,
1127  &lefty, &lefttopy);
1128  y++;
1129  }
1130  if (y >= height)
1131  break;
1132  }
1133  draw_slice(s, p, y);
1134 
1135  decode_422_bitstream(s, width);
1136 
1137  ydst = p->data[0] + p->linesize[0] * y;
1138  udst = p->data[1] + p->linesize[1] * cy;
1139  vdst = p->data[2] + p->linesize[2] * cy;
1140 
1141  s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
1142  s->temp[0], width,
1143  &lefty, &lefttopy);
1144  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1145  s->llviddsp.add_median_pred(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1146  s->llviddsp.add_median_pred(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1147  }
1148  }
1149 
1150  draw_slice(s, p, height);
1151  break;
1152  }
1153  }
1154  } else {
1155  int y;
1156  uint8_t left[4];
1157  const int last_line = (y_offset + height - 1) * p->linesize[0];
1158 
1159  if (s->bitstream_bpp == 32) {
1160  left[A] = p->data[0][last_line + A] = get_bits(&s->gb, 8);
1161  left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
1162  left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
1163  left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
1164  } else {
1165  left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
1166  left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
1167  left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
1168  left[A] = p->data[0][last_line + A] = 255;
1169  skip_bits(&s->gb, 8);
1170  }
1171 
1172  if (s->bgr32) {
1173  switch (s->predictor) {
1174  case LEFT:
1175  case PLANE:
1176  decode_bgr_bitstream(s, width - 1);
1177  s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + last_line + 4,
1178  s->temp[0], width - 1, left);
1179 
1180  for (y = height - 2; y >= 0; y--) { // Yes it is stored upside down.
1181  decode_bgr_bitstream(s, width);
1182 
1183  s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + p->linesize[0] * (y + y_offset),
1184  s->temp[0], width, left);
1185  if (s->predictor == PLANE) {
1186  if (s->bitstream_bpp != 32)
1187  left[A] = 0;
1188  if (y < height - 1 - s->interlaced) {
1189  s->llviddsp.add_bytes(p->data[0] + p->linesize[0] * (y + y_offset),
1190  p->data[0] + p->linesize[0] * (y + y_offset) +
1191  fake_ystride, 4 * width);
1192  }
1193  }
1194  }
1195  // just 1 large slice as this is not possible in reverse order
1196  draw_slice(s, p, height);
1197  break;
1198  default:
1199  av_log(avctx, AV_LOG_ERROR,
1200  "prediction type not supported!\n");
1201  }
1202  } else {
1203  av_log(avctx, AV_LOG_ERROR,
1204  "BGR24 output is not implemented yet\n");
1205  return AVERROR_PATCHWELCOME;
1206  }
1207  }
1208 
1209  return 0;
1210 }
1211 
1212 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
1213  AVPacket *avpkt)
1214 {
1215  const uint8_t *buf = avpkt->data;
1216  int buf_size = avpkt->size;
1217  HYuvContext *s = avctx->priv_data;
1218  const int width = s->width;
1219  const int height = s->height;
1220  ThreadFrame frame = { .f = data };
1221  AVFrame *const p = data;
1222  int slice, table_size = 0, ret, nb_slices;
1223  unsigned slices_info_offset;
1224  int slice_height;
1225 
1226  if (buf_size < (width * height + 7)/8)
1227  return AVERROR_INVALIDDATA;
1228 
1229  av_fast_padded_malloc(&s->bitstream_buffer,
1230  &s->bitstream_buffer_size,
1231  buf_size);
1232  if (!s->bitstream_buffer)
1233  return AVERROR(ENOMEM);
1234 
1235  s->bdsp.bswap_buf((uint32_t *) s->bitstream_buffer,
1236  (const uint32_t *) buf, buf_size / 4);
1237 
1238  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
1239  return ret;
1240 
1241  if (s->context) {
1242  table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
1243  if (table_size < 0)
1244  return table_size;
1245  }
1246 
1247  if ((unsigned) (buf_size - table_size) >= INT_MAX / 8)
1248  return AVERROR_INVALIDDATA;
1249 
1250  s->last_slice_end = 0;
1251 
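 /* HYMT packets may carry several independently coded slices: the last
  * 16 bytes then hold, as little-endian 32-bit values, a zero marker, the
  * slice count, the slice height and the offset of a per-slice table of
  * (offset, size) pairs, which the loop below walks from the bottom of the
  * picture upwards. */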
1252  if (avctx->codec_id == AV_CODEC_ID_HYMT &&
1253  (buf_size > 32 && AV_RL32(avpkt->data + buf_size - 16) == 0)) {
1254  slices_info_offset = AV_RL32(avpkt->data + buf_size - 4);
1255  slice_height = AV_RL32(avpkt->data + buf_size - 8);
1256  nb_slices = AV_RL32(avpkt->data + buf_size - 12);
1257  if (nb_slices * 8LL + slices_info_offset > buf_size - 16 ||
1258  slice_height <= 0 || nb_slices * (uint64_t)slice_height > height)
1259  return AVERROR_INVALIDDATA;
1260  } else {
1261  slice_height = height;
1262  nb_slices = 1;
1263  }
1264 
1265  for (slice = 0; slice < nb_slices; slice++) {
1266  int y_offset, slice_offset, slice_size;
1267 
1268  if (nb_slices > 1) {
1269  slice_offset = AV_RL32(avpkt->data + slices_info_offset + slice * 8);
1270  slice_size = AV_RL32(avpkt->data + slices_info_offset + slice * 8 + 4);
1271 
1272  if (slice_offset < 0 || slice_size <= 0 || (slice_offset&3) ||
1273  slice_offset + (int64_t)slice_size > buf_size)
1274  return AVERROR_INVALIDDATA;
1275 
1276  y_offset = height - (slice + 1) * slice_height;
1277  s->bdsp.bswap_buf((uint32_t *)s->bitstream_buffer,
1278  (const uint32_t *)(buf + slice_offset), slice_size / 4);
1279  } else {
1280  y_offset = 0;
1281  slice_offset = 0;
1282  slice_size = buf_size;
1283  }
1284 
1285  ret = decode_slice(avctx, p, slice_height, slice_size, y_offset, table_size);
1286  emms_c();
1287  if (ret < 0)
1288  return ret;
1289  }
1290 
1291  *got_frame = 1;
1292 
1293  return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
1294 }
1295 
1296 AVCodec ff_huffyuv_decoder = {
1297  .name = "huffyuv",
1298  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1299  .type = AVMEDIA_TYPE_VIDEO,
1300  .id = AV_CODEC_ID_HUFFYUV,
1301  .priv_data_size = sizeof(HYuvContext),
1302  .init = decode_init,
1303  .close = decode_end,
1304  .decode = decode_frame,
1305  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
1306  AV_CODEC_CAP_FRAME_THREADS,
1307  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1308 };
1309 
1310 #if CONFIG_FFVHUFF_DECODER
1311 AVCodec ff_ffvhuff_decoder = {
1312  .name = "ffvhuff",
1313  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1314  .type = AVMEDIA_TYPE_VIDEO,
1315  .id = AV_CODEC_ID_FFVHUFF,
1316  .priv_data_size = sizeof(HYuvContext),
1317  .init = decode_init,
1318  .close = decode_end,
1319  .decode = decode_frame,
1320  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
1321  AV_CODEC_CAP_FRAME_THREADS,
1322  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1323 };
1324 #endif /* CONFIG_FFVHUFF_DECODER */
1325 
1326 #if CONFIG_HYMT_DECODER
1327 AVCodec ff_hymt_decoder = {
1328  .name = "hymt",
1329  .long_name = NULL_IF_CONFIG_SMALL("HuffYUV MT"),
1330  .type = AVMEDIA_TYPE_VIDEO,
1331  .id = AV_CODEC_ID_HYMT,
1332  .priv_data_size = sizeof(HYuvContext),
1333  .init = decode_init,
1334  .close = decode_end,
1335  .decode = decode_frame,
1336  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
1337  AV_CODEC_CAP_FRAME_THREADS,
1338  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1339 };
1340 #endif /* CONFIG_HYMT_DECODER */
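
The decoders above are reached through the generic libavcodec API rather than called directly. A minimal sketch of that call sequence (not part of huffyuvdec.c; decode_one_huffyuv_frame, par, pkt and out are illustrative names, and error handling is trimmed to the essentials):

    #include <libavcodec/avcodec.h>

    static int decode_one_huffyuv_frame(const AVCodecParameters *par,
                                        const AVPacket *pkt, AVFrame *out)
    {
        const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_HUFFYUV);
        AVCodecContext *ctx;
        int ret;

        if (!codec)
            return AVERROR_DECODER_NOT_FOUND;
        ctx = avcodec_alloc_context3(codec);
        if (!ctx)
            return AVERROR(ENOMEM);

        /* width, height and extradata come from the container; for
         * version >= 2 files the extradata carries the Huffman tables
         * consumed by read_huffman_tables() above. */
        ret = avcodec_parameters_to_context(ctx, par);
        if (ret >= 0)
            ret = avcodec_open2(ctx, codec, NULL);   /* runs decode_init() */
        if (ret >= 0)
            ret = avcodec_send_packet(ctx, pkt);     /* feeds decode_frame() */
        if (ret >= 0)
            ret = avcodec_receive_frame(ctx, out);   /* planes laid out per avctx->pix_fmt */

        avcodec_free_context(&ctx);
        return ret;
    }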
int plane
Definition: avisynth_c.h:384
static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
Definition: huffyuvdec.c:900
#define OP14bits(dst0, dst1, code)
Definition: huffyuvdec.c:692
#define NULL
Definition: coverity.c:32
const char const char void * val
Definition: avisynth_c.h:863
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:430
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int(* add_left_pred)(uint8_t *dst, const uint8_t *src, ptrdiff_t w, int left)
#define AV_NUM_DATA_POINTERS
Definition: frame.h:269
static void decode_plane_bitstream(HYuvContext *s, int width, int plane)
Definition: huffyuvdec.c:702
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:422
#define READ_2PIX_PLANE(dst0, dst1, plane, OP)
Definition: huffyuvdec.c:687
This structure describes decoded (raw) audio or video data.
Definition: frame.h:268
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:424
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:397
float re
Definition: fft.c:82
int yuy2
Definition: huffyuv.h:65
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:425
int bitstream_bpp
Definition: huffyuv.h:63
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
const char * g
Definition: vf_curves.c:115
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an init_thread_copy() which re-allocates them for other threads.Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities.There will be very little speed gain at this point but it should work.If there are inter-frame dependencies
int acc
Definition: yuv2rgb.c:554
static void decode_422_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:658
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:273
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
int size
Definition: avcodec.h:1478
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:403
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha)
Definition: huffyuvdec.c:781
#define VLC_BITS
Definition: asvdec.c:37
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:391
#define MAX_VLC_N
Definition: huffyuv.h:47
int context
Definition: huffyuv.h:77
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
#define READ_2PIX_PLANE16(dst0, dst1, plane)
Definition: huffyuvdec.c:696
unsigned int bitstream_buffer_size
Definition: huffyuv.h:88
#define src
Definition: vp8dsp.c:254
static int generate_joint_tables(HYuvContext *s)
Definition: huffyuvdec.c:118
AVCodec.
Definition: avcodec.h:3477
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
int height
Definition: huffyuv.h:75
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, &#39;draw_horiz_band&#39; is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:1800
#define READ_2PIX(dst0, dst1, plane1)
Definition: huffyuvdec.c:653
static int read_old_huffman_tables(HYuvContext *s)
Definition: huffyuvdec.c:237
static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
Definition: huffyuvdec.c:854
void(* add_int16)(uint16_t *dst, const uint16_t *src, unsigned mask, int w)
Definition: huffyuvdsp.h:39
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
uint32_t pix_bgr_map[1<< VLC_BITS]
Definition: huffyuv.h:85
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
uint8_t
#define av_cold
Definition: attributes.h:82
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
Definition: bswapdsp.h:25
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
static void decode_bgr_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:839
int bps
Definition: huffyuv.h:67
Multithreading support functions.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1666
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:253
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:421
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:402
#define height
uint8_t * data
Definition: avcodec.h:1477
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
void(* add_median_pred)(uint8_t *dst, const uint8_t *top, const uint8_t *diff, ptrdiff_t w, int *left, int *left_top)
bitstream reader API header.
int vlc_n
Definition: huffyuv.h:69
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:400
int chroma_h_shift
Definition: huffyuv.h:73
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:2785
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:392
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:429
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:883
#define A(x)
Definition: vp56_arith.h:28
uint16_t * temp16[3]
identical to temp but 16bit type
Definition: huffyuv.h:81
#define av_log(a,...)
uint8_t len[4][MAX_VLC_N]
Definition: huffyuv.h:83
av_cold int ff_huffyuv_alloc_temp(HYuvContext *s)
Definition: huffyuv.c:58
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:176
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:260
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define R
Definition: huffyuvdsp.h:34
int chroma_v_shift
Definition: huffyuv.h:74
Definition: huffyuv.h:51
av_cold void ff_huffyuv_common_end(HYuvContext *s)
Definition: huffyuv.c:86
#define init_vlc(vlc, nb_bits, nb_codes,bits, bits_wrap, bits_size,codes, codes_wrap, codes_size,flags)
Definition: vlc.h:38
static const unsigned char classic_add_luma[256]
Definition: huffyuvdec.c:61
AVCodec ff_hymt_decoder
uint8_t * bitstream_buffer
Definition: huffyuv.h:87
int flags
Definition: huffyuv.h:76
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2550
#define B
Definition: huffyuvdsp.h:32
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
const char * r
Definition: vf_curves.c:114
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:431
int bgr32
Definition: huffyuv.h:66
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: avcodec.h:3484
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
uint8_t bits
Definition: vp3data.h:202
int chroma
Definition: huffyuv.h:71
VLC vlc[8]
Definition: huffyuv.h:86
GLsizei count
Definition: opengl_enc.c:108
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
huffyuv codec for libavcodec.
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1037
#define classic_shift_chroma_table_size
Definition: huffyuvdec.c:52
Definition: vlc.h:26
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:225
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:385
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: avcodec.h:975
#define b
Definition: input.c:41
int decorrelate
Definition: huffyuv.h:62
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:406
alias for AV_PIX_FMT_YA8
Definition: pixfmt.h:146
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:371
#define FFMIN(a, b)
Definition: common.h:96
int width
Definition: huffyuv.h:75
int last_slice_end
Definition: huffyuv.h:79
uint8_t interlaced
Definition: mxfenc.c:2218
#define VLC_INTERN(dst, table, gb, name, bits, max_depth)
Subset of GET_VLC for use in hand-roller VLC code.
Definition: huffyuvdec.c:606
#define width
int width
picture width / height.
Definition: avcodec.h:1738
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size+AV_INPUT_BUFFER_PADDING_SIZE]
Definition: huffyuvdec.c:53
uint8_t w
Definition: llviddspenc.c:38
int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int n)
Definition: huffyuv.c:39
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
int n
Definition: avisynth_c.h:760
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:426
uint8_t * temp[3]
Definition: huffyuv.h:80
AVCodec ff_ffvhuff_decoder
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:386
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:405
static void error(const char *err)
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
int alpha
Definition: huffyuv.h:70
if(ret)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:398
static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
Definition: huffyuvdec.c:891
int(* add_left_pred_int16)(uint16_t *dst, const uint16_t *src, unsigned mask, ptrdiff_t w, unsigned left)
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:395
AVCodec ff_huffyuv_decoder
Definition: huffyuvdec.c:1296
Libavcodec external API header.
enum AVCodecID codec_id
Definition: avcodec.h:1575
int yuv
Definition: huffyuv.h:72
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:299
LLVidDSPContext llviddsp
Definition: huffyuv.h:92
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
HuffYUVDSPContext hdsp
Definition: huffyuv.h:90
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
static const int16_t alpha[]
Definition: ilbcdata.h:55
main external API structure.
Definition: avcodec.h:1565
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:360
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new 
state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
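As an illustration of the coefficient scheme summarized above, the following standalone sketch (not FFmpeg code; the function and variable names are invented for the example) rebuilds the signed half-pel coefficients from the stored outer magnitudes and checks that the documented htaps=6 case yields {40,-10,2}.
#include <stdio.h>

/* Rebuild signed Snow half-pel coefficients from the stored outer
 * magnitudes: signs alternate outward from the centre, and the centre
 * tap is implicit, hcoeff[0] = 32 - hcoeff[1] - hcoeff[2] - ... */
static void snow_halfpel_coeffs(const int *mag, int htaps, int *hcoeff)
{
    int half = htaps / 2, sum = 0;

    for (int i = 1; i < half; i++) {
        hcoeff[i] = (i & 1) ? -mag[i - 1] : mag[i - 1]; /* -, +, -, ... */
        sum      += hcoeff[i];
    }
    hcoeff[0] = 32 - sum; /* the per-side coefficients always sum to 32 */
}

int main(void)
{
    int mag[] = { 10, 2 };  /* stored magnitudes for htaps = 6 */
    int hcoeff[3];

    snow_halfpel_coeffs(mag, 6, hcoeff);
    printf("%d %d %d\n", hcoeff[0], hcoeff[1], hcoeff[2]); /* prints: 40 -10 2 */
    return 0;
}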
void * buf
Definition: avisynth_c.h:766
int interlaced
Definition: huffyuv.h:61
int extradata_size
Definition: avcodec.h:1667
void ff_llviddsp_init(LLVidDSPContext *c)
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
int index
Definition: gxfenc.c:89
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
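A minimal usage sketch for the bit reader referenced by these entries (libavcodec's internal get_bits.h API); the field widths below are arbitrary illustration, not huffyuv syntax:
#include "get_bits.h"

static int parse_example(const uint8_t *buf, int size_bytes)
{
    GetBitContext gb;
    int ret, a, b;

    /* bit_size is given in bits; buf must carry the usual input padding */
    ret = init_get_bits(&gb, buf, size_bytes * 8);
    if (ret < 0)
        return ret;

    a = get_bits(&gb, 5);   /* read a 5-bit field */
    skip_bits(&gb, 3);      /* discard 3 bits */
    b = get_bits(&gb, 8);   /* read an 8-bit field */

    return a + b;
}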
av_cold void ff_huffyuvdsp_init(HuffYUVDSPContext *c, enum AVPixelFormat pix_fmt)
Definition: huffyuvdsp.c:83
static void decode_gray_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:763
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:384
#define classic_shift_luma_table_size
Definition: huffyuvdec.c:44
const VDPAUPixFmtMap * map
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:130
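An illustrative reimplementation of what sign_extend() does (interpret the low 'bits' bits of val as a two's-complement value); this is a sketch, not the FFmpeg source:
static int sign_extend_c(int val, unsigned bits)
{
    const unsigned shift = 8 * sizeof(int) - bits;
    /* push the field to the top of the word, then arithmetic-shift it back
     * (assumes the usual arithmetic right shift of negative integers) */
    return (int)((unsigned)val << shift) >> shift;
}
/* sign_extend_c(0x1FF, 9) == -1, sign_extend_c(0x0FF, 9) == 255 */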
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:396
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:404
Test the status of the outputs and forward it to the corresponding inputs; otherwise return FFERROR_NOT_READY. If a filter stores one or a few frames internally for some inputs, it can consider them part of the FIFO and delay acknowledging a status change accordingly. Example code: see doc/filter_design.txt.
int version
Definition: huffyuv.h:64
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:388
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:394
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:282
Predictor predictor
Definition: huffyuv.h:58
static av_cold int decode_init(AVCodecContext *avctx)
Definition: huffyuvdec.c:291
void(* add_hfyu_left_pred_bgr32)(uint8_t *dst, const uint8_t *src, intptr_t w, uint8_t *left)
Definition: huffyuvdsp.h:45
AVCodecContext * avctx
Definition: huffyuv.h:57
GetBitContext gb
Definition: huffyuv.h:59
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
Definition: huffyuv.h:52
#define G
Definition: huffyuvdsp.h:33
#define BITS_LEFT(name, gb)
Definition: get_bits.h:191
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
void(* add_hfyu_median_pred_int16)(uint16_t *dst, const uint16_t *top, const uint16_t *diff, unsigned mask, int w, int *left, int *left_top)
Definition: huffyuvdsp.h:42
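The median predictor behind this DSP hook can be written in scalar form as below; this is a simplified reference for illustration (helper names invented here), not the optimized FFmpeg routine:
#include <stdint.h>

/* Median of three integers, used as the spatial predictor. */
static int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }
    return b < c ? b : (a > c ? a : c);
}

/* Each output sample is the decoded difference plus the median of
 * left, top and (left + top - topleft), wrapped at the sample depth. */
static void add_median_pred_sketch(uint16_t *dst, const uint16_t *top,
                                   const uint16_t *diff, unsigned mask,
                                   int w, int *left, int *left_top)
{
    int l = *left, lt = *left_top;

    for (int i = 0; i < w; i++) {
        int pred = median3(l, top[i], l + top[i] - lt);
        l  = (pred + diff[i]) & mask;
        lt = top[i];
        dst[i] = l;
    }
    *left = l; *left_top = lt;
}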
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:423
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: huffyuvdec.c:1212
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
av_cold void ff_huffyuv_common_init(AVCodecContext *avctx)
Definition: huffyuv.c:71
static av_cold int decode_end(AVCodecContext *avctx)
Definition: huffyuvdec.c:277
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:790
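A hedged sketch of the padding convention this constant encodes: callers hand decoders buffers over-allocated by AV_INPUT_BUFFER_PADDING_SIZE zeroed bytes so that unchecked or optimized bitstream readers may safely read a little past the end (the helper name here is illustrative):
#include <stdint.h>
#include <string.h>
#include "avcodec.h"
#include "libavutil/mem.h"

static uint8_t *alloc_padded_copy(const uint8_t *src, int size)
{
    uint8_t *buf = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);

    if (!buf)
        return NULL;
    memcpy(buf, src, size); /* the trailing padding stays zeroed */
    return buf;
}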
void * priv_data
Definition: avcodec.h:1592
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int len
static const unsigned char classic_add_chroma[256]
Definition: huffyuvdec.c:80
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
Definition: huffyuvdec.c:882
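In scalar form, the left prediction this function applies amounts to a running sum of the decoded residuals; the sketch below is a simplified reference, not the optimized DSP path the decoder actually dispatches to:
#include <stdint.h>

static int left_pred_sketch(uint8_t *dst, const uint8_t *src, int w, int acc)
{
    for (int i = 0; i < w; i++) {
        acc    = (acc + src[i]) & 0xFF; /* sample = left neighbour + residual, mod 256 */
        dst[i] = acc;
    }
    return acc; /* carried into the next call as the new 'left' state */
}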
static const unsigned char classic_shift_luma[classic_shift_luma_table_size+AV_INPUT_BUFFER_PADDING_SIZE]
Definition: huffyuvdec.c:45
FILE * out
Definition: movenc.c:54
#define av_freep(p)
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
static int decode_slice(AVCodecContext *avctx, AVFrame *p, int height, int buf_size, int y_offset, int table_size)
Definition: huffyuvdec.c:909
#define av_always_inline
Definition: attributes.h:39
uint32_t bits[4][MAX_VLC_N]
Definition: huffyuv.h:84
#define LEFT
Definition: cdgraphics.c:166
Filter documentation (doc/filter_design.txt), condensed: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output a filter advertises the list of supported formats (pixel formats for video; channel layout and sample format for audio) as references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires its input and output to have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats() can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to call it again later; filters with complex requirements can use this to set the formats supported on one link from the format negotiated on another. Frame references, ownership and permissions: see doc/filter_design.txt.
void(* add_bytes)(uint8_t *dst, uint8_t *src, ptrdiff_t w)
AV_RL32 (bytestream.h read/write macro template)
Definition: bytestream.h:87
This structure stores compressed data.
Definition: avcodec.h:1454
static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median)
Definition: snowenc.c:1328
BswapDSPContext bdsp
Definition: huffyuv.h:89
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:359
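A hedged sketch of the usual lifecycle around ff_free_vlc(): build a table from per-symbol code lengths and codes with the internal init_vlc() helper, decode with get_vlc2(), then free it. This assumes libavcodec's internal VLC API as it existed around this version; it is illustration, not the huffyuv decoder's exact table setup.
#include "get_bits.h"

#define EX_VLC_BITS 9

static int decode_one_symbol(GetBitContext *gb,
                             const uint8_t *lens, const uint32_t *codes,
                             int nb_codes)
{
    VLC vlc;
    int ret, sym;

    /* lengths first, then codes; the wrap/size pairs describe array strides */
    ret = init_vlc(&vlc, EX_VLC_BITS, nb_codes,
                   lens,  1, 1,
                   codes, 4, 4, 0);
    if (ret < 0)
        return ret;

    sym = get_vlc2(gb, vlc.table, EX_VLC_BITS, 3); /* up to 3 table lookups */
    ff_free_vlc(&vlc);
    return sym;
}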
static int read_len_table(uint8_t *dst, GetBitContext *gb, int n)
Definition: huffyuvdec.c:99
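The code-length tables are run-length coded in the bitstream; the sketch below shows the general shape of such a reader, a simplified rendering of the scheme described in the huffyuv documentation referenced at the top of the file (a 3-bit repeat count and a 5-bit length, with a zero repeat escaping to an 8-bit count), with error reporting trimmed:
#include "get_bits.h"

static int read_len_table_sketch(uint8_t *dst, GetBitContext *gb, int n)
{
    int i = 0;

    while (i < n) {
        int repeat = get_bits(gb, 3);
        int len    = get_bits(gb, 5);

        if (repeat == 0)        /* escape: a longer run count follows */
            repeat = get_bits(gb, 8);
        if (i + repeat > n)     /* the run must not overflow the table */
            return -1;
        while (repeat--)
            dst[i++] = len;
    }
    return 0;
}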
#define OP8bits(dst0, dst1, code)
Definition: huffyuvdec.c:651
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:399
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
Definition: huffyuvdec.c:208
for(j=16;j >0;--j)
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:364