FFmpeg
huffyuvdec.c
1 /*
2  * huffyuv decoder
3  *
4  * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7  * the algorithm used
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  *
25  * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
26  */
27 
28 /**
29  * @file
30  * huffyuv decoder
31  */
32 
33 #define UNCHECKED_BITSTREAM_READER 1
34 
35 #include "avcodec.h"
36 #include "get_bits.h"
37 #include "huffyuv.h"
38 #include "huffyuvdsp.h"
39 #include "lossless_videodsp.h"
40 #include "thread.h"
41 #include "libavutil/imgutils.h"
42 #include "libavutil/pixdesc.h"
43 
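/* Built-in tables for original Huffyuv streams that carry no Huffman tables
 * of their own: the classic_shift_* arrays are run-length coded code-length
 * tables consumed by read_len_table(), and the classic_add_* arrays supply
 * the matching code bits (see read_old_huffman_tables() below). */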
44 #define classic_shift_luma_table_size 42
45 static const unsigned char classic_shift_luma[classic_shift_luma_table_size + AV_INPUT_BUFFER_PADDING_SIZE] = {
46  34, 36, 35, 69, 135, 232, 9, 16, 10, 24, 11, 23, 12, 16, 13, 10,
47  14, 8, 15, 8, 16, 8, 17, 20, 16, 10, 207, 206, 205, 236, 11, 8,
48  10, 21, 9, 23, 8, 8, 199, 70, 69, 68, 0,
49  0,0,0,0,0,0,0,0,
50 };
51 
52 #define classic_shift_chroma_table_size 59
53 static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + AV_INPUT_BUFFER_PADDING_SIZE] = {
54  66, 36, 37, 38, 39, 40, 41, 75, 76, 77, 110, 239, 144, 81, 82, 83,
55  84, 85, 118, 183, 56, 57, 88, 89, 56, 89, 154, 57, 58, 57, 26, 141,
56  57, 56, 58, 57, 58, 57, 184, 119, 214, 245, 116, 83, 82, 49, 80, 79,
57  78, 77, 44, 75, 41, 40, 39, 38, 37, 36, 34, 0,
58  0,0,0,0,0,0,0,0,
59 };
60 
61 static const unsigned char classic_add_luma[256] = {
62  3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
63  73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
64  68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
65  35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
66  37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
67  35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
68  27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
69  15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
70  12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
71  12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
72  18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
73  28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
74  28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
75  62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
76  54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
77  46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
78 };
79 
80 static const unsigned char classic_add_chroma[256] = {
81  3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
82  7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
83  11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
84  43, 45, 76, 81, 46, 82, 75, 55, 56, 144, 58, 80, 60, 74, 147, 63,
85  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
86  80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
87  17, 14, 5, 6, 100, 54, 47, 50, 51, 53, 106, 107, 108, 109, 110, 111,
88  112, 113, 114, 115, 4, 117, 118, 92, 94, 121, 122, 3, 124, 103, 2, 1,
89  0, 129, 130, 131, 120, 119, 126, 125, 136, 137, 138, 139, 140, 141, 142, 134,
90  135, 132, 133, 104, 64, 101, 62, 57, 102, 95, 93, 59, 61, 28, 97, 96,
91  52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
92  19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
93  7, 128, 127, 105, 123, 116, 35, 34, 33, 145, 31, 79, 42, 146, 78, 26,
94  83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
95  14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
96  6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
97 };
98 
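/* A code-length table is serialized as (repeat, value) pairs: 3 bits of
 * repeat count followed by 5 bits of code length; a repeat field of 0 means
 * the real repeat count follows in the next 8 bits. */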
99 static int read_len_table(uint8_t *dst, GetBitContext *gb, int n)
100 {
101  int i, val, repeat;
102 
103  for (i = 0; i < n;) {
104  repeat = get_bits(gb, 3);
105  val = get_bits(gb, 5);
106  if (repeat == 0)
107  repeat = get_bits(gb, 8);
108  if (i + repeat > n || get_bits_left(gb) < 0) {
109  av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
110  return AVERROR_INVALIDDATA;
111  }
112  while (repeat--)
113  dst[i++] = val;
114  }
115  return 0;
116 }
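/* Build the joint VLC tables in s->vlc[4..7], which resolve one table lookup
 * into a pair of symbols (two samples of a plane, or a whole decorrelated
 * RGB triplet) whenever the combined code length fits into VLC_BITS; longer
 * combinations fall back to the per-plane tables in s->vlc[0..3]. */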
117 
118 static int generate_joint_tables(HYuvContext *s)
119 {
120  int ret;
121  uint16_t *symbols = av_mallocz(5 << VLC_BITS);
122  uint16_t *bits;
123  uint8_t *len;
124  if (!symbols)
125  return AVERROR(ENOMEM);
126  bits = symbols + (1 << VLC_BITS);
127  len = (uint8_t *)(bits + (1 << VLC_BITS));
128 
129  if (s->bitstream_bpp < 24 || s->version > 2) {
130  int p, i, y, u;
131  for (p = 0; p < 4; p++) {
132  int p0 = s->version > 2 ? p : 0;
133  for (i = y = 0; y < s->vlc_n; y++) {
134  int len0 = s->len[p0][y];
135  int limit = VLC_BITS - len0;
136  if (limit <= 0 || !len0)
137  continue;
138  if ((sign_extend(y, 8) & (s->vlc_n-1)) != y)
139  continue;
140  for (u = 0; u < s->vlc_n; u++) {
141  int len1 = s->len[p][u];
142  if (len1 > limit || !len1)
143  continue;
144  if ((sign_extend(u, 8) & (s->vlc_n-1)) != u)
145  continue;
146  av_assert0(i < (1 << VLC_BITS));
147  len[i] = len0 + len1;
148  bits[i] = (s->bits[p0][y] << len1) + s->bits[p][u];
149  symbols[i] = (y << 8) + (u & 0xFF);
150  i++;
151  }
152  }
153  ff_free_vlc(&s->vlc[4 + p]);
154  if ((ret = ff_init_vlc_sparse(&s->vlc[4 + p], VLC_BITS, i, len, 1, 1,
155  bits, 2, 2, symbols, 2, 2, 0)) < 0)
156  goto out;
157  }
158  } else {
159  uint8_t (*map)[4] = (uint8_t(*)[4]) s->pix_bgr_map;
160  int i, b, g, r, code;
161  int p0 = s->decorrelate;
162  int p1 = !s->decorrelate;
163  /* Restrict the range to +/-16 because that's pretty much guaranteed
164  * to cover all the combinations that fit in 11 bits total, and it
165  * does not matter if we miss a few rare codes. */
166  for (i = 0, g = -16; g < 16; g++) {
167  int len0 = s->len[p0][g & 255];
168  int limit0 = VLC_BITS - len0;
169  if (limit0 < 2 || !len0)
170  continue;
171  for (b = -16; b < 16; b++) {
172  int len1 = s->len[p1][b & 255];
173  int limit1 = limit0 - len1;
174  if (limit1 < 1 || !len1)
175  continue;
176  code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
177  for (r = -16; r < 16; r++) {
178  int len2 = s->len[2][r & 255];
179  if (len2 > limit1 || !len2)
180  continue;
181  av_assert0(i < (1 << VLC_BITS));
182  len[i] = len0 + len1 + len2;
183  bits[i] = (code << len2) + s->bits[2][r & 255];
184  if (s->decorrelate) {
185  map[i][G] = g;
186  map[i][B] = g + b;
187  map[i][R] = g + r;
188  } else {
189  map[i][B] = g;
190  map[i][G] = b;
191  map[i][R] = r;
192  }
193  i++;
194  }
195  }
196  }
197  ff_free_vlc(&s->vlc[4]);
198  if ((ret = init_vlc(&s->vlc[4], VLC_BITS, i, len, 1, 1,
199  bits, 2, 2, 0)) < 0)
200  goto out;
201  }
202  ret = 0;
203 out:
204  av_freep(&symbols);
205  return ret;
206 }
207 
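/* Parse the transmitted code-length tables (from the extradata, or from the
 * start of each frame when the "context" flag is set), rebuild the per-plane
 * VLCs plus the joint tables, and return the number of bytes consumed. */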
208 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
209 {
210  GetBitContext gb;
211  int i, ret;
212  int count = 3;
213 
214  if ((ret = init_get_bits(&gb, src, length * 8)) < 0)
215  return ret;
216 
217  if (s->version > 2)
218  count = 1 + s->alpha + 2*s->chroma;
219 
220  for (i = 0; i < count; i++) {
221  if ((ret = read_len_table(s->len[i], &gb, s->vlc_n)) < 0)
222  return ret;
223  if ((ret = ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n)) < 0)
224  return ret;
225  ff_free_vlc(&s->vlc[i]);
226  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, s->vlc_n, s->len[i], 1, 1,
227  s->bits[i], 4, 4, 0)) < 0)
228  return ret;
229  }
230 
231  if ((ret = generate_joint_tables(s)) < 0)
232  return ret;
233 
234  return (get_bits_count(&gb) + 7) / 8;
235 }
236 
237 static int read_old_huffman_tables(HYuvContext *s)
238 {
239  GetBitContext gb;
240  int i, ret;
241 
242  init_get_bits(&gb, classic_shift_luma,
243                classic_shift_luma_table_size * 8);
244  if ((ret = read_len_table(s->len[0], &gb, 256)) < 0)
245  return ret;
246 
247  init_get_bits(&gb, classic_shift_chroma,
248                classic_shift_chroma_table_size * 8);
249  if ((ret = read_len_table(s->len[1], &gb, 256)) < 0)
250  return ret;
251 
252  for (i = 0; i < 256; i++)
253  s->bits[0][i] = classic_add_luma[i];
254  for (i = 0; i < 256; i++)
255  s->bits[1][i] = classic_add_chroma[i];
256 
257  if (s->bitstream_bpp >= 24) {
258  memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
259  memcpy(s->len[1], s->len[0], 256 * sizeof(uint8_t));
260  }
261  memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
262  memcpy(s->len[2], s->len[1], 256 * sizeof(uint8_t));
263 
264  for (i = 0; i < 4; i++) {
265  ff_free_vlc(&s->vlc[i]);
266  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
267  s->bits[i], 4, 4, 0)) < 0)
268  return ret;
269  }
270 
271  if ((ret = generate_joint_tables(s)) < 0)
272  return ret;
273 
274  return 0;
275 }
276 
277 static av_cold int decode_end(AVCodecContext *avctx)
278 {
279  HYuvContext *s = avctx->priv_data;
280  int i;
281 
282  ff_huffyuv_common_end(s);
283  av_freep(&s->bitstream_buffer);
284 
285  for (i = 0; i < 8; i++)
286  ff_free_vlc(&s->vlc[i]);
287 
288  return 0;
289 }
290 
291 static av_cold int decode_init(AVCodecContext *avctx)
292 {
293  HYuvContext *s = avctx->priv_data;
294  int ret;
295 
296  ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
297  if (ret < 0)
298  return ret;
299 
300  ff_huffyuvdsp_init(&s->hdsp, avctx->pix_fmt);
301  ff_llviddsp_init(&s->llviddsp);
302  memset(s->vlc, 0, 4 * sizeof(VLC));
303 
304  s->interlaced = avctx->height > 288;
305  s->bgr32 = 1;
306 
307  if (avctx->extradata_size) {
308  if ((avctx->bits_per_coded_sample & 7) &&
309  avctx->bits_per_coded_sample != 12)
310  s->version = 1; // do such files exist at all?
311  else if (avctx->extradata_size > 3 && avctx->extradata[3] == 0)
312  s->version = 2;
313  else
314  s->version = 3;
315  } else
316  s->version = 0;
317 
318  s->bps = 8;
319  s->n = 1<<s->bps;
320  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
321  s->chroma = 1;
322  if (s->version >= 2) {
323  int method, interlace;
324 
325  if (avctx->extradata_size < 4)
326  return AVERROR_INVALIDDATA;
327 
328  method = avctx->extradata[0];
329  s->decorrelate = method & 64 ? 1 : 0;
330  s->predictor = method & 63;
331  if (s->version == 2) {
332  s->bitstream_bpp = avctx->extradata[1];
333  if (s->bitstream_bpp == 0)
334  s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
335  } else {
336  s->bps = (avctx->extradata[1] >> 4) + 1;
337  s->n = 1<<s->bps;
338  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
339  s->chroma_h_shift = avctx->extradata[1] & 3;
340  s->chroma_v_shift = (avctx->extradata[1] >> 2) & 3;
341  s->yuv = !!(avctx->extradata[2] & 1);
342  s->chroma= !!(avctx->extradata[2] & 3);
343  s->alpha = !!(avctx->extradata[2] & 4);
344  }
345  interlace = (avctx->extradata[2] & 0x30) >> 4;
346  s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
347  s->context = avctx->extradata[2] & 0x40 ? 1 : 0;
348 
349  if ((ret = read_huffman_tables(s, avctx->extradata + 4,
350  avctx->extradata_size - 4)) < 0)
351  goto error;
352  } else {
353  switch (avctx->bits_per_coded_sample & 7) {
354  case 1:
355  s->predictor = LEFT;
356  s->decorrelate = 0;
357  break;
358  case 2:
359  s->predictor = LEFT;
360  s->decorrelate = 1;
361  break;
362  case 3:
363  s->predictor = PLANE;
364  s->decorrelate = avctx->bits_per_coded_sample >= 24;
365  break;
366  case 4:
367  s->predictor = MEDIAN;
368  s->decorrelate = 0;
369  break;
370  default:
371  s->predictor = LEFT; // OLD
372  s->decorrelate = 0;
373  break;
374  }
375  s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
376  s->context = 0;
377 
378  if ((ret = read_old_huffman_tables(s)) < 0)
379  goto error;
380  }
381 
382  if (s->version <= 2) {
383  switch (s->bitstream_bpp) {
384  case 12:
385  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
386  s->yuv = 1;
387  break;
388  case 16:
389  if (s->yuy2)
390  avctx->pix_fmt = AV_PIX_FMT_YUYV422;
391  else
392  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
393  s->yuv = 1;
394  break;
395  case 24:
396  if (s->bgr32)
397  avctx->pix_fmt = AV_PIX_FMT_0RGB32;
398  else
399  avctx->pix_fmt = AV_PIX_FMT_BGR24;
400  break;
401  case 32:
402  av_assert0(s->bgr32);
403  avctx->pix_fmt = AV_PIX_FMT_RGB32;
404  s->alpha = 1;
405  break;
406  default:
407  ret = AVERROR_INVALIDDATA;
408  goto error;
409  }
410  av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
411  &s->chroma_h_shift,
412  &s->chroma_v_shift);
413  } else {
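/* Version 3: the output pixel format is selected from a key that packs the
 * stream properties into one integer: bit 10 = chroma planes present,
 * bit 9 = YUV, bit 8 = alpha, bits 4-7 = bits per sample minus one,
 * bits 0-1 = chroma horizontal shift, bits 2-3 = chroma vertical shift
 * (e.g. 0x670 is 8-bit 4:4:4 YUV). */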
414  switch ( (s->chroma<<10) | (s->yuv<<9) | (s->alpha<<8) | ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2)) {
415  case 0x070:
416  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
417  break;
418  case 0x0F0:
419  avctx->pix_fmt = AV_PIX_FMT_GRAY16;
420  break;
421  case 0x470:
422  avctx->pix_fmt = AV_PIX_FMT_GBRP;
423  break;
424  case 0x480:
425  avctx->pix_fmt = AV_PIX_FMT_GBRP9;
426  break;
427  case 0x490:
428  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
429  break;
430  case 0x4B0:
431  avctx->pix_fmt = AV_PIX_FMT_GBRP12;
432  break;
433  case 0x4D0:
434  avctx->pix_fmt = AV_PIX_FMT_GBRP14;
435  break;
436  case 0x4F0:
437  avctx->pix_fmt = AV_PIX_FMT_GBRP16;
438  break;
439  case 0x570:
440  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
441  break;
442  case 0x670:
443  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
444  break;
445  case 0x680:
446  avctx->pix_fmt = AV_PIX_FMT_YUV444P9;
447  break;
448  case 0x690:
449  avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
450  break;
451  case 0x6B0:
452  avctx->pix_fmt = AV_PIX_FMT_YUV444P12;
453  break;
454  case 0x6D0:
455  avctx->pix_fmt = AV_PIX_FMT_YUV444P14;
456  break;
457  case 0x6F0:
458  avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
459  break;
460  case 0x671:
461  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
462  break;
463  case 0x681:
464  avctx->pix_fmt = AV_PIX_FMT_YUV422P9;
465  break;
466  case 0x691:
467  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
468  break;
469  case 0x6B1:
470  avctx->pix_fmt = AV_PIX_FMT_YUV422P12;
471  break;
472  case 0x6D1:
473  avctx->pix_fmt = AV_PIX_FMT_YUV422P14;
474  break;
475  case 0x6F1:
476  avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
477  break;
478  case 0x672:
479  avctx->pix_fmt = AV_PIX_FMT_YUV411P;
480  break;
481  case 0x674:
482  avctx->pix_fmt = AV_PIX_FMT_YUV440P;
483  break;
484  case 0x675:
485  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
486  break;
487  case 0x685:
488  avctx->pix_fmt = AV_PIX_FMT_YUV420P9;
489  break;
490  case 0x695:
491  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
492  break;
493  case 0x6B5:
494  avctx->pix_fmt = AV_PIX_FMT_YUV420P12;
495  break;
496  case 0x6D5:
497  avctx->pix_fmt = AV_PIX_FMT_YUV420P14;
498  break;
499  case 0x6F5:
500  avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
501  break;
502  case 0x67A:
503  avctx->pix_fmt = AV_PIX_FMT_YUV410P;
504  break;
505  case 0x770:
506  avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
507  break;
508  case 0x780:
509  avctx->pix_fmt = AV_PIX_FMT_YUVA444P9;
510  break;
511  case 0x790:
512  avctx->pix_fmt = AV_PIX_FMT_YUVA444P10;
513  break;
514  case 0x7F0:
515  avctx->pix_fmt = AV_PIX_FMT_YUVA444P16;
516  break;
517  case 0x771:
518  avctx->pix_fmt = AV_PIX_FMT_YUVA422P;
519  break;
520  case 0x781:
521  avctx->pix_fmt = AV_PIX_FMT_YUVA422P9;
522  break;
523  case 0x791:
524  avctx->pix_fmt = AV_PIX_FMT_YUVA422P10;
525  break;
526  case 0x7F1:
527  avctx->pix_fmt = AV_PIX_FMT_YUVA422P16;
528  break;
529  case 0x775:
530  avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
531  break;
532  case 0x785:
533  avctx->pix_fmt = AV_PIX_FMT_YUVA420P9;
534  break;
535  case 0x795:
536  avctx->pix_fmt = AV_PIX_FMT_YUVA420P10;
537  break;
538  case 0x7F5:
539  avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
540  break;
541  default:
542  ret = AVERROR_INVALIDDATA;
543  goto error;
544  }
545  }
546 
547  ff_huffyuv_common_init(avctx);
548 
549  if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P || avctx->pix_fmt == AV_PIX_FMT_YUV420P) && avctx->width & 1) {
550  av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
551  ret = AVERROR_INVALIDDATA;
552  goto error;
553  }
554  if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P &&
555  avctx->width % 4) {
556  av_log(avctx, AV_LOG_ERROR, "width must be a multiple of 4 "
557  "for this combination of colorspace and predictor type.\n");
558  ret = AVERROR_INVALIDDATA;
559  goto error;
560  }
561 
562  if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
563  ff_huffyuv_common_end(s);
564  goto error;
565  }
566 
567  return 0;
568  error:
569  decode_end(avctx);
570  return ret;
571 }
572 
573 /** Subset of GET_VLC for use in hand-rolled VLC code */
574 #define VLC_INTERN(dst, table, gb, name, bits, max_depth) \
575  code = table[index][0]; \
576  n = table[index][1]; \
577  if (max_depth > 1 && n < 0) { \
578  LAST_SKIP_BITS(name, gb, bits); \
579  UPDATE_CACHE(name, gb); \
580  \
581  nb_bits = -n; \
582  index = SHOW_UBITS(name, gb, nb_bits) + code; \
583  code = table[index][0]; \
584  n = table[index][1]; \
585  if (max_depth > 2 && n < 0) { \
586  LAST_SKIP_BITS(name, gb, nb_bits); \
587  UPDATE_CACHE(name, gb); \
588  \
589  nb_bits = -n; \
590  index = SHOW_UBITS(name, gb, nb_bits) + code; \
591  code = table[index][0]; \
592  n = table[index][1]; \
593  } \
594  } \
595  dst = code; \
596  LAST_SKIP_BITS(name, gb, n)
597 
598 
599 #define GET_VLC_DUAL(dst0, dst1, name, gb, dtable, table1, table2, \
600  bits, max_depth, OP) \
601  do { \
602  unsigned int index = SHOW_UBITS(name, gb, bits); \
603  int code, n = dtable[index][1]; \
604  \
605  if (n<=0) { \
606  int nb_bits; \
607  VLC_INTERN(dst0, table1, gb, name, bits, max_depth); \
608  \
609  UPDATE_CACHE(re, gb); \
610  index = SHOW_UBITS(name, gb, bits); \
611  VLC_INTERN(dst1, table2, gb, name, bits, max_depth); \
612  } else { \
613  code = dtable[index][0]; \
614  OP(dst0, dst1, code); \
615  LAST_SKIP_BITS(name, gb, n); \
616  } \
617  } while (0)
618 
619 #define OP8bits(dst0, dst1, code) dst0 = code>>8; dst1 = code
620 
621 #define READ_2PIX(dst0, dst1, plane1) \
622  UPDATE_CACHE(re, &s->gb); \
623  GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane1].table, \
624  s->vlc[0].table, s->vlc[plane1].table, VLC_BITS, 3, OP8bits)
625 
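/* READ_2PIX first tries the joint table, which yields two symbols from a
 * single lookup, and only falls back to two single-symbol reads when the
 * pair is not present in the first level of the joint table. */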
626 static void decode_422_bitstream(HYuvContext *s, int count)
627 {
628  int i, icount;
629  OPEN_READER(re, &s->gb);
630  count /= 2;
631 
632  icount = get_bits_left(&s->gb) / (32 * 4);
633  if (count >= icount) {
634  for (i = 0; i < icount; i++) {
635  READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
636  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
637  }
638  for (; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
639  READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
640  if (BITS_LEFT(re, &s->gb) <= 0) break;
641  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
642  }
643  for (; i < count; i++)
644  s->temp[0][2 * i ] = s->temp[1][i] =
645  s->temp[0][2 * i + 1] = s->temp[2][i] = 0;
646  } else {
647  for (i = 0; i < count; i++) {
648  READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
649  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
650  }
651  }
652  CLOSE_READER(re, &s->gb);
653 }
654 
655 #define READ_2PIX_PLANE(dst0, dst1, plane, OP) \
656  UPDATE_CACHE(re, &s->gb); \
657  GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane].table, \
658  s->vlc[plane].table, s->vlc[plane].table, VLC_BITS, 3, OP)
659 
660 #define OP14bits(dst0, dst1, code) dst0 = code>>8; dst1 = sign_extend(code, 8)
661 
662 /* TODO instead of restarting the read when the code isn't in the first level
663  * of the joint table, jump into the 2nd level of the individual table. */
664 #define READ_2PIX_PLANE16(dst0, dst1, plane){\
665  dst0 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
666  dst0 += get_bits(&s->gb, 2);\
667  dst1 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
668  dst1 += get_bits(&s->gb, 2);\
669 }
670 static void decode_plane_bitstream(HYuvContext *s, int width, int plane)
671 {
672  int i, count = width/2;
673 
674  if (s->bps <= 8) {
675  OPEN_READER(re, &s->gb);
676  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
677  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
678  READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
679  }
680  } else {
681  for(i=0; i<count; i++){
682  READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
683  }
684  }
685  if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
686  unsigned int index;
687  int nb_bits, code, n;
688  UPDATE_CACHE(re, &s->gb);
689  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
690  VLC_INTERN(s->temp[0][width-1], s->vlc[plane].table,
691  &s->gb, re, VLC_BITS, 3);
692  }
693  CLOSE_READER(re, &s->gb);
694  } else if (s->bps <= 14) {
695  OPEN_READER(re, &s->gb);
696  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
697  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
698  READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
699  }
700  } else {
701  for(i=0; i<count; i++){
702  READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
703  }
704  }
705  if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
706  unsigned int index;
707  int nb_bits, code, n;
708  UPDATE_CACHE(re, &s->gb);
709  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
710  VLC_INTERN(s->temp16[0][width-1], s->vlc[plane].table,
711  &s->gb, re, VLC_BITS, 3);
712  }
713  CLOSE_READER(re, &s->gb);
714  } else {
715  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
716  for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
717  READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
718  }
719  } else {
720  for(i=0; i<count; i++){
721  READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
722  }
723  }
724  if( width&1 && get_bits_left(&s->gb)>0 ) {
725  int dst = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;
726  s->temp16[0][width-1] = dst + get_bits(&s->gb, 2);
727  }
728  }
729 }
730 
731 static void decode_gray_bitstream(HYuvContext *s, int count)
732 {
733  int i;
734  OPEN_READER(re, &s->gb);
735  count /= 2;
736 
737  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
738  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
739  READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
740  }
741  } else {
742  for (i = 0; i < count; i++) {
743  READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
744  }
745  }
746  CLOSE_READER(re, &s->gb);
747 }
748 
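/* Decode one row of packed BGR(A): when the joint code in s->vlc[4] hits,
 * a whole pre-decorrelated pixel is fetched from pix_bgr_map in one lookup;
 * otherwise the B, G, R (and A) components are decoded separately and, if
 * requested, decorrelated against G here. */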
749 static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
750  int decorrelate, int alpha)
751 {
752  int i;
753  OPEN_READER(re, &s->gb);
754 
755  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
756  unsigned int index;
757  int code, n, nb_bits;
758 
759  UPDATE_CACHE(re, &s->gb);
760  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
761  n = s->vlc[4].table[index][1];
762 
763  if (n>0) {
764  code = s->vlc[4].table[index][0];
765  *(uint32_t *) &s->temp[0][4 * i] = s->pix_bgr_map[code];
766  LAST_SKIP_BITS(re, &s->gb, n);
767  } else {
768  if (decorrelate) {
769  VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
770  &s->gb, re, VLC_BITS, 3);
771 
772  UPDATE_CACHE(re, &s->gb);
773  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
774  VLC_INTERN(code, s->vlc[0].table, &s->gb, re, VLC_BITS, 3);
775  s->temp[0][4 * i + B] = code + s->temp[0][4 * i + G];
776 
777  UPDATE_CACHE(re, &s->gb);
778  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
779  VLC_INTERN(code, s->vlc[2].table, &s->gb, re, VLC_BITS, 3);
780  s->temp[0][4 * i + R] = code + s->temp[0][4 * i + G];
781  } else {
782  VLC_INTERN(s->temp[0][4 * i + B], s->vlc[0].table,
783  &s->gb, re, VLC_BITS, 3);
784 
785  UPDATE_CACHE(re, &s->gb);
786  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
787  VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
788  &s->gb, re, VLC_BITS, 3);
789 
790  UPDATE_CACHE(re, &s->gb);
791  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
792  VLC_INTERN(s->temp[0][4 * i + R], s->vlc[2].table,
793  &s->gb, re, VLC_BITS, 3);
794  }
795  }
796  if (alpha) {
797  UPDATE_CACHE(re, &s->gb);
798  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
799  VLC_INTERN(s->temp[0][4 * i + A], s->vlc[2].table,
800  &s->gb, re, VLC_BITS, 3);
801  } else
802  s->temp[0][4 * i + A] = 0;
803  }
804  CLOSE_READER(re, &s->gb);
805 }
806 
807 static void decode_bgr_bitstream(HYuvContext *s, int count)
808 {
809  if (s->decorrelate) {
810  if (s->bitstream_bpp == 24)
811  decode_bgr_1(s, count, 1, 0);
812  else
813  decode_bgr_1(s, count, 1, 1);
814  } else {
815  if (s->bitstream_bpp == 24)
816  decode_bgr_1(s, count, 0, 0);
817  else
818  decode_bgr_1(s, count, 0, 1);
819  }
820 }
821 
822 static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
823 {
824  int h, cy, i;
825  int offset[AV_NUM_DATA_POINTERS];
826 
827  if (!s->avctx->draw_horiz_band)
828  return;
829 
830  h = y - s->last_slice_end;
831  y -= h;
832 
833  if (s->bitstream_bpp == 12)
834  cy = y >> 1;
835  else
836  cy = y;
837 
838  offset[0] = frame->linesize[0] * y;
839  offset[1] = frame->linesize[1] * cy;
840  offset[2] = frame->linesize[2] * cy;
841  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
842  offset[i] = 0;
843  emms_c();
844 
845  s->avctx->draw_horiz_band(s->avctx, frame, offset, y, 3, h);
846 
847  s->last_slice_end = y + h;
848 }
849 
850 static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
851 {
852  if (s->bps <= 8) {
853  return s->llviddsp.add_left_pred(dst, src, w, acc);
854  } else {
855  return s->llviddsp.add_left_pred_int16(( uint16_t *)dst, (const uint16_t *)src, s->n-1, w, acc);
856  }
857 }
858 
859 static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
860 {
861  if (s->bps <= 8) {
862  s->llviddsp.add_bytes(dst, src, w);
863  } else {
864  s->hdsp.add_int16((uint16_t*)dst, (const uint16_t*)src, s->n - 1, w);
865  }
866 }
867 
868 static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
869 {
870  if (s->bps <= 8) {
871  s->llviddsp.add_median_pred(dst, src, diff, w, left, left_top);
872  } else {
873  s->hdsp.add_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src, (const uint16_t *)diff, s->n-1, w, left, left_top);
874  }
875 }
876 
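/* Decode one slice: version 3 streams are handled plane by plane with
 * LEFT/PLANE/MEDIAN prediction, version 1/2 YUV streams go through the
 * interleaved 4:2:2 / gray paths below, and RGB streams go through the
 * packed BGR32 path (stored bottom-up). */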
877 static int decode_slice(AVCodecContext *avctx, AVFrame *p, int height,
878  int buf_size, int y_offset, int table_size)
879 {
880  HYuvContext *s = avctx->priv_data;
881  int fake_ystride, fake_ustride, fake_vstride;
882  const int width = s->width;
883  const int width2 = s->width >> 1;
884  int ret;
885 
886  if ((ret = init_get_bits8(&s->gb, s->bitstream_buffer + table_size, buf_size - table_size)) < 0)
887  return ret;
888 
889  fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
890  fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
891  fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];
892 
893  if (s->version > 2) {
894  int plane;
895  for(plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
896  int left, lefttop, y;
897  int w = width;
898  int h = height;
899  int fake_stride = fake_ystride;
900 
901  if (s->chroma && (plane == 1 || plane == 2)) {
902  w >>= s->chroma_h_shift;
903  h >>= s->chroma_v_shift;
904  fake_stride = plane == 1 ? fake_ustride : fake_vstride;
905  }
906 
907  switch (s->predictor) {
908  case LEFT:
909  case PLANE:
910  decode_plane_bitstream(s, w, plane);
911  left = left_prediction(s, p->data[plane], s->temp[0], w, 0);
912 
913  for (y = 1; y < h; y++) {
914  uint8_t *dst = p->data[plane] + p->linesize[plane]*y;
915 
916  decode_plane_bitstream(s, w, plane);
917  left = left_prediction(s, dst, s->temp[0], w, left);
918  if (s->predictor == PLANE) {
919  if (y > s->interlaced) {
920  add_bytes(s, dst, dst - fake_stride, w);
921  }
922  }
923  }
924 
925  break;
926  case MEDIAN:
927  decode_plane_bitstream(s, w, plane);
928  left= left_prediction(s, p->data[plane], s->temp[0], w, 0);
929 
930  y = 1;
931  if (y >= h)
932  break;
933 
934  /* second line is left predicted for interlaced case */
935  if (s->interlaced) {
936  decode_plane_bitstream(s, w, plane);
937  left = left_prediction(s, p->data[plane] + p->linesize[plane], s->temp[0], w, left);
938  y++;
939  if (y >= h)
940  break;
941  }
942 
943  lefttop = p->data[plane][0];
944  decode_plane_bitstream(s, w, plane);
945  add_median_prediction(s, p->data[plane] + fake_stride, p->data[plane], s->temp[0], w, &left, &lefttop);
946  y++;
947 
948  for (; y<h; y++) {
949  uint8_t *dst;
950 
951  decode_plane_bitstream(s, w, plane);
952 
953  dst = p->data[plane] + p->linesize[plane] * y;
954 
955  add_median_prediction(s, dst, dst - fake_stride, s->temp[0], w, &left, &lefttop);
956  }
957 
958  break;
959  }
960  }
961  draw_slice(s, p, height);
962  } else if (s->bitstream_bpp < 24) {
963  int y, cy;
964  int lefty, leftu, leftv;
965  int lefttopy, lefttopu, lefttopv;
966 
967  if (s->yuy2) {
968  p->data[0][3] = get_bits(&s->gb, 8);
969  p->data[0][2] = get_bits(&s->gb, 8);
970  p->data[0][1] = get_bits(&s->gb, 8);
971  p->data[0][0] = get_bits(&s->gb, 8);
972 
973  av_log(avctx, AV_LOG_ERROR,
974  "YUY2 output is not implemented yet\n");
975  return AVERROR_PATCHWELCOME;
976  } else {
977  leftv =
978  p->data[2][0 + y_offset * p->linesize[2]] = get_bits(&s->gb, 8);
979  lefty =
980  p->data[0][1 + y_offset * p->linesize[0]] = get_bits(&s->gb, 8);
981  leftu =
982  p->data[1][0 + y_offset * p->linesize[1]] = get_bits(&s->gb, 8);
983  p->data[0][0 + y_offset * p->linesize[0]] = get_bits(&s->gb, 8);
984 
985  switch (s->predictor) {
986  case LEFT:
987  case PLANE:
988  decode_422_bitstream(s, width - 2);
989  lefty = s->llviddsp.add_left_pred(p->data[0] + p->linesize[0] * y_offset + 2, s->temp[0],
990  width - 2, lefty);
991  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
992  leftu = s->llviddsp.add_left_pred(p->data[1] + p->linesize[1] * y_offset + 1, s->temp[1], width2 - 1, leftu);
993  leftv = s->llviddsp.add_left_pred(p->data[2] + p->linesize[2] * y_offset + 1, s->temp[2], width2 - 1, leftv);
994  }
995 
996  for (cy = y = 1; y < height; y++, cy++) {
997  uint8_t *ydst, *udst, *vdst;
998 
999  if (s->bitstream_bpp == 12) {
1000  decode_gray_bitstream(s, width);
1001 
1002  ydst = p->data[0] + p->linesize[0] * (y + y_offset);
1003 
1004  lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
1005  width, lefty);
1006  if (s->predictor == PLANE) {
1007  if (y > s->interlaced)
1008  s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
1009  }
1010  y++;
1011  if (y >= height)
1012  break;
1013  }
1014 
1015  draw_slice(s, p, y);
1016 
1017  ydst = p->data[0] + p->linesize[0] * (y + y_offset);
1018  udst = p->data[1] + p->linesize[1] * (cy + y_offset);
1019  vdst = p->data[2] + p->linesize[2] * (cy + y_offset);
1020 
1021  decode_422_bitstream(s, width);
1022  lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
1023  width, lefty);
1024  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1025  leftu = s->llviddsp.add_left_pred(udst, s->temp[1], width2, leftu);
1026  leftv = s->llviddsp.add_left_pred(vdst, s->temp[2], width2, leftv);
1027  }
1028  if (s->predictor == PLANE) {
1029  if (cy > s->interlaced) {
1030  s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
1031  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1032  s->llviddsp.add_bytes(udst, udst - fake_ustride, width2);
1033  s->llviddsp.add_bytes(vdst, vdst - fake_vstride, width2);
1034  }
1035  }
1036  }
1037  }
1038  draw_slice(s, p, height);
1039 
1040  break;
1041  case MEDIAN:
1042  /* first line except first 2 pixels is left predicted */
1043  decode_422_bitstream(s, width - 2);
1044  lefty = s->llviddsp.add_left_pred(p->data[0] + 2, s->temp[0],
1045  width - 2, lefty);
1046  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1047  leftu = s->llviddsp.add_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1048  leftv = s->llviddsp.add_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1049  }
1050 
1051  cy = y = 1;
1052  if (y >= height)
1053  break;
1054 
1055  /* second line is left predicted for interlaced case */
1056  if (s->interlaced) {
1057  decode_422_bitstream(s, width);
1058  lefty = s->llviddsp.add_left_pred(p->data[0] + p->linesize[0],
1059  s->temp[0], width, lefty);
1060  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1061  leftu = s->llviddsp.add_left_pred(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1062  leftv = s->llviddsp.add_left_pred(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1063  }
1064  y++;
1065  cy++;
1066  if (y >= height)
1067  break;
1068  }
1069 
1070  /* next 4 pixels are left predicted too */
1071  decode_422_bitstream(s, 4);
1072  lefty = s->llviddsp.add_left_pred(p->data[0] + fake_ystride,
1073  s->temp[0], 4, lefty);
1074  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1075  leftu = s->llviddsp.add_left_pred(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1076  leftv = s->llviddsp.add_left_pred(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1077  }
1078 
1079  /* next line except the first 4 pixels is median predicted */
1080  lefttopy = p->data[0][3];
1081  decode_422_bitstream(s, width - 4);
1082  s->llviddsp.add_median_pred(p->data[0] + fake_ystride + 4,
1083  p->data[0] + 4, s->temp[0],
1084  width - 4, &lefty, &lefttopy);
1085  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1086  lefttopu = p->data[1][1];
1087  lefttopv = p->data[2][1];
1088  s->llviddsp.add_median_pred(p->data[1] + fake_ustride + 2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
1089  s->llviddsp.add_median_pred(p->data[2] + fake_vstride + 2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
1090  }
1091  y++;
1092  cy++;
1093 
1094  for (; y < height; y++, cy++) {
1095  uint8_t *ydst, *udst, *vdst;
1096 
1097  if (s->bitstream_bpp == 12) {
1098  while (2 * cy > y) {
1099  decode_gray_bitstream(s, width);
1100  ydst = p->data[0] + p->linesize[0] * y;
1101  s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
1102  s->temp[0], width,
1103  &lefty, &lefttopy);
1104  y++;
1105  }
1106  if (y >= height)
1107  break;
1108  }
1109  draw_slice(s, p, y);
1110 
1111  decode_422_bitstream(s, width);
1112 
1113  ydst = p->data[0] + p->linesize[0] * y;
1114  udst = p->data[1] + p->linesize[1] * cy;
1115  vdst = p->data[2] + p->linesize[2] * cy;
1116 
1117  s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
1118  s->temp[0], width,
1119  &lefty, &lefttopy);
1120  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1121  s->llviddsp.add_median_pred(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1122  s->llviddsp.add_median_pred(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1123  }
1124  }
1125 
1126  draw_slice(s, p, height);
1127  break;
1128  }
1129  }
1130  } else {
1131  int y;
1132  uint8_t left[4];
1133  const int last_line = (y_offset + height - 1) * p->linesize[0];
1134 
1135  if (s->bitstream_bpp == 32) {
1136  left[A] = p->data[0][last_line + A] = get_bits(&s->gb, 8);
1137  left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
1138  left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
1139  left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
1140  } else {
1141  left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
1142  left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
1143  left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
1144  left[A] = p->data[0][last_line + A] = 255;
1145  skip_bits(&s->gb, 8);
1146  }
1147 
1148  if (s->bgr32) {
1149  switch (s->predictor) {
1150  case LEFT:
1151  case PLANE:
1152  decode_bgr_bitstream(s, width - 1);
1153  s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + last_line + 4,
1154  s->temp[0], width - 1, left);
1155 
1156  for (y = height - 2; y >= 0; y--) { // Yes it is stored upside down.
1157  decode_bgr_bitstream(s, width);
1158 
1159  s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + p->linesize[0] * (y + y_offset),
1160  s->temp[0], width, left);
1161  if (s->predictor == PLANE) {
1162  if (s->bitstream_bpp != 32)
1163  left[A] = 0;
1164  if (y < height - 1 - s->interlaced) {
1165  s->llviddsp.add_bytes(p->data[0] + p->linesize[0] * (y + y_offset),
1166  p->data[0] + p->linesize[0] * (y + y_offset) +
1167  fake_ystride, 4 * width);
1168  }
1169  }
1170  }
1171  // just 1 large slice as this is not possible in reverse order
1172  draw_slice(s, p, height);
1173  break;
1174  default:
1175  av_log(avctx, AV_LOG_ERROR,
1176  "prediction type not supported!\n");
1177  }
1178  } else {
1179  av_log(avctx, AV_LOG_ERROR,
1180  "BGR24 output is not implemented yet\n");
1181  return AVERROR_PATCHWELCOME;
1182  }
1183  }
1184 
1185  return 0;
1186 }
1187 
1188 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
1189  AVPacket *avpkt)
1190 {
1191  const uint8_t *buf = avpkt->data;
1192  int buf_size = avpkt->size;
1193  HYuvContext *s = avctx->priv_data;
1194  const int width = s->width;
1195  const int height = s->height;
1196  ThreadFrame frame = { .f = data };
1197  AVFrame *const p = data;
1198  int slice, table_size = 0, ret, nb_slices;
1199  unsigned slices_info_offset;
1200  int slice_height;
1201 
1202  if (buf_size < (width * height + 7)/8)
1203  return AVERROR_INVALIDDATA;
1204 
1205  av_fast_padded_malloc(&s->bitstream_buffer,
1206  &s->bitstream_buffer_size,
1207  buf_size);
1208  if (!s->bitstream_buffer)
1209  return AVERROR(ENOMEM);
1210 
1211  s->bdsp.bswap_buf((uint32_t *) s->bitstream_buffer,
1212  (const uint32_t *) buf, buf_size / 4);
1213 
1214  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
1215  return ret;
1216 
1217  if (s->context) {
1218  table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
1219  if (table_size < 0)
1220  return table_size;
1221  }
1222 
1223  if ((unsigned) (buf_size - table_size) >= INT_MAX / 8)
1224  return AVERROR_INVALIDDATA;
1225 
1226  s->last_slice_end = 0;
1227 
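/* HYMT frames may end with a slice table: the last 16 bytes hold a zero
 * marker, the slice count, the slice height and the offset of a per-slice
 * (offset, size) list; each slice is then byte-swapped and decoded
 * separately, bottom slice first. */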
1228  if (avctx->codec_id == AV_CODEC_ID_HYMT &&
1229  (buf_size > 32 && AV_RL32(avpkt->data + buf_size - 16) == 0)) {
1230  slices_info_offset = AV_RL32(avpkt->data + buf_size - 4);
1231  slice_height = AV_RL32(avpkt->data + buf_size - 8);
1232  nb_slices = AV_RL32(avpkt->data + buf_size - 12);
1233  if (nb_slices * 8LL + slices_info_offset > buf_size - 16 ||
1234  s->chroma_v_shift ||
1235  slice_height <= 0 || nb_slices * (uint64_t)slice_height > height)
1236  return AVERROR_INVALIDDATA;
1237  } else {
1238  slice_height = height;
1239  nb_slices = 1;
1240  }
1241 
1242  for (slice = 0; slice < nb_slices; slice++) {
1243  int y_offset, slice_offset, slice_size;
1244 
1245  if (nb_slices > 1) {
1246  slice_offset = AV_RL32(avpkt->data + slices_info_offset + slice * 8);
1247  slice_size = AV_RL32(avpkt->data + slices_info_offset + slice * 8 + 4);
1248 
1249  if (slice_offset < 0 || slice_size <= 0 || (slice_offset&3) ||
1250  slice_offset + (int64_t)slice_size > buf_size)
1251  return AVERROR_INVALIDDATA;
1252 
1253  y_offset = height - (slice + 1) * slice_height;
1254  s->bdsp.bswap_buf((uint32_t *)s->bitstream_buffer,
1255  (const uint32_t *)(buf + slice_offset), slice_size / 4);
1256  } else {
1257  y_offset = 0;
1258  slice_offset = 0;
1259  slice_size = buf_size;
1260  }
1261 
1262  ret = decode_slice(avctx, p, slice_height, slice_size, y_offset, table_size);
1263  emms_c();
1264  if (ret < 0)
1265  return ret;
1266  }
1267 
1268  *got_frame = 1;
1269 
1270  return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
1271 }
1272 
1273 AVCodec ff_huffyuv_decoder = {
1274  .name = "huffyuv",
1275  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1276  .type = AVMEDIA_TYPE_VIDEO,
1277  .id = AV_CODEC_ID_HUFFYUV,
1278  .priv_data_size = sizeof(HYuvContext),
1279  .init = decode_init,
1280  .close = decode_end,
1281  .decode = decode_frame,
1284 };
1285 
1286 #if CONFIG_FFVHUFF_DECODER
1287 AVCodec ff_ffvhuff_decoder = {
1288  .name = "ffvhuff",
1289  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1290  .type = AVMEDIA_TYPE_VIDEO,
1291  .id = AV_CODEC_ID_FFVHUFF,
1292  .priv_data_size = sizeof(HYuvContext),
1293  .init = decode_init,
1294  .close = decode_end,
1295  .decode = decode_frame,
1298 };
1299 #endif /* CONFIG_FFVHUFF_DECODER */
1300 
1301 #if CONFIG_HYMT_DECODER
1302 AVCodec ff_hymt_decoder = {
1303  .name = "hymt",
1304  .long_name = NULL_IF_CONFIG_SMALL("HuffYUV MT"),
1305  .type = AVMEDIA_TYPE_VIDEO,
1306  .id = AV_CODEC_ID_HYMT,
1307  .priv_data_size = sizeof(HYuvContext),
1308  .init = decode_init,
1309  .close = decode_end,
1310  .decode = decode_frame,
1313 };
1314 #endif /* CONFIG_HYMT_DECODER */
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:30
VLC_INTERN
#define VLC_INTERN(dst, table, gb, name, bits, max_depth)
Subset of GET_VLC for use in hand-roller VLC code.
Definition: huffyuvdec.c:574
add_bytes
static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
Definition: huffyuvdec.c:859
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:442
HYuvContext
Definition: huffyuv.h:55
AVCodec
AVCodec.
Definition: codec.h:197
generate_joint_tables
static int generate_joint_tables(HYuvContext *s)
Definition: huffyuvdec.c:118
decorrelate
static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median)
Definition: snowenc.c:1330
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
r
const char * r
Definition: vf_curves.c:116
acc
int acc
Definition: yuv2rgb.c:555
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AV_CODEC_ID_HUFFYUV
@ AV_CODEC_ID_HUFFYUV
Definition: codec_id.h:74
out
FILE * out
Definition: movenc.c:54
AV_CODEC_ID_HYMT
@ AV_CODEC_ID_HYMT
Definition: codec_id.h:291
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
MAX_VLC_N
#define MAX_VLC_N
Definition: huffyuv.h:47
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:434
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
pixdesc.h
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:441
w
uint8_t w
Definition: llviddspenc.c:39
decode_gray_bitstream
static void decode_gray_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:731
R
#define R
Definition: huffyuvdsp.h:34
AVPacket::data
uint8_t * data
Definition: packet.h:369
huffyuvdsp.h
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:436
init_vlc
#define init_vlc(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
Definition: vlc.h:38
b
#define b
Definition: input.c:41
READ_2PIX
#define READ_2PIX(dst0, dst1, plane1)
Definition: huffyuvdec.c:621
classic_add_luma
static const unsigned char classic_add_luma[256]
Definition: huffyuvdec.c:61
data
const char data[16]
Definition: mxf.c:142
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:399
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
MEDIAN
@ MEDIAN
Definition: huffyuv.h:52
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: huffyuvdec.c:291
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
OP8bits
#define OP8bits(dst0, dst1, code)
Definition: huffyuvdec.c:619
OP14bits
#define OP14bits(dst0, dst1, code)
Definition: huffyuvdec.c:660
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:437
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
thread.h
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
add_median_prediction
static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
Definition: huffyuvdec.c:868
ff_huffyuv_alloc_temp
av_cold int ff_huffyuv_alloc_temp(HYuvContext *s)
Definition: huffyuv.c:58
A
#define A(x)
Definition: vp56_arith.h:28
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:433
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:417
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:415
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:443
GetBitContext
Definition: get_bits.h:61
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:397
classic_shift_chroma_table_size
#define classic_shift_chroma_table_size
Definition: huffyuvdec.c:52
val
static double val(void *priv, double ch)
Definition: aeval.c:76
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
classic_shift_luma
static const unsigned char classic_shift_luma[classic_shift_luma_table_size+AV_INPUT_BUFFER_PADDING_SIZE]
Definition: huffyuvdec.c:45
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:383
decode_bgr_1
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha)
Definition: huffyuvdec.c:749
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:402
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:411
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
read_huffman_tables
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
Definition: huffyuvdec.c:208
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:638
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:412
draw_slice
static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
Definition: huffyuvdec.c:822
g
const char * g
Definition: vf_curves.c:117
bits
uint8_t bits
Definition: vp3data.h:141
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:396
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:410
get_bits.h
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:431
AV_CODEC_ID_FFVHUFF
@ AV_CODEC_ID_FFVHUFF
Definition: codec_id.h:116
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:546
if
if(ret)
Definition: filter_design.txt:179
ff_huffyuv_common_end
av_cold void ff_huffyuv_common_end(HYuvContext *s)
Definition: huffyuv.c:86
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:108
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:418
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
src
#define src
Definition: vp8dsp.c:255
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
ff_huffyuv_generate_bits_table
int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int n)
Definition: huffyuv.c:39
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
ff_init_vlc_sparse
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:323
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
VLC_BITS
#define VLC_BITS
Definition: cfhd.h:98
READ_2PIX_PLANE16
#define READ_2PIX_PLANE16(dst0, dst1, plane)
Definition: huffyuvdec.c:664
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:414
index
int index
Definition: gxfenc.c:89
READ_2PIX_PLANE
#define READ_2PIX_PLANE(dst0, dst1, plane, OP)
Definition: huffyuvdec.c:655
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
left_prediction
static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
Definition: huffyuvdec.c:850
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
AVPacket::size
int size
Definition: packet.h:370
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:404
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:319
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:406
decode_end
static av_cold int decode_end(AVCodecContext *avctx)
Definition: huffyuvdec.c:277
LEFT
#define LEFT
Definition: cdgraphics.c:166
ff_huffyuvdsp_init
av_cold void ff_huffyuvdsp_init(HuffYUVDSPContext *c, enum AVPixelFormat pix_fmt)
Definition: huffyuvdsp.c:83
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
height
#define height
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:372
FFMIN
#define FFMIN(a, b)
Definition: common.h:105
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
classic_add_chroma
static const unsigned char classic_add_chroma[256]
Definition: huffyuvdec.c:80
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:438
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
decode_slice
static int decode_slice(AVCodecContext *avctx, AVFrame *p, int height, int buf_size, int y_offset, int table_size)
Definition: huffyuvdec.c:877
decode_plane_bitstream
static void decode_plane_bitstream(HYuvContext *s, int width, int plane)
Definition: huffyuvdec.c:670
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1740
interlaced
uint8_t interlaced
Definition: mxfenc.c:2208
i
int i
Definition: input.c:407
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:637
ff_hymt_decoder
AVCodec ff_hymt_decoder
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:416
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:50
av_always_inline
#define av_always_inline
Definition: attributes.h:49
uint8_t
uint8_t
Definition: audio_convert.c:194
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:204
len
int len
Definition: vorbis_enc_data.h:452
PLANE
@ PLANE
Definition: huffyuv.h:51
AVCodecContext::height
int height
Definition: avcodec.h:709
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:746
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:398
avcodec.h
G
#define G
Definition: huffyuvdsp.h:33
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:376
decode_bgr_bitstream
static void decode_bgr_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:807
classic_shift_chroma
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size+AV_INPUT_BUFFER_PADDING_SIZE]
Definition: huffyuvdec.c:53
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:435
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:403
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:215
left
Definition: snow.txt:386
classic_shift_luma_table_size
#define classic_shift_luma_table_size
Definition: huffyuvdec.c:44
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
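The declaration above is Doxygen's rendering of the *_TMPL macro helpers in bytestream.h; in effect AV_RL32(p) reads an unaligned 32-bit little-endian value. A minimal sketch of the equivalent read (rl32_equivalent is an illustrative name, not FFmpeg API):

#include <stdint.h>

/* Yields the same value as AV_RL32(buf) on any host endianness,
 * assuming buf points at least 4 readable bytes. */
static uint32_t rl32_equivalent(const uint8_t *buf)
{
    return (uint32_t)buf[0]         |
           ((uint32_t)buf[1] << 8)  |
           ((uint32_t)buf[2] << 16) |
           ((uint32_t)buf[3] << 24);
}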
BITS_LEFT
#define BITS_LEFT(name, gb)
Definition: get_bits.h:191
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:408
B
#define B
Definition: huffyuvdsp.h:32
ff_llviddsp_init
void ff_llviddsp_init(LLVidDSPContext *c)
Definition: lossless_videodsp.c:112
decode_422_bitstream
static void decode_422_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:626
AVCodecContext
main external API structure.
Definition: avcodec.h:536
ThreadFrame
Definition: thread.h:34
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
VLC
Definition: vlc.h:26
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:130
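sign_extend(val, bits) interprets the low "bits" bits of val as a two's-complement number and widens it to a full int. A hedged, self-contained sketch of the same operation (sign_extend_sketch is an illustrative name; a 32-bit int is assumed):

#include <stdint.h>

static int sign_extend_sketch(int val, unsigned bits)
{
    const unsigned shift = 32 - bits;                  /* assumes 32-bit int */
    /* shift the field to the top, then arithmetic-shift it back down */
    return (int32_t)((uint32_t)val << shift) >> shift;
}

/* e.g. sign_extend_sketch(0xFF, 8) == -1, sign_extend_sketch(0x7F, 8) == 127 */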
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
lossless_videodsp.h
map
const VDPAUPixFmtMap * map
Definition: hwcontext_vdpau.c:71
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:136
ff_ffvhuff_decoder
AVCodec ff_ffvhuff_decoder
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
ff_huffyuv_common_init
av_cold void ff_huffyuv_common_init(AVCodecContext *avctx)
Definition: huffyuv.c:71
decode_frame
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: huffyuvdec.c:1188
AVPacket
This structure stores compressed data.
Definition: packet.h:346
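AVPacket is the unit of compressed input handed to the decoder. A caller-side sketch of the usual decode pattern using the public send/receive API, with error handling trimmed (decode_one is an illustrative helper name, not FFmpeg API):

#include <libavcodec/avcodec.h>

static int decode_one(AVCodecContext *avctx, AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);    /* feed one packet of compressed data */
    if (ret < 0)
        return ret;
    return avcodec_receive_frame(avctx, frame);   /* AVERROR(EAGAIN) means more input is needed */
}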
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:563
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
read_len_table
static int read_len_table(uint8_t *dst, GetBitContext *gb, int n)
Definition: huffyuvdec.c:99
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:709
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:349
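Because linesize[] is the per-plane byte stride and may exceed the visible width due to alignment padding, consumers of the decoder's output should step through rows with it rather than with width. A minimal sketch for an 8-bit planar format (visit_luma_rows is an illustrative name):

#include <stddef.h>
#include <stdint.h>
#include <libavutil/frame.h>

static void visit_luma_rows(const AVFrame *frame)
{
    /* linesize[0] can also be negative for bottom-up layouts, hence ptrdiff_t */
    for (int y = 0; y < frame->height; y++) {
        const uint8_t *row = frame->data[0] + (ptrdiff_t)y * frame->linesize[0];
        /* process frame->width samples of row here */
    }
}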
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
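A caller can receive decoded slices early by installing draw_horiz_band on the AVCodecContext, but only when the decoder advertises this capability. A hedged sketch of that wiring (on_band and maybe_enable_bands are illustrative names, not FFmpeg API):

#include <libavcodec/avcodec.h>

static void on_band(AVCodecContext *s, const AVFrame *src,
                    int offset[AV_NUM_DATA_POINTERS],
                    int y, int type, int height)
{
    /* rows [y, y + height) of src have been decoded and can be consumed now */
}

static void maybe_enable_bands(AVCodecContext *avctx, const AVCodec *codec)
{
    if (codec->capabilities & AV_CODEC_CAP_DRAW_HORIZ_BAND)
        avctx->draw_horiz_band = on_band;
}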
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ff_huffyuv_decoder
AVCodec ff_huffyuv_decoder
Definition: huffyuvdec.c:1273
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
h
h
Definition: vp9dsp_template.c:2038
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:409
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
Definition: imgutils.c:317
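The decoder validates caller-supplied width/height with this helper before allocating image buffers. A minimal caller-side sketch of the same check (check_dims is an illustrative name):

#include <libavutil/imgutils.h>

static int check_dims(void *log_ctx, unsigned w, unsigned h)
{
    int ret = av_image_check_size(w, h, 0, log_ctx);  /* 0 on success, negative AVERROR on failure */
    if (ret < 0)
        return ret;                                   /* dimensions too large or otherwise invalid */
    return 0;
}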
huffyuv.h
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:176
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:407
re
float re
Definition: fft.c:82
read_old_huffman_tables
static int read_old_huffman_tables(HYuvContext *s)
Definition: huffyuvdec.c:237