FFmpeg
huffyuvdec.c
Go to the documentation of this file.
1 /*
2  * huffyuv decoder
3  *
4  * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7  * the algorithm used
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  *
25  * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
26  */
27 
28 /**
29  * @file
30  * huffyuv decoder
31  */
32 
33 #define UNCHECKED_BITSTREAM_READER 1
34 
35 #include "avcodec.h"
36 #include "get_bits.h"
37 #include "huffyuv.h"
38 #include "huffyuvdsp.h"
39 #include "internal.h"
40 #include "lossless_videodsp.h"
41 #include "thread.h"
42 #include "libavutil/imgutils.h"
43 #include "libavutil/pixdesc.h"
44 
45 #define classic_shift_luma_table_size 42
47  34, 36, 35, 69, 135, 232, 9, 16, 10, 24, 11, 23, 12, 16, 13, 10,
48  14, 8, 15, 8, 16, 8, 17, 20, 16, 10, 207, 206, 205, 236, 11, 8,
49  10, 21, 9, 23, 8, 8, 199, 70, 69, 68, 0,
50  0,0,0,0,0,0,0,0,
51 };
52 
53 #define classic_shift_chroma_table_size 59
55  66, 36, 37, 38, 39, 40, 41, 75, 76, 77, 110, 239, 144, 81, 82, 83,
56  84, 85, 118, 183, 56, 57, 88, 89, 56, 89, 154, 57, 58, 57, 26, 141,
57  57, 56, 58, 57, 58, 57, 184, 119, 214, 245, 116, 83, 82, 49, 80, 79,
58  78, 77, 44, 75, 41, 40, 39, 38, 37, 36, 34, 0,
59  0,0,0,0,0,0,0,0,
60 };
61 
/* Code values for the built-in ("classic") luma Huffman table; copied into
 * s->bits[0] by read_old_huffman_tables() for streams without extradata. */
static const unsigned char classic_add_luma[256] = {
     3,   9,   5,  12,  10,  35,  32,  29,  27,  50,  48,  45,  44,  41,  39,  37,
    73,  70,  68,  65,  64,  61,  58,  56,  53,  50,  49,  46,  44,  41,  38,  36,
    68,  65,  63,  61,  58,  55,  53,  51,  48,  46,  45,  43,  41,  39,  38,  36,
    35,  33,  32,  30,  29,  27,  26,  25,  48,  47,  46,  44,  43,  41,  40,  39,
    37,  36,  35,  34,  32,  31,  30,  28,  27,  26,  24,  23,  22,  20,  19,  37,
    35,  34,  33,  31,  30,  29,  27,  26,  24,  23,  21,  20,  18,  17,  15,  29,
    27,  26,  24,  22,  21,  19,  17,  16,  14,  26,  25,  23,  21,  19,  18,  16,
    15,  27,  25,  23,  21,  19,  17,  16,  14,  26,  25,  23,  21,  18,  17,  14,
    12,  17,  19,  13,   4,   9,   2,  11,   1,   7,   8,   0,  16,   3,  14,   6,
    12,  10,   5,  15,  18,  11,  10,  13,  15,  16,  19,  20,  22,  24,  27,  15,
    18,  20,  22,  24,  26,  14,  17,  20,  22,  24,  27,  15,  18,  20,  23,  25,
    28,  16,  19,  22,  25,  28,  32,  36,  21,  25,  29,  33,  38,  42,  45,  49,
    28,  31,  34,  37,  40,  42,  44,  47,  49,  50,  52,  54,  56,  57,  59,  60,
    62,  64,  66,  67,  69,  35,  37,  39,  40,  42,  43,  45,  47,  48,  51,  52,
    54,  55,  57,  59,  60,  62,  63,  66,  67,  69,  71,  72,  38,  40,  42,  43,
    46,  47,  49,  51,  26,  28,  30,  31,  33,  34,  18,  19,  11,  13,   7,   8,
};
80 
/* Code values for the built-in ("classic") chroma Huffman table; copied into
 * s->bits[1] by read_old_huffman_tables() for streams without extradata. */
static const unsigned char classic_add_chroma[256] = {
     3,   1,   2,   2,   2,   2,   3,   3,   7,   5,   7,   5,   8,   6,  11,   9,
     7,  13,  11,  10,   9,   8,   7,   5,   9,   7,   6,   4,   7,   5,   8,   7,
    11,   8,  13,  11,  19,  15,  22,  23,  20,  33,  32,  28,  27,  29,  51,  77,
    43,  45,  76,  81,  46,  82,  75,  55,  56, 144,  58,  80,  60,  74, 147,  63,
   143,  65,  66,  67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
    80,  81,  82,  83,  84,  85,  86,  87,  88,  89,  90,  91,  27,  30,  21,  22,
    17,  14,   5,   6, 100,  54,  47,  50,  51,  53, 106, 107, 108, 109, 110, 111,
   112, 113, 114, 115,   4, 117, 118,  92,  94, 121, 122,   3, 124, 103,   2,   1,
     0, 129, 130, 131, 120, 119, 126, 125, 136, 137, 138, 139, 140, 141, 142, 134,
   135, 132, 133, 104,  64, 101,  62,  57, 102,  95,  93,  59,  61,  28,  97,  96,
    52,  49,  48,  29,  32,  25,  24,  46,  23,  98,  45,  44,  43,  20,  42,  41,
    19,  18,  99,  40,  15,  39,  38,  16,  13,  12,  11,  37,  10,   9,   8,  36,
     7, 128, 127, 105, 123, 116,  35,  34,  33, 145,  31,  79,  42, 146,  78,  26,
    83,  48,  49,  50,  44,  47,  26,  31,  30,  18,  17,  19,  21,  24,  25,  13,
    14,  16,  17,  18,  20,  21,  12,  14,  15,   9,  10,   6,   9,   6,   5,   8,
     6,  12,   8,  10,   7,   9,   6,   4,   6,   2,   2,   3,   3,   3,   3,   2,
};
99 
100 static int read_len_table(uint8_t *dst, GetBitContext *gb, int n)
101 {
102  int i, val, repeat;
103 
104  for (i = 0; i < n;) {
105  repeat = get_bits(gb, 3);
106  val = get_bits(gb, 5);
107  if (repeat == 0)
108  repeat = get_bits(gb, 8);
109  if (i + repeat > n || get_bits_left(gb) < 0) {
110  av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
111  return AVERROR_INVALIDDATA;
112  }
113  while (repeat--)
114  dst[i++] = val;
115  }
116  return 0;
117 }
118 
120 {
121  int ret;
122  uint16_t *symbols = av_mallocz(5 << VLC_BITS);
123  uint16_t *bits;
124  uint8_t *len;
125  if (!symbols)
126  return AVERROR(ENOMEM);
127  bits = symbols + (1 << VLC_BITS);
128  len = (uint8_t *)(bits + (1 << VLC_BITS));
129 
130  if (s->bitstream_bpp < 24 || s->version > 2) {
131  int p, i, y, u;
132  for (p = 0; p < 4; p++) {
133  int p0 = s->version > 2 ? p : 0;
134  for (i = y = 0; y < s->vlc_n; y++) {
135  int len0 = s->len[p0][y];
136  int limit = VLC_BITS - len0;
137  if (limit <= 0 || !len0)
138  continue;
139  if ((sign_extend(y, 8) & (s->vlc_n-1)) != y)
140  continue;
141  for (u = 0; u < s->vlc_n; u++) {
142  int len1 = s->len[p][u];
143  if (len1 > limit || !len1)
144  continue;
145  if ((sign_extend(u, 8) & (s->vlc_n-1)) != u)
146  continue;
147  av_assert0(i < (1 << VLC_BITS));
148  len[i] = len0 + len1;
149  bits[i] = (s->bits[p0][y] << len1) + s->bits[p][u];
150  symbols[i] = (y << 8) + (u & 0xFF);
151  i++;
152  }
153  }
154  ff_free_vlc(&s->vlc[4 + p]);
155  if ((ret = ff_init_vlc_sparse(&s->vlc[4 + p], VLC_BITS, i, len, 1, 1,
156  bits, 2, 2, symbols, 2, 2, 0)) < 0)
157  goto out;
158  }
159  } else {
160  uint8_t (*map)[4] = (uint8_t(*)[4]) s->pix_bgr_map;
161  int i, b, g, r, code;
162  int p0 = s->decorrelate;
163  int p1 = !s->decorrelate;
164  /* Restrict the range to +/-16 because that's pretty much guaranteed
165  * to cover all the combinations that fit in 11 bits total, and it
166  * does not matter if we miss a few rare codes. */
167  for (i = 0, g = -16; g < 16; g++) {
168  int len0 = s->len[p0][g & 255];
169  int limit0 = VLC_BITS - len0;
170  if (limit0 < 2 || !len0)
171  continue;
172  for (b = -16; b < 16; b++) {
173  int len1 = s->len[p1][b & 255];
174  int limit1 = limit0 - len1;
175  if (limit1 < 1 || !len1)
176  continue;
177  code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
178  for (r = -16; r < 16; r++) {
179  int len2 = s->len[2][r & 255];
180  if (len2 > limit1 || !len2)
181  continue;
182  av_assert0(i < (1 << VLC_BITS));
183  len[i] = len0 + len1 + len2;
184  bits[i] = (code << len2) + s->bits[2][r & 255];
185  if (s->decorrelate) {
186  map[i][G] = g;
187  map[i][B] = g + b;
188  map[i][R] = g + r;
189  } else {
190  map[i][B] = g;
191  map[i][G] = b;
192  map[i][R] = r;
193  }
194  i++;
195  }
196  }
197  }
198  ff_free_vlc(&s->vlc[4]);
199  if ((ret = init_vlc(&s->vlc[4], VLC_BITS, i, len, 1, 1,
200  bits, 2, 2, 0)) < 0)
201  goto out;
202  }
203  ret = 0;
204 out:
205  av_freep(&symbols);
206  return ret;
207 }
208 
209 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
210 {
211  GetBitContext gb;
212  int i, ret;
213  int count = 3;
214 
215  if ((ret = init_get_bits(&gb, src, length * 8)) < 0)
216  return ret;
217 
218  if (s->version > 2)
219  count = 1 + s->alpha + 2*s->chroma;
220 
221  for (i = 0; i < count; i++) {
222  if ((ret = read_len_table(s->len[i], &gb, s->vlc_n)) < 0)
223  return ret;
224  if ((ret = ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n)) < 0)
225  return ret;
226  ff_free_vlc(&s->vlc[i]);
227  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, s->vlc_n, s->len[i], 1, 1,
228  s->bits[i], 4, 4, 0)) < 0)
229  return ret;
230  }
231 
232  if ((ret = generate_joint_tables(s)) < 0)
233  return ret;
234 
235  return (get_bits_count(&gb) + 7) / 8;
236 }
237 
239 {
240  GetBitContext gb;
241  int i, ret;
242 
245  if ((ret = read_len_table(s->len[0], &gb, 256)) < 0)
246  return ret;
247 
250  if ((ret = read_len_table(s->len[1], &gb, 256)) < 0)
251  return ret;
252 
253  for (i = 0; i < 256; i++)
254  s->bits[0][i] = classic_add_luma[i];
255  for (i = 0; i < 256; i++)
256  s->bits[1][i] = classic_add_chroma[i];
257 
258  if (s->bitstream_bpp >= 24) {
259  memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
260  memcpy(s->len[1], s->len[0], 256 * sizeof(uint8_t));
261  }
262  memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
263  memcpy(s->len[2], s->len[1], 256 * sizeof(uint8_t));
264 
265  for (i = 0; i < 4; i++) {
266  ff_free_vlc(&s->vlc[i]);
267  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
268  s->bits[i], 4, 4, 0)) < 0)
269  return ret;
270  }
271 
272  if ((ret = generate_joint_tables(s)) < 0)
273  return ret;
274 
275  return 0;
276 }
277 
279 {
280  HYuvContext *s = avctx->priv_data;
281  int i;
282 
284  av_freep(&s->bitstream_buffer);
285 
286  for (i = 0; i < 8; i++)
287  ff_free_vlc(&s->vlc[i]);
288 
289  return 0;
290 }
291 
293 {
294  HYuvContext *s = avctx->priv_data;
295  int ret;
296 
297  ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
298  if (ret < 0)
299  return ret;
300 
301  ff_huffyuvdsp_init(&s->hdsp, avctx->pix_fmt);
302  ff_llviddsp_init(&s->llviddsp);
303  memset(s->vlc, 0, 4 * sizeof(VLC));
304 
305  s->interlaced = avctx->height > 288;
306  s->bgr32 = 1;
307 
308  if (avctx->extradata_size) {
309  if ((avctx->bits_per_coded_sample & 7) &&
310  avctx->bits_per_coded_sample != 12)
311  s->version = 1; // do such files exist at all?
312  else if (avctx->extradata_size > 3 && avctx->extradata[3] == 0)
313  s->version = 2;
314  else
315  s->version = 3;
316  } else
317  s->version = 0;
318 
319  s->bps = 8;
320  s->n = 1<<s->bps;
321  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
322  s->chroma = 1;
323  if (s->version >= 2) {
324  int method, interlace;
325 
326  if (avctx->extradata_size < 4)
327  return AVERROR_INVALIDDATA;
328 
329  method = avctx->extradata[0];
330  s->decorrelate = method & 64 ? 1 : 0;
331  s->predictor = method & 63;
332  if (s->version == 2) {
333  s->bitstream_bpp = avctx->extradata[1];
334  if (s->bitstream_bpp == 0)
335  s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
336  } else {
337  s->bps = (avctx->extradata[1] >> 4) + 1;
338  s->n = 1<<s->bps;
339  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
340  s->chroma_h_shift = avctx->extradata[1] & 3;
341  s->chroma_v_shift = (avctx->extradata[1] >> 2) & 3;
342  s->yuv = !!(avctx->extradata[2] & 1);
343  s->chroma= !!(avctx->extradata[2] & 3);
344  s->alpha = !!(avctx->extradata[2] & 4);
345  }
346  interlace = (avctx->extradata[2] & 0x30) >> 4;
347  s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
348  s->context = avctx->extradata[2] & 0x40 ? 1 : 0;
349 
350  if ((ret = read_huffman_tables(s, avctx->extradata + 4,
351  avctx->extradata_size - 4)) < 0)
352  return ret;
353  } else {
354  switch (avctx->bits_per_coded_sample & 7) {
355  case 1:
356  s->predictor = LEFT;
357  s->decorrelate = 0;
358  break;
359  case 2:
360  s->predictor = LEFT;
361  s->decorrelate = 1;
362  break;
363  case 3:
364  s->predictor = PLANE;
365  s->decorrelate = avctx->bits_per_coded_sample >= 24;
366  break;
367  case 4:
368  s->predictor = MEDIAN;
369  s->decorrelate = 0;
370  break;
371  default:
372  s->predictor = LEFT; // OLD
373  s->decorrelate = 0;
374  break;
375  }
376  s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
377  s->context = 0;
378 
379  if ((ret = read_old_huffman_tables(s)) < 0)
380  return ret;
381  }
382 
383  if (s->version <= 2) {
384  switch (s->bitstream_bpp) {
385  case 12:
386  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
387  s->yuv = 1;
388  break;
389  case 16:
390  if (s->yuy2)
391  avctx->pix_fmt = AV_PIX_FMT_YUYV422;
392  else
393  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
394  s->yuv = 1;
395  break;
396  case 24:
397  if (s->bgr32)
398  avctx->pix_fmt = AV_PIX_FMT_0RGB32;
399  else
400  avctx->pix_fmt = AV_PIX_FMT_BGR24;
401  break;
402  case 32:
403  av_assert0(s->bgr32);
404  avctx->pix_fmt = AV_PIX_FMT_RGB32;
405  s->alpha = 1;
406  break;
407  default:
408  return AVERROR_INVALIDDATA;
409  }
411  &s->chroma_h_shift,
412  &s->chroma_v_shift);
413  } else {
414  switch ( (s->chroma<<10) | (s->yuv<<9) | (s->alpha<<8) | ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2)) {
415  case 0x070:
416  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
417  break;
418  case 0x0F0:
419  avctx->pix_fmt = AV_PIX_FMT_GRAY16;
420  break;
421  case 0x470:
422  avctx->pix_fmt = AV_PIX_FMT_GBRP;
423  break;
424  case 0x480:
425  avctx->pix_fmt = AV_PIX_FMT_GBRP9;
426  break;
427  case 0x490:
428  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
429  break;
430  case 0x4B0:
431  avctx->pix_fmt = AV_PIX_FMT_GBRP12;
432  break;
433  case 0x4D0:
434  avctx->pix_fmt = AV_PIX_FMT_GBRP14;
435  break;
436  case 0x4F0:
437  avctx->pix_fmt = AV_PIX_FMT_GBRP16;
438  break;
439  case 0x570:
440  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
441  break;
442  case 0x670:
443  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
444  break;
445  case 0x680:
446  avctx->pix_fmt = AV_PIX_FMT_YUV444P9;
447  break;
448  case 0x690:
449  avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
450  break;
451  case 0x6B0:
452  avctx->pix_fmt = AV_PIX_FMT_YUV444P12;
453  break;
454  case 0x6D0:
455  avctx->pix_fmt = AV_PIX_FMT_YUV444P14;
456  break;
457  case 0x6F0:
458  avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
459  break;
460  case 0x671:
461  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
462  break;
463  case 0x681:
464  avctx->pix_fmt = AV_PIX_FMT_YUV422P9;
465  break;
466  case 0x691:
467  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
468  break;
469  case 0x6B1:
470  avctx->pix_fmt = AV_PIX_FMT_YUV422P12;
471  break;
472  case 0x6D1:
473  avctx->pix_fmt = AV_PIX_FMT_YUV422P14;
474  break;
475  case 0x6F1:
476  avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
477  break;
478  case 0x672:
479  avctx->pix_fmt = AV_PIX_FMT_YUV411P;
480  break;
481  case 0x674:
482  avctx->pix_fmt = AV_PIX_FMT_YUV440P;
483  break;
484  case 0x675:
485  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
486  break;
487  case 0x685:
488  avctx->pix_fmt = AV_PIX_FMT_YUV420P9;
489  break;
490  case 0x695:
491  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
492  break;
493  case 0x6B5:
494  avctx->pix_fmt = AV_PIX_FMT_YUV420P12;
495  break;
496  case 0x6D5:
497  avctx->pix_fmt = AV_PIX_FMT_YUV420P14;
498  break;
499  case 0x6F5:
500  avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
501  break;
502  case 0x67A:
503  avctx->pix_fmt = AV_PIX_FMT_YUV410P;
504  break;
505  case 0x770:
506  avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
507  break;
508  case 0x780:
509  avctx->pix_fmt = AV_PIX_FMT_YUVA444P9;
510  break;
511  case 0x790:
513  break;
514  case 0x7F0:
516  break;
517  case 0x771:
518  avctx->pix_fmt = AV_PIX_FMT_YUVA422P;
519  break;
520  case 0x781:
521  avctx->pix_fmt = AV_PIX_FMT_YUVA422P9;
522  break;
523  case 0x791:
525  break;
526  case 0x7F1:
528  break;
529  case 0x775:
530  avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
531  break;
532  case 0x785:
533  avctx->pix_fmt = AV_PIX_FMT_YUVA420P9;
534  break;
535  case 0x795:
537  break;
538  case 0x7F5:
540  break;
541  default:
542  return AVERROR_INVALIDDATA;
543  }
544  }
545 
546  ff_huffyuv_common_init(avctx);
547 
548  if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P || avctx->pix_fmt == AV_PIX_FMT_YUV420P) && avctx->width & 1) {
549  av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
550  return AVERROR_INVALIDDATA;
551  }
552  if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P &&
553  avctx->width % 4) {
554  av_log(avctx, AV_LOG_ERROR, "width must be a multiple of 4 "
555  "for this combination of colorspace and predictor type.\n");
556  return AVERROR_INVALIDDATA;
557  }
558 
559  if ((ret = ff_huffyuv_alloc_temp(s)) < 0)
560  return ret;
561 
562  return 0;
563 }
564 
/**
 * Subset of GET_VLC for use in hand-rolled VLC reading code.
 * Resolves one (possibly multi-level) lookup: the caller must have
 * `index`, `code`, `n` and `nb_bits` declared, with `index` already set
 * from SHOW_UBITS.  Descends up to max_depth table levels, stores the
 * decoded symbol in dst and skips the consumed bits.
 */
#define VLC_INTERN(dst, table, gb, name, bits, max_depth) \
    code = table[index][0]; \
    n = table[index][1]; \
    if (max_depth > 1 && n < 0) { \
        /* first-level miss: skip the prefix and index the 2nd level */ \
        LAST_SKIP_BITS(name, gb, bits); \
        UPDATE_CACHE(name, gb); \
 \
        nb_bits = -n; \
        index = SHOW_UBITS(name, gb, nb_bits) + code; \
        code = table[index][0]; \
        n = table[index][1]; \
        if (max_depth > 2 && n < 0) { \
            LAST_SKIP_BITS(name, gb, nb_bits); \
            UPDATE_CACHE(name, gb); \
 \
            nb_bits = -n; \
            index = SHOW_UBITS(name, gb, nb_bits) + code; \
            code = table[index][0]; \
            n = table[index][1]; \
        } \
    } \
    dst = code; \
    LAST_SKIP_BITS(name, gb, n)
589 
590 
/**
 * Decode two symbols with a single table probe when possible: dtable is a
 * joint table whose codes map to pre-combined symbol pairs (built by
 * generate_joint_tables()).  On a miss (n <= 0) the two symbols are decoded
 * independently from table1 and table2 via VLC_INTERN.
 */
#define GET_VLC_DUAL(dst0, dst1, name, gb, dtable, table1, table2, \
                     bits, max_depth, OP) \
    do { \
        unsigned int index = SHOW_UBITS(name, gb, bits); \
        int code, n = dtable[index][1]; \
 \
        if (n<=0) { \
            int nb_bits; \
            VLC_INTERN(dst0, table1, gb, name, bits, max_depth); \
 \
            UPDATE_CACHE(re, gb); \
            index = SHOW_UBITS(name, gb, bits); \
            VLC_INTERN(dst1, table2, gb, name, bits, max_depth); \
        } else { \
            code = dtable[index][0]; \
            OP(dst0, dst1, code); \
            LAST_SKIP_BITS(name, gb, n); \
        } \
    } while (0)

/* Split a joint pair code into its two 8-bit halves. */
#define OP8bits(dst0, dst1, code) dst0 = code>>8; dst1 = code

/* Read a luma sample plus one chroma sample of plane1, preferring the
 * joint table in s->vlc[4+plane1]. */
#define READ_2PIX(dst0, dst1, plane1) \
    UPDATE_CACHE(re, &s->gb); \
    GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane1].table, \
                 s->vlc[0].table, s->vlc[plane1].table, VLC_BITS, 3, OP8bits)
617 
/* Decode `count` 4:2:2 samples: luma into s->temp[0], U into s->temp[1],
 * V into s->temp[2].  Samples past the end of the bitstream are zeroed. */
static void decode_422_bitstream(HYuvContext *s, int count)
{
    int i, icount;
    OPEN_READER(re, &s->gb);
    count /= 2; /* each iteration produces 2 luma + 1 U + 1 V sample */

    /* number of iterations guaranteed safe without per-read exhaustion
     * checks (4 reads of at most 32 bits each per iteration) */
    icount = get_bits_left(&s->gb) / (32 * 4);
    if (count >= icount) {
        /* unchecked up to icount, then test remaining bits per pair */
        for (i = 0; i < icount; i++) {
            READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
        }
        for (; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
            READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
            if (BITS_LEFT(re, &s->gb) <= 0) break;
            READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
        }
        /* zero-fill whatever could not be decoded */
        for (; i < count; i++)
            s->temp[0][2 * i    ] = s->temp[1][i] =
            s->temp[0][2 * i + 1] = s->temp[2][i] = 0;
    } else {
        /* plenty of bits: fast path without checks */
        for (i = 0; i < count; i++) {
            READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
        }
    }
    CLOSE_READER(re, &s->gb);
}
646 
/* Read two samples of the same plane, trying the joint pair table first. */
#define READ_2PIX_PLANE(dst0, dst1, plane, OP) \
    UPDATE_CACHE(re, &s->gb); \
    GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane].table, \
                 s->vlc[plane].table, s->vlc[plane].table, VLC_BITS, 3, OP)

/* Unpack a joint pair code for >8-bit planes; the low half is
 * sign-extended (matches the sign_extend filtering used when the joint
 * tables are built in generate_joint_tables()). */
#define OP14bits(dst0, dst1, code) dst0 = code>>8; dst1 = sign_extend(code, 8)

/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
#define READ_2PIX_PLANE16(dst0, dst1, plane){\
    dst0 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
    dst0 += get_bits(&s->gb, 2);\
    dst1 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
    dst1 += get_bits(&s->gb, 2);\
}
/* Decode one row (`width` samples) of a single plane into s->temp[0]
 * (bps <= 8) or s->temp16[0] (bps > 8), selecting the reader variant by
 * sample depth. */
static void decode_plane_bitstream(HYuvContext *s, int width, int plane)
{
    int i, count = width/2;

    if (s->bps <= 8) {
        OPEN_READER(re, &s->gb);
        /* when bits may run out before the row ends, check per pair */
        if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
            for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
                READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
            }
        } else {
            for(i=0; i<count; i++){
                READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
            }
        }
        /* odd width: one trailing sample via the single-symbol table */
        if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
            unsigned int index;
            int nb_bits, code, n;
            UPDATE_CACHE(re, &s->gb);
            index = SHOW_UBITS(re, &s->gb, VLC_BITS);
            VLC_INTERN(s->temp[0][width-1], s->vlc[plane].table,
                       &s->gb, re, VLC_BITS, 3);
        }
        CLOSE_READER(re, &s->gb);
    } else if (s->bps <= 14) {
        OPEN_READER(re, &s->gb);
        if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
            for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
                READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
            }
        } else {
            for(i=0; i<count; i++){
                READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
            }
        }
        if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
            unsigned int index;
            int nb_bits, code, n;
            UPDATE_CACHE(re, &s->gb);
            index = SHOW_UBITS(re, &s->gb, VLC_BITS);
            VLC_INTERN(s->temp16[0][width-1], s->vlc[plane].table,
                       &s->gb, re, VLC_BITS, 3);
        }
        CLOSE_READER(re, &s->gb);
    } else {
        /* >14 bit: VLC for the high bits plus 2 raw low bits per sample */
        if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
            for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
                READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
            }
        } else {
            for(i=0; i<count; i++){
                READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
            }
        }
        if( width&1 && get_bits_left(&s->gb)>0 ) {
            int dst = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;
            s->temp16[0][width-1] = dst + get_bits(&s->gb, 2);
        }
    }
}
722 
/* Decode `count` grayscale (luma-only) samples, pairwise, into s->temp[0]. */
static void decode_gray_bitstream(HYuvContext *s, int count)
{
    int i;
    OPEN_READER(re, &s->gb);
    count /= 2; /* one READ_2PIX yields two samples */

    /* when bits may run out, bound the loop by the remaining bit count */
    if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
        for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
            READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
        }
    } else {
        for (i = 0; i < count; i++) {
            READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
        }
    }
    CLOSE_READER(re, &s->gb);
}
740 
/* Decode `count` packed BGR(A) pixels into s->temp[0], 4 bytes per pixel.
 * decorrelate/alpha are compile-time constants so decode_bgr_bitstream()
 * gets four specialized inlined variants. */
static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
                                          int decorrelate, int alpha)
{
    int i;
    OPEN_READER(re, &s->gb);

    for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
        unsigned int index;
        int code, n, nb_bits;

        UPDATE_CACHE(re, &s->gb);
        index = SHOW_UBITS(re, &s->gb, VLC_BITS);
        n = s->vlc[4].table[index][1];

        if (n>0) {
            /* joint hit: one code yields all three components at once */
            code = s->vlc[4].table[index][0];
            *(uint32_t *) &s->temp[0][4 * i] = s->pix_bgr_map[code];
            LAST_SKIP_BITS(re, &s->gb, n);
        } else {
            /* miss: decode the components with their individual tables */
            if (decorrelate) {
                /* B and R are stored as differences against G */
                VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
                           &s->gb, re, VLC_BITS, 3);

                UPDATE_CACHE(re, &s->gb);
                index = SHOW_UBITS(re, &s->gb, VLC_BITS);
                VLC_INTERN(code, s->vlc[0].table, &s->gb, re, VLC_BITS, 3);
                s->temp[0][4 * i + B] = code + s->temp[0][4 * i + G];

                UPDATE_CACHE(re, &s->gb);
                index = SHOW_UBITS(re, &s->gb, VLC_BITS);
                VLC_INTERN(code, s->vlc[2].table, &s->gb, re, VLC_BITS, 3);
                s->temp[0][4 * i + R] = code + s->temp[0][4 * i + G];
            } else {
                VLC_INTERN(s->temp[0][4 * i + B], s->vlc[0].table,
                           &s->gb, re, VLC_BITS, 3);

                UPDATE_CACHE(re, &s->gb);
                index = SHOW_UBITS(re, &s->gb, VLC_BITS);
                VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
                           &s->gb, re, VLC_BITS, 3);

                UPDATE_CACHE(re, &s->gb);
                index = SHOW_UBITS(re, &s->gb, VLC_BITS);
                VLC_INTERN(s->temp[0][4 * i + R], s->vlc[2].table,
                           &s->gb, re, VLC_BITS, 3);
            }
        }
        if (alpha) {
            UPDATE_CACHE(re, &s->gb);
            index = SHOW_UBITS(re, &s->gb, VLC_BITS);
            VLC_INTERN(s->temp[0][4 * i + A], s->vlc[2].table,
                       &s->gb, re, VLC_BITS, 3);
        } else
            s->temp[0][4 * i + A] = 0;
    }
    CLOSE_READER(re, &s->gb);
}
798 
799 static void decode_bgr_bitstream(HYuvContext *s, int count)
800 {
801  if (s->decorrelate) {
802  if (s->bitstream_bpp == 24)
803  decode_bgr_1(s, count, 1, 0);
804  else
805  decode_bgr_1(s, count, 1, 1);
806  } else {
807  if (s->bitstream_bpp == 24)
808  decode_bgr_1(s, count, 0, 0);
809  else
810  decode_bgr_1(s, count, 0, 1);
811  }
812 }
813 
814 static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
815 {
816  int h, cy, i;
818 
819  if (!s->avctx->draw_horiz_band)
820  return;
821 
822  h = y - s->last_slice_end;
823  y -= h;
824 
825  if (s->bitstream_bpp == 12)
826  cy = y >> 1;
827  else
828  cy = y;
829 
830  offset[0] = frame->linesize[0] * y;
831  offset[1] = frame->linesize[1] * cy;
832  offset[2] = frame->linesize[2] * cy;
833  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
834  offset[i] = 0;
835  emms_c();
836 
837  s->avctx->draw_horiz_band(s->avctx, frame, offset, y, 3, h);
838 
839  s->last_slice_end = y + h;
840 }
841 
842 static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
843 {
844  if (s->bps <= 8) {
845  return s->llviddsp.add_left_pred(dst, src, w, acc);
846  } else {
847  return s->llviddsp.add_left_pred_int16(( uint16_t *)dst, (const uint16_t *)src, s->n-1, w, acc);
848  }
849 }
850 
851 static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
852 {
853  if (s->bps <= 8) {
854  s->llviddsp.add_bytes(dst, src, w);
855  } else {
856  s->hdsp.add_int16((uint16_t*)dst, (const uint16_t*)src, s->n - 1, w);
857  }
858 }
859 
860 static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
861 {
862  if (s->bps <= 8) {
863  s->llviddsp.add_median_pred(dst, src, diff, w, left, left_top);
864  } else {
865  s->hdsp.add_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src, (const uint16_t *)diff, s->n-1, w, left, left_top);
866  }
867 }
868 
869 static int decode_slice(AVCodecContext *avctx, AVFrame *p, int height,
870  int buf_size, int y_offset, int table_size)
871 {
872  HYuvContext *s = avctx->priv_data;
873  int fake_ystride, fake_ustride, fake_vstride;
874  const int width = s->width;
875  const int width2 = s->width >> 1;
876  int ret;
877 
878  if ((ret = init_get_bits8(&s->gb, s->bitstream_buffer + table_size, buf_size - table_size)) < 0)
879  return ret;
880 
881  fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
882  fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
883  fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];
884 
885  if (s->version > 2) {
886  int plane;
887  for(plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
888  int left, lefttop, y;
889  int w = width;
890  int h = height;
891  int fake_stride = fake_ystride;
892 
893  if (s->chroma && (plane == 1 || plane == 2)) {
894  w >>= s->chroma_h_shift;
895  h >>= s->chroma_v_shift;
896  fake_stride = plane == 1 ? fake_ustride : fake_vstride;
897  }
898 
899  switch (s->predictor) {
900  case LEFT:
901  case PLANE:
902  decode_plane_bitstream(s, w, plane);
903  left = left_prediction(s, p->data[plane], s->temp[0], w, 0);
904 
905  for (y = 1; y < h; y++) {
906  uint8_t *dst = p->data[plane] + p->linesize[plane]*y;
907 
908  decode_plane_bitstream(s, w, plane);
909  left = left_prediction(s, dst, s->temp[0], w, left);
910  if (s->predictor == PLANE) {
911  if (y > s->interlaced) {
912  add_bytes(s, dst, dst - fake_stride, w);
913  }
914  }
915  }
916 
917  break;
918  case MEDIAN:
919  decode_plane_bitstream(s, w, plane);
920  left= left_prediction(s, p->data[plane], s->temp[0], w, 0);
921 
922  y = 1;
923  if (y >= h)
924  break;
925 
926  /* second line is left predicted for interlaced case */
927  if (s->interlaced) {
928  decode_plane_bitstream(s, w, plane);
929  left = left_prediction(s, p->data[plane] + p->linesize[plane], s->temp[0], w, left);
930  y++;
931  if (y >= h)
932  break;
933  }
934 
935  lefttop = p->data[plane][0];
936  decode_plane_bitstream(s, w, plane);
937  add_median_prediction(s, p->data[plane] + fake_stride, p->data[plane], s->temp[0], w, &left, &lefttop);
938  y++;
939 
940  for (; y<h; y++) {
941  uint8_t *dst;
942 
943  decode_plane_bitstream(s, w, plane);
944 
945  dst = p->data[plane] + p->linesize[plane] * y;
946 
947  add_median_prediction(s, dst, dst - fake_stride, s->temp[0], w, &left, &lefttop);
948  }
949 
950  break;
951  }
952  }
953  draw_slice(s, p, height);
954  } else if (s->bitstream_bpp < 24) {
955  int y, cy;
956  int lefty, leftu, leftv;
957  int lefttopy, lefttopu, lefttopv;
958 
959  if (s->yuy2) {
960  p->data[0][3] = get_bits(&s->gb, 8);
961  p->data[0][2] = get_bits(&s->gb, 8);
962  p->data[0][1] = get_bits(&s->gb, 8);
963  p->data[0][0] = get_bits(&s->gb, 8);
964 
965  av_log(avctx, AV_LOG_ERROR,
966  "YUY2 output is not implemented yet\n");
967  return AVERROR_PATCHWELCOME;
968  } else {
969  leftv =
970  p->data[2][0 + y_offset * p->linesize[2]] = get_bits(&s->gb, 8);
971  lefty =
972  p->data[0][1 + y_offset * p->linesize[0]] = get_bits(&s->gb, 8);
973  leftu =
974  p->data[1][0 + y_offset * p->linesize[1]] = get_bits(&s->gb, 8);
975  p->data[0][0 + y_offset * p->linesize[0]] = get_bits(&s->gb, 8);
976 
977  switch (s->predictor) {
978  case LEFT:
979  case PLANE:
981  lefty = s->llviddsp.add_left_pred(p->data[0] + p->linesize[0] * y_offset + 2, s->temp[0],
982  width - 2, lefty);
983  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
984  leftu = s->llviddsp.add_left_pred(p->data[1] + p->linesize[1] * y_offset + 1, s->temp[1], width2 - 1, leftu);
985  leftv = s->llviddsp.add_left_pred(p->data[2] + p->linesize[2] * y_offset + 1, s->temp[2], width2 - 1, leftv);
986  }
987 
988  for (cy = y = 1; y < height; y++, cy++) {
989  uint8_t *ydst, *udst, *vdst;
990 
991  if (s->bitstream_bpp == 12) {
993 
994  ydst = p->data[0] + p->linesize[0] * (y + y_offset);
995 
996  lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
997  width, lefty);
998  if (s->predictor == PLANE) {
999  if (y > s->interlaced)
1000  s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
1001  }
1002  y++;
1003  if (y >= height)
1004  break;
1005  }
1006 
1007  draw_slice(s, p, y);
1008 
1009  ydst = p->data[0] + p->linesize[0] * (y + y_offset);
1010  udst = p->data[1] + p->linesize[1] * (cy + y_offset);
1011  vdst = p->data[2] + p->linesize[2] * (cy + y_offset);
1012 
1014  lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
1015  width, lefty);
1016  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1017  leftu = s->llviddsp.add_left_pred(udst, s->temp[1], width2, leftu);
1018  leftv = s->llviddsp.add_left_pred(vdst, s->temp[2], width2, leftv);
1019  }
1020  if (s->predictor == PLANE) {
1021  if (cy > s->interlaced) {
1022  s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
1023  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1024  s->llviddsp.add_bytes(udst, udst - fake_ustride, width2);
1025  s->llviddsp.add_bytes(vdst, vdst - fake_vstride, width2);
1026  }
1027  }
1028  }
1029  }
1030  draw_slice(s, p, height);
1031 
1032  break;
1033  case MEDIAN:
1034  /* first line except first 2 pixels is left predicted */
1036  lefty = s->llviddsp.add_left_pred(p->data[0] + 2, s->temp[0],
1037  width - 2, lefty);
1038  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1039  leftu = s->llviddsp.add_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1040  leftv = s->llviddsp.add_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1041  }
1042 
1043  cy = y = 1;
1044  if (y >= height)
1045  break;
1046 
1047  /* second line is left predicted for interlaced case */
1048  if (s->interlaced) {
1050  lefty = s->llviddsp.add_left_pred(p->data[0] + p->linesize[0],
1051  s->temp[0], width, lefty);
1052  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1053  leftu = s->llviddsp.add_left_pred(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1054  leftv = s->llviddsp.add_left_pred(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1055  }
1056  y++;
1057  cy++;
1058  if (y >= height)
1059  break;
1060  }
1061 
1062  /* next 4 pixels are left predicted too */
1063  decode_422_bitstream(s, 4);
1064  lefty = s->llviddsp.add_left_pred(p->data[0] + fake_ystride,
1065  s->temp[0], 4, lefty);
1066  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1067  leftu = s->llviddsp.add_left_pred(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1068  leftv = s->llviddsp.add_left_pred(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1069  }
1070 
1071  /* next line except the first 4 pixels is median predicted */
1072  lefttopy = p->data[0][3];
1074  s->llviddsp.add_median_pred(p->data[0] + fake_ystride + 4,
1075  p->data[0] + 4, s->temp[0],
1076  width - 4, &lefty, &lefttopy);
1077  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1078  lefttopu = p->data[1][1];
1079  lefttopv = p->data[2][1];
1080  s->llviddsp.add_median_pred(p->data[1] + fake_ustride + 2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
1081  s->llviddsp.add_median_pred(p->data[2] + fake_vstride + 2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
1082  }
1083  y++;
1084  cy++;
1085 
1086  for (; y < height; y++, cy++) {
1087  uint8_t *ydst, *udst, *vdst;
1088 
1089  if (s->bitstream_bpp == 12) {
1090  while (2 * cy > y) {
1092  ydst = p->data[0] + p->linesize[0] * y;
1093  s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
1094  s->temp[0], width,
1095  &lefty, &lefttopy);
1096  y++;
1097  }
1098  if (y >= height)
1099  break;
1100  }
1101  draw_slice(s, p, y);
1102 
1104 
1105  ydst = p->data[0] + p->linesize[0] * y;
1106  udst = p->data[1] + p->linesize[1] * cy;
1107  vdst = p->data[2] + p->linesize[2] * cy;
1108 
1109  s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
1110  s->temp[0], width,
1111  &lefty, &lefttopy);
1112  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1113  s->llviddsp.add_median_pred(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1114  s->llviddsp.add_median_pred(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1115  }
1116  }
1117 
1118  draw_slice(s, p, height);
1119  break;
1120  }
1121  }
1122  } else {
1123  int y;
1124  uint8_t left[4];
1125  const int last_line = (y_offset + height - 1) * p->linesize[0];
1126 
1127  if (s->bitstream_bpp == 32) {
1128  left[A] = p->data[0][last_line + A] = get_bits(&s->gb, 8);
1129  left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
1130  left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
1131  left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
1132  } else {
1133  left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
1134  left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
1135  left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
1136  left[A] = p->data[0][last_line + A] = 255;
1137  skip_bits(&s->gb, 8);
1138  }
1139 
1140  if (s->bgr32) {
1141  switch (s->predictor) {
1142  case LEFT:
1143  case PLANE:
1145  s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + last_line + 4,
1146  s->temp[0], width - 1, left);
1147 
1148  for (y = height - 2; y >= 0; y--) { // Yes it is stored upside down.
1150 
1151  s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + p->linesize[0] * (y + y_offset),
1152  s->temp[0], width, left);
1153  if (s->predictor == PLANE) {
1154  if (s->bitstream_bpp != 32)
1155  left[A] = 0;
1156  if (y < height - 1 - s->interlaced) {
1157  s->llviddsp.add_bytes(p->data[0] + p->linesize[0] * (y + y_offset),
1158  p->data[0] + p->linesize[0] * (y + y_offset) +
1159  fake_ystride, 4 * width);
1160  }
1161  }
1162  }
1163  // just 1 large slice as this is not possible in reverse order
1164  draw_slice(s, p, height);
1165  break;
1166  default:
1167  av_log(avctx, AV_LOG_ERROR,
1168  "prediction type not supported!\n");
1169  }
1170  } else {
1171  av_log(avctx, AV_LOG_ERROR,
1172  "BGR24 output is not implemented yet\n");
1173  return AVERROR_PATCHWELCOME;
1174  }
1175  }
1176 
1177  return 0;
1178 }
1179 
/**
 * Decode one Huffyuv / FFVHUFF / HYMT frame.
 *
 * The packet payload is byte-swapped into s->bitstream_buffer (the codec
 * stores its bitstream as big-endian 32-bit words), optional per-frame
 * Huffman tables are read when "context" mode is active, and each slice
 * is handed to decode_slice().
 *
 * @param avctx     codec context; priv_data is the HYuvContext
 * @param data      output AVFrame (also wrapped in a ThreadFrame below)
 * @param got_frame set to 1 once a frame has been produced
 * @param avpkt     input packet
 * @return number of bytes consumed on success, negative AVERROR on failure
 */
1180 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
1181  AVPacket *avpkt)
1182 {
1183  const uint8_t *buf = avpkt->data;
1184  int buf_size = avpkt->size;
1185  HYuvContext *s = avctx->priv_data;
1186  const int width = s->width;
1187  const int height = s->height;
1188  ThreadFrame frame = { .f = data };
1189  AVFrame *const p = data;
1190  int slice, table_size = 0, ret, nb_slices;
1191  unsigned slices_info_offset;
1192  int slice_height;
1193 
     /* a decodable frame needs at least one bit of payload per pixel */
1194  if (buf_size < (width * height + 7)/8)
1195  return AVERROR_INVALIDDATA;
1196 
1197  av_fast_padded_malloc(&s->bitstream_buffer,
1198  &s->bitstream_buffer_size,
1199  buf_size);
1200  if (!s->bitstream_buffer)
1201  return AVERROR(ENOMEM);
1202 
     /* the bitstream is stored as big-endian 32-bit words; swap it into
      * native order for the bit reader */
1203  s->bdsp.bswap_buf((uint32_t *) s->bitstream_buffer,
1204  (const uint32_t *) buf, buf_size / 4);
1205 
1206  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
1207  return ret;
1208 
     /* in "context" mode every frame carries its own Huffman tables at the
      * start of the payload */
1209  if (s->context) {
1210  table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
1211  if (table_size < 0)
1212  return table_size;
1213  }
1214 
     /* guard so that the bit count (8 * remaining bytes) fits in an int */
1215  if ((unsigned) (buf_size - table_size) >= INT_MAX / 8)
1216  return AVERROR_INVALIDDATA;
1217 
1218  s->last_slice_end = 0;
1219 
     /* HYMT packets may end with a 16-byte trailer:
      * [-16] zero marker, [-12] nb_slices, [-8] slice_height,
      * [-4] offset of the per-slice (offset,size) table */
1220  if (avctx->codec_id == AV_CODEC_ID_HYMT &&
1221  (buf_size > 32 && AV_RL32(avpkt->data + buf_size - 16) == 0)) {
1222  slices_info_offset = AV_RL32(avpkt->data + buf_size - 4);
1223  slice_height = AV_RL32(avpkt->data + buf_size - 8);
1224  nb_slices = AV_RL32(avpkt->data + buf_size - 12);
1225  if (nb_slices * 8LL + slices_info_offset > buf_size - 16 ||
1226  s->chroma_v_shift ||
1227  slice_height <= 0 || nb_slices * (uint64_t)slice_height > height)
1228  return AVERROR_INVALIDDATA;
1229  } else {
1230  slice_height = height;
1231  nb_slices = 1;
1232  }
1233 
1234  for (slice = 0; slice < nb_slices; slice++) {
1235  int y_offset, slice_offset, slice_size;
1236 
1237  if (nb_slices > 1) {
1238  slice_offset = AV_RL32(avpkt->data + slices_info_offset + slice * 8);
1239  slice_size = AV_RL32(avpkt->data + slices_info_offset + slice * 8 + 4);
1240 
1241  if (slice_offset < 0 || slice_size <= 0 || (slice_offset&3) ||
1242  slice_offset + (int64_t)slice_size > buf_size)
1243  return AVERROR_INVALIDDATA;
1244 
      /* slices are stored bottom-up within the frame */
1245  y_offset = height - (slice + 1) * slice_height;
1246  s->bdsp.bswap_buf((uint32_t *)s->bitstream_buffer,
1247  (const uint32_t *)(buf + slice_offset), slice_size / 4);
1248  } else {
1249  y_offset = 0;
1250  slice_offset = 0;
1251  slice_size = buf_size;
1252  }
1253 
1254  ret = decode_slice(avctx, p, slice_height, slice_size, y_offset, table_size);
1255  emms_c();
1256  if (ret < 0)
1257  return ret;
1258  }
1259 
1260  *got_frame = 1;
1261 
     /* consumed bytes: bit position rounded up to whole 32-bit words, plus
      * the per-frame table bytes read above */
1262  return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
1263 }
1264 
1266  .name = "huffyuv",
1267  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1268  .type = AVMEDIA_TYPE_VIDEO,
1269  .id = AV_CODEC_ID_HUFFYUV,
1270  .priv_data_size = sizeof(HYuvContext),
1271  .init = decode_init,
1272  .close = decode_end,
1273  .decode = decode_frame,
1277 };
1278 
1279 #if CONFIG_FFVHUFF_DECODER
/* FFmpeg's own Huffyuv variant (AV_CODEC_ID_FFVHUFF); shares all entry
 * points (decode_init/decode_end/decode_frame) with the classic decoder.
 * NOTE(review): some initializer fields (e.g. capability flags) are not
 * visible in this excerpt — confirm against the full source. */
1280 const AVCodec ff_ffvhuff_decoder = {
1281  .name = "ffvhuff",
1282  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1283  .type = AVMEDIA_TYPE_VIDEO,
1284  .id = AV_CODEC_ID_FFVHUFF,
1285  .priv_data_size = sizeof(HYuvContext),
1286  .init = decode_init,
1287  .close = decode_end,
1288  .decode = decode_frame,
1292 };
1293 #endif /* CONFIG_FFVHUFF_DECODER */
1294 
1295 #if CONFIG_HYMT_DECODER
1296 const AVCodec ff_hymt_decoder = {
1297  .name = "hymt",
1298  .long_name = NULL_IF_CONFIG_SMALL("HuffYUV MT"),
1299  .type = AVMEDIA_TYPE_VIDEO,
1300  .id = AV_CODEC_ID_HYMT,
1301  .priv_data_size = sizeof(HYuvContext),
1302  .init = decode_init,
1303  .close = decode_end,
1304  .decode = decode_frame,
1308 };
1309 #endif /* CONFIG_HYMT_DECODER */
VLC_INTERN
#define VLC_INTERN(dst, table, gb, name, bits, max_depth)
Subset of GET_VLC for use in hand-roller VLC code.
Definition: huffyuvdec.c:566
add_bytes
static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
Definition: huffyuvdec.c:851
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:434
HYuvContext
Definition: huffyuv.h:55
AVCodec
AVCodec.
Definition: codec.h:202
generate_joint_tables
static int generate_joint_tables(HYuvContext *s)
Definition: huffyuvdec.c:119
decorrelate
static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median)
Definition: snowenc.c:1319
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:42
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:850
r
const char * r
Definition: vf_curves.c:116
acc
int acc
Definition: yuv2rgb.c:554
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AV_CODEC_ID_HUFFYUV
@ AV_CODEC_ID_HUFFYUV
Definition: codec_id.h:75
out
FILE * out
Definition: movenc.c:54
AV_CODEC_ID_HYMT
@ AV_CODEC_ID_HYMT
Definition: codec_id.h:291
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
MAX_VLC_N
#define MAX_VLC_N
Definition: huffyuv.h:47
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:220
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:426
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
pixdesc.h
index
fg index
Definition: ffmpeg_filter.c:168
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:433
w
uint8_t w
Definition: llviddspenc.c:38
decode_gray_bitstream
static void decode_gray_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:723
R
#define R
Definition: huffyuvdsp.h:34
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:373
huffyuvdsp.h
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:428
init_vlc
#define init_vlc(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
Definition: vlc.h:38
b
#define b
Definition: input.c:40
READ_2PIX
#define READ_2PIX(dst0, dst1, plane1)
Definition: huffyuvdec.c:613
ff_ffvhuff_decoder
const AVCodec ff_ffvhuff_decoder
classic_add_luma
static const unsigned char classic_add_luma[256]
Definition: huffyuvdec.c:62
data
const char data[16]
Definition: mxf.c:143
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:391
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:798
MEDIAN
@ MEDIAN
Definition: huffyuv.h:52
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: huffyuvdec.c:292
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:179
OP8bits
#define OP8bits(dst0, dst1, code)
Definition: huffyuvdec.c:611
OP14bits
#define OP14bits(dst0, dst1, code)
Definition: huffyuvdec.c:652
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:429
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:660
thread.h
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:317
add_median_prediction
static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
Definition: huffyuvdec.c:860
ff_huffyuv_alloc_temp
av_cold int ff_huffyuv_alloc_temp(HYuvContext *s)
Definition: huffyuv.c:58
A
#define A(x)
Definition: vp56_arith.h:28
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:468
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:425
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:380
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:409
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:407
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:435
GetBitContext
Definition: get_bits.h:62
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:389
classic_shift_chroma_table_size
#define classic_shift_chroma_table_size
Definition: huffyuvdec.c:53
val
static double val(void *priv, double ch)
Definition: aeval.c:76
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2592
classic_shift_luma
static const unsigned char classic_shift_luma[classic_shift_luma_table_size+AV_INPUT_BUFFER_PADDING_SIZE]
Definition: huffyuvdec.c:46
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:375
decode_bgr_1
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha)
Definition: huffyuvdec.c:741
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:394
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:403
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:678
read_huffman_tables
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
Definition: huffyuvdec.c:209
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:150
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:485
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:404
draw_slice
static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
Definition: huffyuvdec.c:814
g
const char * g
Definition: vf_curves.c:117
bits
uint8_t bits
Definition: vp3data.h:141
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:388
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:402
get_bits.h
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:431
AV_CODEC_ID_FFVHUFF
@ AV_CODEC_ID_FFVHUFF
Definition: codec_id.h:117
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:393
if
if(ret)
Definition: filter_design.txt:179
ff_huffyuv_common_end
av_cold void ff_huffyuv_common_end(HYuvContext *s)
Definition: huffyuv.c:86
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:113
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:410
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
ff_hymt_decoder
const AVCodec ff_hymt_decoder
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
src
#define src
Definition: vp8dsp.c:255
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:200
ff_huffyuv_generate_bits_table
int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int n)
Definition: huffyuv.c:39
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:392
ff_init_vlc_sparse
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:323
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
VLC_BITS
#define VLC_BITS
Definition: cfhd.h:96
READ_2PIX_PLANE16
#define READ_2PIX_PLANE16(dst0, dst1, plane)
Definition: huffyuvdec.c:656
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:406
READ_2PIX_PLANE
#define READ_2PIX_PLANE(dst0, dst1, plane, OP)
Definition: huffyuvdec.c:647
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
left_prediction
static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
Definition: huffyuvdec.c:842
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:243
AVPacket::size
int size
Definition: packet.h:374
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:396
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:304
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:398
decode_end
static av_cold int decode_end(AVCodecContext *avctx)
Definition: huffyuvdec.c:278
LEFT
#define LEFT
Definition: cdgraphics.c:166
ff_huffyuvdsp_init
av_cold void ff_huffyuvdsp_init(HuffYUVDSPContext *c, enum AVPixelFormat pix_fmt)
Definition: huffyuvdsp.c:83
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:139
height
#define height
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:364
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
classic_add_chroma
static const unsigned char classic_add_chroma[256]
Definition: huffyuvdec.c:81
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:430
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
decode_slice
static int decode_slice(AVCodecContext *avctx, AVFrame *p, int height, int buf_size, int y_offset, int table_size)
Definition: huffyuvdec.c:869
decode_plane_bitstream
static void decode_plane_bitstream(HYuvContext *s, int width, int plane)
Definition: huffyuvdec.c:662
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1418
interlaced
uint8_t interlaced
Definition: mxfenc.c:2207
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:484
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:50
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:408
ff_huffyuv_decoder
const AVCodec ff_huffyuv_decoder
Definition: huffyuvdec.c:1265
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:50
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:209
len
int len
Definition: vorbis_enc_data.h:426
PLANE
@ PLANE
Definition: huffyuv.h:51
AVCodecContext::height
int height
Definition: avcodec.h:556
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:593
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:390
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:128
G
#define G
Definition: huffyuvdsp.h:33
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:368
decode_bgr_bitstream
static void decode_bgr_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:799
classic_shift_chroma
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size+AV_INPUT_BUFFER_PADDING_SIZE]
Definition: huffyuvdec.c:54
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:427
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:395
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
classic_shift_luma_table_size
#define classic_shift_luma_table_size
Definition: huffyuvdec.c:45
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
BITS_LEFT
#define BITS_LEFT(name, gb)
Definition: get_bits.h:192
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:400
B
#define B
Definition: huffyuvdsp.h:32
ff_llviddsp_init
void ff_llviddsp_init(LLVidDSPContext *c)
Definition: lossless_videodsp.c:113
decode_422_bitstream
static void decode_422_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:618
AVCodecContext
main external API structure.
Definition: avcodec.h:383
ThreadFrame
Definition: thread.h:34
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:212
VLC
Definition: vlc.h:26
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:130
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
lossless_videodsp.h
map
const VDPAUPixFmtMap * map
Definition: hwcontext_vdpau.c:71
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:139
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
ff_huffyuv_common_init
av_cold void ff_huffyuv_common_init(AVCodecContext *avctx)
Definition: huffyuv.c:71
decode_frame
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: huffyuvdec.c:1180
AVPacket
This structure stores compressed data.
Definition: packet.h:350
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
read_len_table
static int read_len_table(uint8_t *dst, GetBitContext *gb, int n)
Definition: huffyuvdec.c:100
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:556
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:334
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
h
h
Definition: vp9dsp_template.c:2038
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:401
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
huffyuv.h
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:166
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:399
re
float re
Definition: fft.c:78
read_old_huffman_tables
static int read_old_huffman_tables(HYuvContext *s)
Definition: huffyuvdec.c:238