FFmpeg
huffyuvdec.c
1 /*
2  * huffyuv decoder
3  *
4  * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7  * the algorithm used
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  *
25  * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
26  */
27 
28 /**
29  * @file
30  * huffyuv decoder
31  */
32 
33 #define UNCHECKED_BITSTREAM_READER 1
34 
35 #include "avcodec.h"
36 #include "get_bits.h"
37 #include "huffyuv.h"
38 #include "huffyuvdsp.h"
39 #include "lossless_videodsp.h"
40 #include "thread.h"
41 #include "libavutil/imgutils.h"
42 #include "libavutil/pixdesc.h"
43 
44 #define classic_shift_luma_table_size 42
45 static const unsigned char classic_shift_luma[classic_shift_luma_table_size + AV_INPUT_BUFFER_PADDING_SIZE] = {
46  34, 36, 35, 69, 135, 232, 9, 16, 10, 24, 11, 23, 12, 16, 13, 10,
47  14, 8, 15, 8, 16, 8, 17, 20, 16, 10, 207, 206, 205, 236, 11, 8,
48  10, 21, 9, 23, 8, 8, 199, 70, 69, 68, 0,
49  0,0,0,0,0,0,0,0,
50 };
51 
52 #define classic_shift_chroma_table_size 59
53 static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + AV_INPUT_BUFFER_PADDING_SIZE] = {
54  66, 36, 37, 38, 39, 40, 41, 75, 76, 77, 110, 239, 144, 81, 82, 83,
55  84, 85, 118, 183, 56, 57, 88, 89, 56, 89, 154, 57, 58, 57, 26, 141,
56  57, 56, 58, 57, 58, 57, 184, 119, 214, 245, 116, 83, 82, 49, 80, 79,
57  78, 77, 44, 75, 41, 40, 39, 38, 37, 36, 34, 0,
58  0,0,0,0,0,0,0,0,
59 };
60 
61 static const unsigned char classic_add_luma[256] = {
62  3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
63  73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
64  68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
65  35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
66  37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
67  35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
68  27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
69  15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
70  12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
71  12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
72  18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
73  28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
74  28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
75  62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
76  54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
77  46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
78 };
79 
80 static const unsigned char classic_add_chroma[256] = {
81  3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
82  7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
83  11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
84  43, 45, 76, 81, 46, 82, 75, 55, 56, 144, 58, 80, 60, 74, 147, 63,
85  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
86  80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
87  17, 14, 5, 6, 100, 54, 47, 50, 51, 53, 106, 107, 108, 109, 110, 111,
88  112, 113, 114, 115, 4, 117, 118, 92, 94, 121, 122, 3, 124, 103, 2, 1,
89  0, 129, 130, 131, 120, 119, 126, 125, 136, 137, 138, 139, 140, 141, 142, 134,
90  135, 132, 133, 104, 64, 101, 62, 57, 102, 95, 93, 59, 61, 28, 97, 96,
91  52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
92  19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
93  7, 128, 127, 105, 123, 116, 35, 34, 33, 145, 31, 79, 42, 146, 78, 26,
94  83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
95  14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
96  6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
97 };
98 
99 static int read_len_table(uint8_t *dst, GetBitContext *gb, int n)
100 {
101  int i, val, repeat;
102 
103  for (i = 0; i < n;) {
104  repeat = get_bits(gb, 3);
105  val = get_bits(gb, 5);
106  if (repeat == 0)
107  repeat = get_bits(gb, 8);
108  if (i + repeat > n || get_bits_left(gb) < 0) {
109  av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
110  return AVERROR_INVALIDDATA;
111  }
112  while (repeat--)
113  dst[i++] = val;
114  }
115  return 0;
116 }
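/* The code-length tables are stored run-length coded: each record is a
 * 3-bit repeat count followed by a 5-bit code length, and a repeat count
 * of 0 is escaped by an explicit 8-bit count.  read_len_table() above
 * expands those records into one length per symbol; the matching codes
 * are rebuilt later with ff_huffyuv_generate_bits_table(). */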
117 
118 static int generate_joint_tables(HYuvContext *s)
119 {
120  int ret;
121  uint16_t *symbols = av_mallocz(5 << VLC_BITS);
122  uint16_t *bits;
123  uint8_t *len;
124  if (!symbols)
125  return AVERROR(ENOMEM);
126  bits = symbols + (1 << VLC_BITS);
127  len = (uint8_t *)(bits + (1 << VLC_BITS));
128 
129  if (s->bitstream_bpp < 24 || s->version > 2) {
130  int p, i, y, u;
131  for (p = 0; p < 4; p++) {
132  int p0 = s->version > 2 ? p : 0;
133  for (i = y = 0; y < s->vlc_n; y++) {
134  int len0 = s->len[p0][y];
135  int limit = VLC_BITS - len0;
136  if (limit <= 0 || !len0)
137  continue;
138  if ((sign_extend(y, 8) & (s->vlc_n-1)) != y)
139  continue;
140  for (u = 0; u < s->vlc_n; u++) {
141  int len1 = s->len[p][u];
142  if (len1 > limit || !len1)
143  continue;
144  if ((sign_extend(u, 8) & (s->vlc_n-1)) != u)
145  continue;
146  av_assert0(i < (1 << VLC_BITS));
147  len[i] = len0 + len1;
148  bits[i] = (s->bits[p0][y] << len1) + s->bits[p][u];
149  symbols[i] = (y << 8) + (u & 0xFF);
150  i++;
151  }
152  }
153  ff_free_vlc(&s->vlc[4 + p]);
154  if ((ret = ff_init_vlc_sparse(&s->vlc[4 + p], VLC_BITS, i, len, 1, 1,
155  bits, 2, 2, symbols, 2, 2, 0)) < 0)
156  goto out;
157  }
158  } else {
159  uint8_t (*map)[4] = (uint8_t(*)[4]) s->pix_bgr_map;
160  int i, b, g, r, code;
161  int p0 = s->decorrelate;
162  int p1 = !s->decorrelate;
163  /* Restrict the range to +/-16 because that's pretty much guaranteed
164  * to cover all the combinations that fit in 11 bits total, and it
165  * does not matter if we miss a few rare codes. */
166  for (i = 0, g = -16; g < 16; g++) {
167  int len0 = s->len[p0][g & 255];
168  int limit0 = VLC_BITS - len0;
169  if (limit0 < 2 || !len0)
170  continue;
171  for (b = -16; b < 16; b++) {
172  int len1 = s->len[p1][b & 255];
173  int limit1 = limit0 - len1;
174  if (limit1 < 1 || !len1)
175  continue;
176  code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
177  for (r = -16; r < 16; r++) {
178  int len2 = s->len[2][r & 255];
179  if (len2 > limit1 || !len2)
180  continue;
181  av_assert0(i < (1 << VLC_BITS));
182  len[i] = len0 + len1 + len2;
183  bits[i] = (code << len2) + s->bits[2][r & 255];
184  if (s->decorrelate) {
185  map[i][G] = g;
186  map[i][B] = g + b;
187  map[i][R] = g + r;
188  } else {
189  map[i][B] = g;
190  map[i][G] = b;
191  map[i][R] = r;
192  }
193  i++;
194  }
195  }
196  }
197  ff_free_vlc(&s->vlc[4]);
198  if ((ret = init_vlc(&s->vlc[4], VLC_BITS, i, len, 1, 1,
199  bits, 2, 2, 0)) < 0)
200  goto out;
201  }
202  ret = 0;
203 out:
204  av_freep(&symbols);
205  return ret;
206 }
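/* The joint tables built above allow two symbols (or a whole G/B/R triplet
 * in the 24/32-bit RGB path) to be read with a single VLC_BITS-wide lookup:
 * only combinations whose total code length fits in VLC_BITS are entered,
 * and anything longer falls back to the per-plane tables at decode time. */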
207 
208 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
209 {
210  GetBitContext gb;
211  int i, ret;
212  int count = 3;
213 
214  if ((ret = init_get_bits(&gb, src, length * 8)) < 0)
215  return ret;
216 
217  if (s->version > 2)
218  count = 1 + s->alpha + 2*s->chroma;
219 
220  for (i = 0; i < count; i++) {
221  if ((ret = read_len_table(s->len[i], &gb, s->vlc_n)) < 0)
222  return ret;
223  if ((ret = ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n)) < 0)
224  return ret;
225  ff_free_vlc(&s->vlc[i]);
226  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, s->vlc_n, s->len[i], 1, 1,
227  s->bits[i], 4, 4, 0)) < 0)
228  return ret;
229  }
230 
231  if ((ret = generate_joint_tables(s)) < 0)
232  return ret;
233 
234  return (get_bits_count(&gb) + 7) / 8;
235 }
236 
237 static int read_old_huffman_tables(HYuvContext *s)
238 {
239  GetBitContext gb;
240  int i, ret;
241 
242  init_get_bits(&gb, classic_shift_luma,
243  classic_shift_luma_table_size * 8);
244  if ((ret = read_len_table(s->len[0], &gb, 256)) < 0)
245  return ret;
246 
247  init_get_bits(&gb, classic_shift_chroma,
248  classic_shift_chroma_table_size * 8);
249  if ((ret = read_len_table(s->len[1], &gb, 256)) < 0)
250  return ret;
251 
252  for (i = 0; i < 256; i++)
253  s->bits[0][i] = classic_add_luma[i];
254  for (i = 0; i < 256; i++)
255  s->bits[1][i] = classic_add_chroma[i];
256 
257  if (s->bitstream_bpp >= 24) {
258  memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
259  memcpy(s->len[1], s->len[0], 256 * sizeof(uint8_t));
260  }
261  memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
262  memcpy(s->len[2], s->len[1], 256 * sizeof(uint8_t));
263 
264  for (i = 0; i < 4; i++) {
265  ff_free_vlc(&s->vlc[i]);
266  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
267  s->bits[i], 4, 4, 0)) < 0)
268  return ret;
269  }
270 
271  if ((ret = generate_joint_tables(s)) < 0)
272  return ret;
273 
274  return 0;
275 }
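/* Files without version-2+ extradata carry no Huffman tables of their own;
 * read_old_huffman_tables() above rebuilds them from the fixed
 * classic_shift_* tables (run-length coded lengths) and classic_add_*
 * tables (codes) defined at the top of this file. */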
276 
277 static av_cold int decode_end(AVCodecContext *avctx)
278 {
279  HYuvContext *s = avctx->priv_data;
280  int i;
281 
282  ff_huffyuv_common_end(s);
283  av_freep(&s->bitstream_buffer);
284 
285  for (i = 0; i < 8; i++)
286  ff_free_vlc(&s->vlc[i]);
287 
288  return 0;
289 }
290 
291 static av_cold int decode_init(AVCodecContext *avctx)
292 {
293  HYuvContext *s = avctx->priv_data;
294  int ret;
295 
296  ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
297  if (ret < 0)
298  return ret;
299 
300  ff_huffyuvdsp_init(&s->hdsp, avctx->pix_fmt);
301  ff_llviddsp_init(&s->llviddsp);
302  memset(s->vlc, 0, 4 * sizeof(VLC));
303 
304  s->interlaced = avctx->height > 288;
305  s->bgr32 = 1;
306 
307  if (avctx->extradata_size) {
308  if ((avctx->bits_per_coded_sample & 7) &&
309  avctx->bits_per_coded_sample != 12)
310  s->version = 1; // do such files exist at all?
311  else if (avctx->extradata_size > 3 && avctx->extradata[3] == 0)
312  s->version = 2;
313  else
314  s->version = 3;
315  } else
316  s->version = 0;
317 
318  s->bps = 8;
319  s->n = 1<<s->bps;
320  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
321  s->chroma = 1;
322  if (s->version >= 2) {
323  int method, interlace;
324 
325  if (avctx->extradata_size < 4)
326  return AVERROR_INVALIDDATA;
327 
328  method = avctx->extradata[0];
329  s->decorrelate = method & 64 ? 1 : 0;
330  s->predictor = method & 63;
331  if (s->version == 2) {
332  s->bitstream_bpp = avctx->extradata[1];
333  if (s->bitstream_bpp == 0)
334  s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
335  } else {
336  s->bps = (avctx->extradata[1] >> 4) + 1;
337  s->n = 1<<s->bps;
338  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
339  s->chroma_h_shift = avctx->extradata[1] & 3;
340  s->chroma_v_shift = (avctx->extradata[1] >> 2) & 3;
341  s->yuv = !!(avctx->extradata[2] & 1);
342  s->chroma= !!(avctx->extradata[2] & 3);
343  s->alpha = !!(avctx->extradata[2] & 4);
344  }
345  interlace = (avctx->extradata[2] & 0x30) >> 4;
346  s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
347  s->context = avctx->extradata[2] & 0x40 ? 1 : 0;
348 
349  if ((ret = read_huffman_tables(s, avctx->extradata + 4,
350  avctx->extradata_size - 4)) < 0)
351  goto error;
352  } else {
353  switch (avctx->bits_per_coded_sample & 7) {
354  case 1:
355  s->predictor = LEFT;
356  s->decorrelate = 0;
357  break;
358  case 2:
359  s->predictor = LEFT;
360  s->decorrelate = 1;
361  break;
362  case 3:
363  s->predictor = PLANE;
364  s->decorrelate = avctx->bits_per_coded_sample >= 24;
365  break;
366  case 4:
367  s->predictor = MEDIAN;
368  s->decorrelate = 0;
369  break;
370  default:
371  s->predictor = LEFT; // OLD
372  s->decorrelate = 0;
373  break;
374  }
375  s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
376  s->context = 0;
377 
378  if ((ret = read_old_huffman_tables(s)) < 0)
379  goto error;
380  }
381 
382  if (s->version <= 2) {
383  switch (s->bitstream_bpp) {
384  case 12:
385  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
386  s->yuv = 1;
387  break;
388  case 16:
389  if (s->yuy2)
390  avctx->pix_fmt = AV_PIX_FMT_YUYV422;
391  else
392  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
393  s->yuv = 1;
394  break;
395  case 24:
396  if (s->bgr32)
397  avctx->pix_fmt = AV_PIX_FMT_0RGB32;
398  else
399  avctx->pix_fmt = AV_PIX_FMT_BGR24;
400  break;
401  case 32:
402  av_assert0(s->bgr32);
403  avctx->pix_fmt = AV_PIX_FMT_RGB32;
404  s->alpha = 1;
405  break;
406  default:
407  ret = AVERROR_INVALIDDATA;
408  goto error;
409  }
410  av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
411  &s->chroma_h_shift,
412  &s->chroma_v_shift);
413  } else {
414  switch ( (s->chroma<<10) | (s->yuv<<9) | (s->alpha<<8) | ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2)) {
415  case 0x070:
416  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
417  break;
418  case 0x0F0:
419  avctx->pix_fmt = AV_PIX_FMT_GRAY16;
420  break;
421  case 0x470:
422  avctx->pix_fmt = AV_PIX_FMT_GBRP;
423  break;
424  case 0x480:
425  avctx->pix_fmt = AV_PIX_FMT_GBRP9;
426  break;
427  case 0x490:
428  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
429  break;
430  case 0x4B0:
431  avctx->pix_fmt = AV_PIX_FMT_GBRP12;
432  break;
433  case 0x4D0:
434  avctx->pix_fmt = AV_PIX_FMT_GBRP14;
435  break;
436  case 0x4F0:
437  avctx->pix_fmt = AV_PIX_FMT_GBRP16;
438  break;
439  case 0x570:
440  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
441  break;
442  case 0x670:
443  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
444  break;
445  case 0x680:
446  avctx->pix_fmt = AV_PIX_FMT_YUV444P9;
447  break;
448  case 0x690:
449  avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
450  break;
451  case 0x6B0:
452  avctx->pix_fmt = AV_PIX_FMT_YUV444P12;
453  break;
454  case 0x6D0:
455  avctx->pix_fmt = AV_PIX_FMT_YUV444P14;
456  break;
457  case 0x6F0:
458  avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
459  break;
460  case 0x671:
461  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
462  break;
463  case 0x681:
464  avctx->pix_fmt = AV_PIX_FMT_YUV422P9;
465  break;
466  case 0x691:
467  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
468  break;
469  case 0x6B1:
470  avctx->pix_fmt = AV_PIX_FMT_YUV422P12;
471  break;
472  case 0x6D1:
473  avctx->pix_fmt = AV_PIX_FMT_YUV422P14;
474  break;
475  case 0x6F1:
476  avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
477  break;
478  case 0x672:
479  avctx->pix_fmt = AV_PIX_FMT_YUV411P;
480  break;
481  case 0x674:
482  avctx->pix_fmt = AV_PIX_FMT_YUV440P;
483  break;
484  case 0x675:
485  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
486  break;
487  case 0x685:
488  avctx->pix_fmt = AV_PIX_FMT_YUV420P9;
489  break;
490  case 0x695:
491  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
492  break;
493  case 0x6B5:
494  avctx->pix_fmt = AV_PIX_FMT_YUV420P12;
495  break;
496  case 0x6D5:
497  avctx->pix_fmt = AV_PIX_FMT_YUV420P14;
498  break;
499  case 0x6F5:
500  avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
501  break;
502  case 0x67A:
503  avctx->pix_fmt = AV_PIX_FMT_YUV410P;
504  break;
505  case 0x770:
506  avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
507  break;
508  case 0x780:
509  avctx->pix_fmt = AV_PIX_FMT_YUVA444P9;
510  break;
511  case 0x790:
512  avctx->pix_fmt = AV_PIX_FMT_YUVA444P10;
513  break;
514  case 0x7F0:
515  avctx->pix_fmt = AV_PIX_FMT_YUVA444P16;
516  break;
517  case 0x771:
518  avctx->pix_fmt = AV_PIX_FMT_YUVA422P;
519  break;
520  case 0x781:
521  avctx->pix_fmt = AV_PIX_FMT_YUVA422P9;
522  break;
523  case 0x791:
524  avctx->pix_fmt = AV_PIX_FMT_YUVA422P10;
525  break;
526  case 0x7F1:
527  avctx->pix_fmt = AV_PIX_FMT_YUVA422P16;
528  break;
529  case 0x775:
530  avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
531  break;
532  case 0x785:
533  avctx->pix_fmt = AV_PIX_FMT_YUVA420P9;
534  break;
535  case 0x795:
536  avctx->pix_fmt = AV_PIX_FMT_YUVA420P10;
537  break;
538  case 0x7F5:
539  avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
540  break;
541  default:
542  ret = AVERROR_INVALIDDATA;
543  goto error;
544  }
545  }
546 
547  ff_huffyuv_common_init(avctx);
548 
549  if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P || avctx->pix_fmt == AV_PIX_FMT_YUV420P) && avctx->width & 1) {
550  av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
551  ret = AVERROR_INVALIDDATA;
552  goto error;
553  }
554  if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P &&
555  avctx->width % 4) {
556  av_log(avctx, AV_LOG_ERROR, "width must be a multiple of 4 "
557  "for this combination of colorspace and predictor type.\n");
558  ret = AVERROR_INVALIDDATA;
559  goto error;
560  }
561 
562  if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
563  ff_huffyuv_common_end(s);
564  goto error;
565  }
566 
567  return 0;
568  error:
569  decode_end(avctx);
570  return ret;
571 }
572 
573 #if HAVE_THREADS
574 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
575 {
576  HYuvContext *s = avctx->priv_data;
577  int i, ret;
578 
579  s->avctx = avctx;
580 
581  if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
582  ff_huffyuv_common_end(s);
583  return ret;
584  }
585 
586  for (i = 0; i < 8; i++)
587  s->vlc[i].table = NULL;
588 
589  if (s->version >= 2) {
590  if ((ret = read_huffman_tables(s, avctx->extradata + 4,
591  avctx->extradata_size)) < 0)
592  return ret;
593  } else {
594  if ((ret = read_old_huffman_tables(s)) < 0)
595  return ret;
596  }
597 
598  return 0;
599 }
600 #endif
601 
602 /** Subset of GET_VLC for use in hand-rolled VLC code */
603 #define VLC_INTERN(dst, table, gb, name, bits, max_depth) \
604  code = table[index][0]; \
605  n = table[index][1]; \
606  if (max_depth > 1 && n < 0) { \
607  LAST_SKIP_BITS(name, gb, bits); \
608  UPDATE_CACHE(name, gb); \
609  \
610  nb_bits = -n; \
611  index = SHOW_UBITS(name, gb, nb_bits) + code; \
612  code = table[index][0]; \
613  n = table[index][1]; \
614  if (max_depth > 2 && n < 0) { \
615  LAST_SKIP_BITS(name, gb, nb_bits); \
616  UPDATE_CACHE(name, gb); \
617  \
618  nb_bits = -n; \
619  index = SHOW_UBITS(name, gb, nb_bits) + code; \
620  code = table[index][0]; \
621  n = table[index][1]; \
622  } \
623  } \
624  dst = code; \
625  LAST_SKIP_BITS(name, gb, n)
626 
627 
628 #define GET_VLC_DUAL(dst0, dst1, name, gb, dtable, table1, table2, \
629  bits, max_depth, OP) \
630  do { \
631  unsigned int index = SHOW_UBITS(name, gb, bits); \
632  int code, n = dtable[index][1]; \
633  \
634  if (n<=0) { \
635  int nb_bits; \
636  VLC_INTERN(dst0, table1, gb, name, bits, max_depth); \
637  \
638  UPDATE_CACHE(re, gb); \
639  index = SHOW_UBITS(name, gb, bits); \
640  VLC_INTERN(dst1, table2, gb, name, bits, max_depth); \
641  } else { \
642  code = dtable[index][0]; \
643  OP(dst0, dst1, code); \
644  LAST_SKIP_BITS(name, gb, n); \
645  } \
646  } while (0)
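/* GET_VLC_DUAL first probes the combined table (dtable): a positive length n
 * means a single lookup yielded both output symbols, which OP() unpacks.
 * n <= 0 marks an escape, in which case the two symbols are decoded one
 * after the other from their individual tables via VLC_INTERN. */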
647 
648 #define OP8bits(dst0, dst1, code) dst0 = code>>8; dst1 = code
649 
650 #define READ_2PIX(dst0, dst1, plane1) \
651  UPDATE_CACHE(re, &s->gb); \
652  GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane1].table, \
653  s->vlc[0].table, s->vlc[plane1].table, VLC_BITS, 3, OP8bits)
654 
655 static void decode_422_bitstream(HYuvContext *s, int count)
656 {
657  int i, icount;
658  OPEN_READER(re, &s->gb);
659  count /= 2;
660 
661  icount = get_bits_left(&s->gb) / (32 * 4);
662  if (count >= icount) {
663  for (i = 0; i < icount; i++) {
664  READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
665  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
666  }
667  for (; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
668  READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
669  if (BITS_LEFT(re, &s->gb) <= 0) break;
670  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
671  }
672  for (; i < count; i++)
673  s->temp[0][2 * i ] = s->temp[1][i] =
674  s->temp[0][2 * i + 1] = s->temp[2][i] = 0;
675  } else {
676  for (i = 0; i < count; i++) {
677  READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
678  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
679  }
680  }
681  CLOSE_READER(re, &s->gb);
682 }
683 
684 #define READ_2PIX_PLANE(dst0, dst1, plane, OP) \
685  UPDATE_CACHE(re, &s->gb); \
686  GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane].table, \
687  s->vlc[plane].table, s->vlc[plane].table, VLC_BITS, 3, OP)
688 
689 #define OP14bits(dst0, dst1, code) dst0 = code>>8; dst1 = sign_extend(code, 8)
690 
691 /* TODO instead of restarting the read when the code isn't in the first level
692  * of the joint table, jump into the 2nd level of the individual table. */
693 #define READ_2PIX_PLANE16(dst0, dst1, plane){\
694  dst0 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
695  dst0 += get_bits(&s->gb, 2);\
696  dst1 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
697  dst1 += get_bits(&s->gb, 2);\
698 }
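/* For bit depths where the full symbol range no longer fits into the VLC
 * tables (s->vlc_n is capped at MAX_VLC_N), only the upper bits of each
 * sample are Huffman coded, so READ_2PIX_PLANE16 reads a VLC symbol,
 * shifts it left by 2 and appends 2 raw bits per sample. */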
699 static void decode_plane_bitstream(HYuvContext *s, int width, int plane)
700 {
701  int i, count = width/2;
702 
703  if (s->bps <= 8) {
704  OPEN_READER(re, &s->gb);
705  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
706  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
707  READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
708  }
709  } else {
710  for(i=0; i<count; i++){
711  READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
712  }
713  }
714  if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
715  unsigned int index;
716  int nb_bits, code, n;
717  UPDATE_CACHE(re, &s->gb);
718  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
719  VLC_INTERN(s->temp[0][width-1], s->vlc[plane].table,
720  &s->gb, re, VLC_BITS, 3);
721  }
722  CLOSE_READER(re, &s->gb);
723  } else if (s->bps <= 14) {
724  OPEN_READER(re, &s->gb);
725  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
726  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
727  READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
728  }
729  } else {
730  for(i=0; i<count; i++){
731  READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
732  }
733  }
734  if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
735  unsigned int index;
736  int nb_bits, code, n;
737  UPDATE_CACHE(re, &s->gb);
738  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
739  VLC_INTERN(s->temp16[0][width-1], s->vlc[plane].table,
740  &s->gb, re, VLC_BITS, 3);
741  }
742  CLOSE_READER(re, &s->gb);
743  } else {
744  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
745  for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
746  READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
747  }
748  } else {
749  for(i=0; i<count; i++){
750  READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
751  }
752  }
753  if( width&1 && get_bits_left(&s->gb)>0 ) {
754  int dst = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;
755  s->temp16[0][width-1] = dst + get_bits(&s->gb, 2);
756  }
757  }
758 }
759 
760 static void decode_gray_bitstream(HYuvContext *s, int count)
761 {
762  int i;
763  OPEN_READER(re, &s->gb);
764  count /= 2;
765 
766  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
767  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
768  READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
769  }
770  } else {
771  for (i = 0; i < count; i++) {
772  READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
773  }
774  }
775  CLOSE_READER(re, &s->gb);
776 }
777 
778 static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
779  int decorrelate, int alpha)
780 {
781  int i;
782  OPEN_READER(re, &s->gb);
783 
784  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
785  unsigned int index;
786  int code, n, nb_bits;
787 
788  UPDATE_CACHE(re, &s->gb);
789  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
790  n = s->vlc[4].table[index][1];
791 
792  if (n>0) {
793  code = s->vlc[4].table[index][0];
794  *(uint32_t *) &s->temp[0][4 * i] = s->pix_bgr_map[code];
795  LAST_SKIP_BITS(re, &s->gb, n);
796  } else {
797  if (decorrelate) {
798  VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
799  &s->gb, re, VLC_BITS, 3);
800 
801  UPDATE_CACHE(re, &s->gb);
802  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
803  VLC_INTERN(code, s->vlc[0].table, &s->gb, re, VLC_BITS, 3);
804  s->temp[0][4 * i + B] = code + s->temp[0][4 * i + G];
805 
806  UPDATE_CACHE(re, &s->gb);
807  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
808  VLC_INTERN(code, s->vlc[2].table, &s->gb, re, VLC_BITS, 3);
809  s->temp[0][4 * i + R] = code + s->temp[0][4 * i + G];
810  } else {
811  VLC_INTERN(s->temp[0][4 * i + B], s->vlc[0].table,
812  &s->gb, re, VLC_BITS, 3);
813 
814  UPDATE_CACHE(re, &s->gb);
815  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
816  VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
817  &s->gb, re, VLC_BITS, 3);
818 
819  UPDATE_CACHE(re, &s->gb);
820  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
821  VLC_INTERN(s->temp[0][4 * i + R], s->vlc[2].table,
822  &s->gb, re, VLC_BITS, 3);
823  }
824  }
825  if (alpha) {
826  UPDATE_CACHE(re, &s->gb);
827  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
828  VLC_INTERN(s->temp[0][4 * i + A], s->vlc[2].table,
829  &s->gb, re, VLC_BITS, 3);
830  } else
831  s->temp[0][4 * i + A] = 0;
832  }
833  CLOSE_READER(re, &s->gb);
834 }
835 
836 static void decode_bgr_bitstream(HYuvContext *s, int count)
837 {
838  if (s->decorrelate) {
839  if (s->bitstream_bpp == 24)
840  decode_bgr_1(s, count, 1, 0);
841  else
842  decode_bgr_1(s, count, 1, 1);
843  } else {
844  if (s->bitstream_bpp == 24)
845  decode_bgr_1(s, count, 0, 0);
846  else
847  decode_bgr_1(s, count, 0, 1);
848  }
849 }
850 
851 static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
852 {
853  int h, cy, i;
854  int offset[AV_NUM_DATA_POINTERS];
855 
856  if (!s->avctx->draw_horiz_band)
857  return;
858 
859  h = y - s->last_slice_end;
860  y -= h;
861 
862  if (s->bitstream_bpp == 12)
863  cy = y >> 1;
864  else
865  cy = y;
866 
867  offset[0] = frame->linesize[0] * y;
868  offset[1] = frame->linesize[1] * cy;
869  offset[2] = frame->linesize[2] * cy;
870  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
871  offset[i] = 0;
872  emms_c();
873 
874  s->avctx->draw_horiz_band(s->avctx, frame, offset, y, 3, h);
875 
876  s->last_slice_end = y + h;
877 }
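/* draw_slice() hands finished rows to the application as soon as they are
 * reconstructed: it reports the band between last_slice_end and y through
 * the draw_horiz_band callback, halving the chroma row offset when the
 * bitstream is 4:2:0 (bitstream_bpp == 12). */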
878 
879 static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
880 {
881  if (s->bps <= 8) {
882  return s->llviddsp.add_left_pred(dst, src, w, acc);
883  } else {
884  return s->llviddsp.add_left_pred_int16(( uint16_t *)dst, (const uint16_t *)src, s->n-1, w, acc);
885  }
886 }
887 
888 static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
889 {
890  if (s->bps <= 8) {
891  s->llviddsp.add_bytes(dst, src, w);
892  } else {
893  s->hdsp.add_int16((uint16_t*)dst, (const uint16_t*)src, s->n - 1, w);
894  }
895 }
896 
897 static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
898 {
899  if (s->bps <= 8) {
900  s->llviddsp.add_median_pred(dst, src, diff, w, left, left_top);
901  } else {
902  s->hdsp.add_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src, (const uint16_t *)diff, s->n-1, w, left, left_top);
903  }
904 }
905 
906 static int decode_slice(AVCodecContext *avctx, AVFrame *p, int height,
907  int buf_size, int y_offset, int table_size)
908 {
909  HYuvContext *s = avctx->priv_data;
910  int fake_ystride, fake_ustride, fake_vstride;
911  const int width = s->width;
912  const int width2 = s->width >> 1;
913  int ret;
914 
915  if ((ret = init_get_bits8(&s->gb, s->bitstream_buffer + table_size, buf_size - table_size)) < 0)
916  return ret;
917 
918  fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
919  fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
920  fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];
921 
922  if (s->version > 2) {
923  int plane;
924  for(plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
925  int left, lefttop, y;
926  int w = width;
927  int h = height;
928  int fake_stride = fake_ystride;
929 
930  if (s->chroma && (plane == 1 || plane == 2)) {
931  w >>= s->chroma_h_shift;
932  h >>= s->chroma_v_shift;
933  fake_stride = plane == 1 ? fake_ustride : fake_vstride;
934  }
935 
936  switch (s->predictor) {
937  case LEFT:
938  case PLANE:
939  decode_plane_bitstream(s, w, plane);
940  left = left_prediction(s, p->data[plane], s->temp[0], w, 0);
941 
942  for (y = 1; y < h; y++) {
943  uint8_t *dst = p->data[plane] + p->linesize[plane]*y;
944 
945  decode_plane_bitstream(s, w, plane);
946  left = left_prediction(s, dst, s->temp[0], w, left);
947  if (s->predictor == PLANE) {
948  if (y > s->interlaced) {
949  add_bytes(s, dst, dst - fake_stride, w);
950  }
951  }
952  }
953 
954  break;
955  case MEDIAN:
956  decode_plane_bitstream(s, w, plane);
957  left= left_prediction(s, p->data[plane], s->temp[0], w, 0);
958 
959  y = 1;
960  if (y >= h)
961  break;
962 
963  /* second line is left predicted for interlaced case */
964  if (s->interlaced) {
965  decode_plane_bitstream(s, w, plane);
966  left = left_prediction(s, p->data[plane] + p->linesize[plane], s->temp[0], w, left);
967  y++;
968  if (y >= h)
969  break;
970  }
971 
972  lefttop = p->data[plane][0];
973  decode_plane_bitstream(s, w, plane);
974  add_median_prediction(s, p->data[plane] + fake_stride, p->data[plane], s->temp[0], w, &left, &lefttop);
975  y++;
976 
977  for (; y<h; y++) {
978  uint8_t *dst;
979 
980  decode_plane_bitstream(s, w, plane);
981 
982  dst = p->data[plane] + p->linesize[plane] * y;
983 
984  add_median_prediction(s, dst, dst - fake_stride, s->temp[0], w, &left, &lefttop);
985  }
986 
987  break;
988  }
989  }
990  draw_slice(s, p, height);
991  } else if (s->bitstream_bpp < 24) {
992  int y, cy;
993  int lefty, leftu, leftv;
994  int lefttopy, lefttopu, lefttopv;
995 
996  if (s->yuy2) {
997  p->data[0][3] = get_bits(&s->gb, 8);
998  p->data[0][2] = get_bits(&s->gb, 8);
999  p->data[0][1] = get_bits(&s->gb, 8);
1000  p->data[0][0] = get_bits(&s->gb, 8);
1001 
1002  av_log(avctx, AV_LOG_ERROR,
1003  "YUY2 output is not implemented yet\n");
1004  return AVERROR_PATCHWELCOME;
1005  } else {
1006  leftv =
1007  p->data[2][0 + y_offset * p->linesize[2]] = get_bits(&s->gb, 8);
1008  lefty =
1009  p->data[0][1 + y_offset * p->linesize[0]] = get_bits(&s->gb, 8);
1010  leftu =
1011  p->data[1][0 + y_offset * p->linesize[1]] = get_bits(&s->gb, 8);
1012  p->data[0][0 + y_offset * p->linesize[0]] = get_bits(&s->gb, 8);
1013 
1014  switch (s->predictor) {
1015  case LEFT:
1016  case PLANE:
1017  decode_422_bitstream(s, width - 2);
1018  lefty = s->llviddsp.add_left_pred(p->data[0] + p->linesize[0] * y_offset + 2, s->temp[0],
1019  width - 2, lefty);
1020  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1021  leftu = s->llviddsp.add_left_pred(p->data[1] + p->linesize[1] * y_offset + 1, s->temp[1], width2 - 1, leftu);
1022  leftv = s->llviddsp.add_left_pred(p->data[2] + p->linesize[2] * y_offset + 1, s->temp[2], width2 - 1, leftv);
1023  }
1024 
1025  for (cy = y = 1; y < height; y++, cy++) {
1026  uint8_t *ydst, *udst, *vdst;
1027 
1028  if (s->bitstream_bpp == 12) {
1029  decode_gray_bitstream(s, width);
1030 
1031  ydst = p->data[0] + p->linesize[0] * (y + y_offset);
1032 
1033  lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
1034  width, lefty);
1035  if (s->predictor == PLANE) {
1036  if (y > s->interlaced)
1037  s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
1038  }
1039  y++;
1040  if (y >= height)
1041  break;
1042  }
1043 
1044  draw_slice(s, p, y);
1045 
1046  ydst = p->data[0] + p->linesize[0] * (y + y_offset);
1047  udst = p->data[1] + p->linesize[1] * (cy + y_offset);
1048  vdst = p->data[2] + p->linesize[2] * (cy + y_offset);
1049 
1050  decode_422_bitstream(s, width);
1051  lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
1052  width, lefty);
1053  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1054  leftu = s->llviddsp.add_left_pred(udst, s->temp[1], width2, leftu);
1055  leftv = s->llviddsp.add_left_pred(vdst, s->temp[2], width2, leftv);
1056  }
1057  if (s->predictor == PLANE) {
1058  if (cy > s->interlaced) {
1059  s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
1060  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1061  s->llviddsp.add_bytes(udst, udst - fake_ustride, width2);
1062  s->llviddsp.add_bytes(vdst, vdst - fake_vstride, width2);
1063  }
1064  }
1065  }
1066  }
1067  draw_slice(s, p, height);
1068 
1069  break;
1070  case MEDIAN:
1071  /* first line except first 2 pixels is left predicted */
1072  decode_422_bitstream(s, width - 2);
1073  lefty = s->llviddsp.add_left_pred(p->data[0] + 2, s->temp[0],
1074  width - 2, lefty);
1075  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1076  leftu = s->llviddsp.add_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1077  leftv = s->llviddsp.add_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1078  }
1079 
1080  cy = y = 1;
1081  if (y >= height)
1082  break;
1083 
1084  /* second line is left predicted for interlaced case */
1085  if (s->interlaced) {
1086  decode_422_bitstream(s, width);
1087  lefty = s->llviddsp.add_left_pred(p->data[0] + p->linesize[0],
1088  s->temp[0], width, lefty);
1089  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1090  leftu = s->llviddsp.add_left_pred(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1091  leftv = s->llviddsp.add_left_pred(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1092  }
1093  y++;
1094  cy++;
1095  if (y >= height)
1096  break;
1097  }
1098 
1099  /* next 4 pixels are left predicted too */
1100  decode_422_bitstream(s, 4);
1101  lefty = s->llviddsp.add_left_pred(p->data[0] + fake_ystride,
1102  s->temp[0], 4, lefty);
1103  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1104  leftu = s->llviddsp.add_left_pred(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1105  leftv = s->llviddsp.add_left_pred(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1106  }
1107 
1108  /* next line except the first 4 pixels is median predicted */
1109  lefttopy = p->data[0][3];
1110  decode_422_bitstream(s, width - 4);
1111  s->llviddsp.add_median_pred(p->data[0] + fake_ystride + 4,
1112  p->data[0] + 4, s->temp[0],
1113  width - 4, &lefty, &lefttopy);
1114  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1115  lefttopu = p->data[1][1];
1116  lefttopv = p->data[2][1];
1117  s->llviddsp.add_median_pred(p->data[1] + fake_ustride + 2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
1118  s->llviddsp.add_median_pred(p->data[2] + fake_vstride + 2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
1119  }
1120  y++;
1121  cy++;
1122 
1123  for (; y < height; y++, cy++) {
1124  uint8_t *ydst, *udst, *vdst;
1125 
1126  if (s->bitstream_bpp == 12) {
1127  while (2 * cy > y) {
1128  decode_gray_bitstream(s, width);
1129  ydst = p->data[0] + p->linesize[0] * y;
1130  s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
1131  s->temp[0], width,
1132  &lefty, &lefttopy);
1133  y++;
1134  }
1135  if (y >= height)
1136  break;
1137  }
1138  draw_slice(s, p, y);
1139 
1140  decode_422_bitstream(s, width);
1141 
1142  ydst = p->data[0] + p->linesize[0] * y;
1143  udst = p->data[1] + p->linesize[1] * cy;
1144  vdst = p->data[2] + p->linesize[2] * cy;
1145 
1146  s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
1147  s->temp[0], width,
1148  &lefty, &lefttopy);
1149  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1150  s->llviddsp.add_median_pred(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1151  s->llviddsp.add_median_pred(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1152  }
1153  }
1154 
1155  draw_slice(s, p, height);
1156  break;
1157  }
1158  }
1159  } else {
1160  int y;
1161  uint8_t left[4];
1162  const int last_line = (y_offset + height - 1) * p->linesize[0];
1163 
1164  if (s->bitstream_bpp == 32) {
1165  left[A] = p->data[0][last_line + A] = get_bits(&s->gb, 8);
1166  left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
1167  left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
1168  left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
1169  } else {
1170  left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
1171  left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
1172  left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
1173  left[A] = p->data[0][last_line + A] = 255;
1174  skip_bits(&s->gb, 8);
1175  }
1176 
1177  if (s->bgr32) {
1178  switch (s->predictor) {
1179  case LEFT:
1180  case PLANE:
1181  decode_bgr_bitstream(s, width - 1);
1182  s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + last_line + 4,
1183  s->temp[0], width - 1, left);
1184 
1185  for (y = height - 2; y >= 0; y--) { // Yes it is stored upside down.
1186  decode_bgr_bitstream(s, width);
1187 
1188  s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + p->linesize[0] * (y + y_offset),
1189  s->temp[0], width, left);
1190  if (s->predictor == PLANE) {
1191  if (s->bitstream_bpp != 32)
1192  left[A] = 0;
1193  if (y < height - 1 - s->interlaced) {
1194  s->llviddsp.add_bytes(p->data[0] + p->linesize[0] * (y + y_offset),
1195  p->data[0] + p->linesize[0] * (y + y_offset) +
1196  fake_ystride, 4 * width);
1197  }
1198  }
1199  }
1200  // just 1 large slice as this is not possible in reverse order
1201  draw_slice(s, p, height);
1202  break;
1203  default:
1204  av_log(avctx, AV_LOG_ERROR,
1205  "prediction type not supported!\n");
1206  }
1207  } else {
1208  av_log(avctx, AV_LOG_ERROR,
1209  "BGR24 output is not implemented yet\n");
1210  return AVERROR_PATCHWELCOME;
1211  }
1212  }
1213 
1214  return 0;
1215 }
1216 
1217 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
1218  AVPacket *avpkt)
1219 {
1220  const uint8_t *buf = avpkt->data;
1221  int buf_size = avpkt->size;
1222  HYuvContext *s = avctx->priv_data;
1223  const int width = s->width;
1224  const int height = s->height;
1225  ThreadFrame frame = { .f = data };
1226  AVFrame *const p = data;
1227  int slice, table_size = 0, ret, nb_slices;
1228  unsigned slices_info_offset;
1229  int slice_height;
1230 
1231  if (buf_size < (width * height + 7)/8)
1232  return AVERROR_INVALIDDATA;
1233 
1234  av_fast_padded_malloc(&s->bitstream_buffer,
1235  &s->bitstream_buffer_size,
1236  buf_size);
1237  if (!s->bitstream_buffer)
1238  return AVERROR(ENOMEM);
1239 
1240  s->bdsp.bswap_buf((uint32_t *) s->bitstream_buffer,
1241  (const uint32_t *) buf, buf_size / 4);
1242 
1243  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
1244  return ret;
1245 
1246  if (s->context) {
1247  table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
1248  if (table_size < 0)
1249  return table_size;
1250  }
1251 
1252  if ((unsigned) (buf_size - table_size) >= INT_MAX / 8)
1253  return AVERROR_INVALIDDATA;
1254 
1255  s->last_slice_end = 0;
1256 
1257  if (avctx->codec_id == AV_CODEC_ID_HYMT &&
1258  (buf_size > 32 && AV_RL32(avpkt->data + buf_size - 16) == 0)) {
1259  slices_info_offset = AV_RL32(avpkt->data + buf_size - 4);
1260  slice_height = AV_RL32(avpkt->data + buf_size - 8);
1261  nb_slices = AV_RL32(avpkt->data + buf_size - 12);
1262  if (nb_slices * 8LL + slices_info_offset > buf_size - 16 ||
1263  s->chroma_v_shift ||
1264  slice_height <= 0 || nb_slices * (uint64_t)slice_height > height)
1265  return AVERROR_INVALIDDATA;
1266  } else {
1267  slice_height = height;
1268  nb_slices = 1;
1269  }
1270 
1271  for (slice = 0; slice < nb_slices; slice++) {
1272  int y_offset, slice_offset, slice_size;
1273 
1274  if (nb_slices > 1) {
1275  slice_offset = AV_RL32(avpkt->data + slices_info_offset + slice * 8);
1276  slice_size = AV_RL32(avpkt->data + slices_info_offset + slice * 8 + 4);
1277 
1278  if (slice_offset < 0 || slice_size <= 0 || (slice_offset&3) ||
1279  slice_offset + (int64_t)slice_size > buf_size)
1280  return AVERROR_INVALIDDATA;
1281 
1282  y_offset = height - (slice + 1) * slice_height;
1283  s->bdsp.bswap_buf((uint32_t *)s->bitstream_buffer,
1284  (const uint32_t *)(buf + slice_offset), slice_size / 4);
1285  } else {
1286  y_offset = 0;
1287  slice_offset = 0;
1288  slice_size = buf_size;
1289  }
1290 
1291  ret = decode_slice(avctx, p, slice_height, slice_size, y_offset, table_size);
1292  emms_c();
1293  if (ret < 0)
1294  return ret;
1295  }
1296 
1297  *got_frame = 1;
1298 
1299  return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
1300 }
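/* For HYMT (HuffYUV MT) packets, the last 16 bytes form a trailer: a zero
 * marker, the slice count, the per-slice height and the offset of a table
 * of per-slice offset/size pairs.  decode_frame() above uses that table to
 * byte-swap and decode each slice independently. */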
1301 
1302 AVCodec ff_huffyuv_decoder = {
1303  .name = "huffyuv",
1304  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1305  .type = AVMEDIA_TYPE_VIDEO,
1306  .id = AV_CODEC_ID_HUFFYUV,
1307  .priv_data_size = sizeof(HYuvContext),
1308  .init = decode_init,
1309  .close = decode_end,
1310  .decode = decode_frame,
1311  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
1312  AV_CODEC_CAP_FRAME_THREADS,
1313  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1314 };
1315 
1316 #if CONFIG_FFVHUFF_DECODER
1317 AVCodec ff_ffvhuff_decoder = {
1318  .name = "ffvhuff",
1319  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1320  .type = AVMEDIA_TYPE_VIDEO,
1321  .id = AV_CODEC_ID_FFVHUFF,
1322  .priv_data_size = sizeof(HYuvContext),
1323  .init = decode_init,
1324  .close = decode_end,
1325  .decode = decode_frame,
1326  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
1327  AV_CODEC_CAP_FRAME_THREADS,
1328  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1329 };
1330 #endif /* CONFIG_FFVHUFF_DECODER */
1331 
1332 #if CONFIG_HYMT_DECODER
1333 AVCodec ff_hymt_decoder = {
1334  .name = "hymt",
1335  .long_name = NULL_IF_CONFIG_SMALL("HuffYUV MT"),
1336  .type = AVMEDIA_TYPE_VIDEO,
1337  .id = AV_CODEC_ID_HYMT,
1338  .priv_data_size = sizeof(HYuvContext),
1339  .init = decode_init,
1340  .close = decode_end,
1341  .decode = decode_frame,
1342  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
1343  AV_CODEC_CAP_FRAME_THREADS,
1344  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1345 };
1346 #endif /* CONFIG_HYMT_DECODER */
VLC_INTERN
#define VLC_INTERN(dst, table, gb, name, bits, max_depth)
Subset of GET_VLC for use in hand-roller VLC code.
Definition: huffyuvdec.c:603
add_bytes
static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
Definition: huffyuvdec.c:888
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:430
HYuvContext
Definition: huffyuv.h:55
AVCodec
AVCodec.
Definition: avcodec.h:3481
generate_joint_tables
static int generate_joint_tables(HYuvContext *s)
Definition: huffyuvdec.c:118
decorrelate
static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median)
Definition: snowenc.c:1329
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
r
const char * r
Definition: vf_curves.c:114
acc
int acc
Definition: yuv2rgb.c:555
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AV_CODEC_ID_HUFFYUV
@ AV_CODEC_ID_HUFFYUV
Definition: avcodec.h:243
out
FILE * out
Definition: movenc.c:54
AV_CODEC_ID_HYMT
@ AV_CODEC_ID_HYMT
Definition: avcodec.h:455
n
int n
Definition: avisynth_c.h:760
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:252
MAX_VLC_N
#define MAX_VLC_N
Definition: huffyuv.h:47
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
count
void INT64 INT64 count
Definition: avisynth_c.h:767
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:422
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
pixdesc.h
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:429
w
uint8_t w
Definition: llviddspenc.c:38
decode_gray_bitstream
static void decode_gray_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:760
R
#define R
Definition: huffyuvdsp.h:34
AVPacket::data
uint8_t * data
Definition: avcodec.h:1477
huffyuvdsp.h
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:424
init_vlc
#define init_vlc(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
Definition: vlc.h:38
b
#define b
Definition: input.c:41
READ_2PIX
#define READ_2PIX(dst0, dst1, plane1)
Definition: huffyuvdec.c:650
classic_add_luma
static const unsigned char classic_add_luma[256]
Definition: huffyuvdec.c:61
data
const char data[16]
Definition: mxf.c:91
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
MEDIAN
@ MEDIAN
Definition: huffyuv.h:52
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: huffyuvdec.c:291
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
OP8bits
#define OP8bits(dst0, dst1, code)
Definition: huffyuvdec.c:648
OP14bits
#define OP14bits(dst0, dst1, code)
Definition: huffyuvdec.c:689
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:425
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
thread.h
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
add_median_prediction
static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
Definition: huffyuvdec.c:897
ff_huffyuv_alloc_temp
av_cold int ff_huffyuv_alloc_temp(HYuvContext *s)
Definition: huffyuv.c:58
A
#define A(x)
Definition: vp56_arith.h:28
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:421
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:405
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:403
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:431
plane
int plane
Definition: avisynth_c.h:384
GetBitContext
Definition: get_bits.h:61
VLC_BITS
#define VLC_BITS
Definition: asvdec.c:37
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:385
classic_shift_chroma_table_size
#define classic_shift_chroma_table_size
Definition: huffyuvdec.c:52
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2550
classic_shift_luma
static const unsigned char classic_shift_luma[classic_shift_luma_table_size+AV_INPUT_BUFFER_PADDING_SIZE]
Definition: huffyuvdec.c:45
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:371
decode_bgr_1
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha)
Definition: huffyuvdec.c:778
src
#define src
Definition: vp8dsp.c:254
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
buf
void * buf
Definition: avisynth_c.h:766
av_cold
#define av_cold
Definition: attributes.h:84
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:399
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
read_huffman_tables
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
Definition: huffyuvdec.c:208
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:1667
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:400
draw_slice
static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
Definition: huffyuvdec.c:851
g
const char * g
Definition: vf_curves.c:115
bits
uint8_t bits
Definition: vp3data.h:202
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:384
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:398
get_bits.h
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:359
AV_CODEC_ID_FFVHUFF
@ AV_CODEC_ID_FFVHUFF
Definition: avcodec.h:285
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:1575
if
if(ret)
Definition: filter_design.txt:179
ff_huffyuv_common_end
av_cold void ff_huffyuv_common_end(HYuvContext *s)
Definition: huffyuv.c:86
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1037
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:406
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
ff_huffyuv_generate_bits_table
int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int n)
Definition: huffyuv.c:39
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:227
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:964
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:388
ff_init_vlc_sparse
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:273
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
READ_2PIX_PLANE16
#define READ_2PIX_PLANE16(dst0, dst1, plane)
Definition: huffyuvdec.c:693
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:402
index
int index
Definition: gxfenc.c:89
READ_2PIX_PLANE
#define READ_2PIX_PLANE(dst0, dst1, plane, OP)
Definition: huffyuvdec.c:684
error
static void error(const char *err)
Definition: target_dec_fuzzer.c:61
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
left_prediction
static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
Definition: huffyuvdec.c:879
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:883
AVPacket::size
int size
Definition: avcodec.h:1478
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:392
init_thread_copy
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an init_thread_copy() which re-allocates them for other threads. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little speed gain at this point but it should work. If there are inter-frame dependencies
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:296
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:394
decode_end
static av_cold int decode_end(AVCodecContext *avctx)
Definition: huffyuvdec.c:277
LEFT
#define LEFT
Definition: cdgraphics.c:166
val
const char const char void * val
Definition: avisynth_c.h:863
ff_huffyuvdsp_init
av_cold void ff_huffyuvdsp_init(HuffYUVDSPContext *c, enum AVPixelFormat pix_fmt)
Definition: huffyuvdsp.c:83
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
height
#define height
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:360
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
classic_add_chroma
static const unsigned char classic_add_chroma[256]
Definition: huffyuvdec.c:80
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:426
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
decode_slice
static int decode_slice(AVCodecContext *avctx, AVFrame *p, int height, int buf_size, int y_offset, int table_size)
Definition: huffyuvdec.c:906
decode_plane_bitstream
static void decode_plane_bitstream(HYuvContext *s, int width, int plane)
Definition: huffyuvdec.c:699
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:2789
interlaced
uint8_t interlaced
Definition: mxfenc.c:2217
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1666
ff_hymt_decoder
AVCodec ff_hymt_decoder
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:404
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
av_always_inline
#define av_always_inline
Definition: attributes.h:43
uint8_t
uint8_t
Definition: audio_convert.c:194
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
AVCodec::name
const char * name
Name of the codec implementation.
Definition: avcodec.h:3488
len
int len
Definition: vorbis_enc_data.h:452
PLANE
@ PLANE
Definition: huffyuv.h:51
AVCodecContext::height
int height
Definition: avcodec.h:1738
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:386
avcodec.h
G
#define G
Definition: huffyuvdsp.h:33
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:364
decode_bgr_bitstream
static void decode_bgr_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:836
classic_shift_chroma
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size+AV_INPUT_BUFFER_PADDING_SIZE]
Definition: huffyuvdec.c:53
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:423
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:391
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:790
left
Neighbouring-block tag from the Snow bitstream description; the full text (half-pel filter coefficients, high-level bitstream layout, the binary range coder and its state transition table, neighbouring blocks and motion vector prediction) lives in doc/snow.txt.
Definition: snow.txt:386
classic_shift_luma_table_size
#define classic_shift_luma_table_size
Definition: huffyuvdec.c:44
AV_RL32
unsigned int_TMPL AV_RL32: read a 32-bit little-endian value (generated from the bytestream.h type-template macros)
Definition: bytestream.h:88
BITS_LEFT
#define BITS_LEFT(name, gb)
Definition: get_bits.h:191
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:396
B
#define B
Definition: huffyuvdsp.h:32
ff_llviddsp_init
void ff_llviddsp_init(LLVidDSPContext *c)
Definition: lossless_videodsp.c:112
decode_422_bitstream
static void decode_422_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:655
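decode_422_bitstream() reads `count` pairs of 4:2:2 pixels, with one luma, one Cb, a second luma and one Cr code per pair, in the Y, U, Y, V order of the packed stream. The following is only an illustrative sketch of that per-pair loop: the real function uses the cached-reader macros and dual-symbol tables for speed, the field names s->gb, s->vlc and s->temp are taken from HYuvContext as assumptions, and VLC_BITS is assumed to match the lookup width used when the tables were built:

/* Sketch of 4:2:2 entropy decoding: for every two horizontal pixels the
 * stream carries Y, U, Y, V, each coded with its own Huffman table. */
static void decode_422_sketch(HYuvContext *s, int count)
{
    int i;
    for (i = 0; i < count; i++) {
        s->temp[0][2 * i    ] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3); /* Y  */
        s->temp[1][i]         = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3); /* Cb */
        s->temp[0][2 * i + 1] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3); /* Y  */
        s->temp[2][i]         = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); /* Cr */
    }
}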
AVCodecContext
main external API structure.
Definition: avcodec.h:1565
ThreadFrame
Definition: thread.h:34
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
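SHOW_UBITS() belongs to the macro-based reader in get_bits.h: the caller opens a local reader, refills the cache, peeks bits, then skips them explicitly. A hedged sketch of that pattern, functionally equivalent to a single get_bits(gb, 8) call; the companion macros OPEN_READER, UPDATE_CACHE, LAST_SKIP_BITS and CLOSE_READER come from the same header:

#include "get_bits.h"

/* Peek and then consume 8 bits using the cached-reader macros. */
static unsigned peek_and_skip8(GetBitContext *gb)
{
    unsigned v;
    OPEN_READER(re, gb);          /* local cache and bit-index variables   */
    UPDATE_CACHE(re, gb);         /* refill the cache from the buffer      */
    v = SHOW_UBITS(re, gb, 8);    /* peek 8 bits without consuming them    */
    LAST_SKIP_BITS(re, gb, 8);    /* now actually consume them             */
    CLOSE_READER(re, gb);         /* write the updated position back       */
    return v;
}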
VLC
Definition: vlc.h:26
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:130
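sign_extend() reinterprets the low `bits` bits of `val` as a two's-complement number, which the decoder needs when codes narrower than an int carry signed differences. A sketch of the usual shift-through-unsigned approach (behaviour for bits == 0 is not handled here):

#include <stdint.h>

/* Sign-extend the low `bits` bits of `val`.  Shifting the value up so its
 * sign bit becomes the word's sign bit, then shifting back arithmetically,
 * avoids branches; going through the union keeps the left shift on
 * unsigned, well-defined ground. */
static inline int sign_extend_sketch(int val, unsigned bits)
{
    unsigned shift = 8 * sizeof(int) - bits;
    union { unsigned u; int s; } v = { (unsigned)val << shift };
    return v.s >> shift;
}
/* e.g. sign_extend_sketch(0xFF, 8) == -1, sign_extend_sketch(0x7F, 8) == 127 */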
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
lossless_videodsp.h
map
const VDPAUPixFmtMap * map
Definition: hwcontext_vdpau.c:85
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:136
ff_ffvhuff_decoder
AVCodec ff_ffvhuff_decoder
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
ff_huffyuv_common_init
av_cold void ff_huffyuv_common_init(AVCodecContext *avctx)
Definition: huffyuv.c:71
decode_frame
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: huffyuvdec.c:1217
AVPacket
This structure stores compressed data.
Definition: avcodec.h:1454
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:1592
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
read_len_table
static int read_len_table(uint8_t *dst, GetBitContext *gb, int n)
Definition: huffyuvdec.c:99
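read_len_table() expands the run-length coded table of Huffman code lengths stored in the extradata into a flat 256-entry array: each record is a small repeat count plus a code length, with an escape for long runs. A hedged sketch of that loop; the exact field widths (3-bit repeat, 5-bit length, 8-bit escape) follow the classic HuffYUV layout and should be checked against the source:

/* Sketch: expand a run-length coded table of Huffman code lengths. */
static int read_len_table_sketch(uint8_t *dst, GetBitContext *gb, int n)
{
    int i = 0;
    while (i < n) {
        int repeat = get_bits(gb, 3);   /* run length, 0 = escaped        */
        int val    = get_bits(gb, 5);   /* code length for this run       */
        if (repeat == 0)
            repeat = get_bits(gb, 8);   /* long runs use an 8-bit count   */
        if (i + repeat > n || get_bits_left(gb) < 0)
            return AVERROR_INVALIDDATA; /* run overflows the output table */
        while (repeat--)
            dst[i++] = val;
    }
    return 0;
}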
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:1738
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: avcodec.h:975
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ff_huffyuv_decoder
AVCodec ff_huffyuv_decoder
Definition: huffyuvdec.c:1302
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
length
const char int length
Definition: avisynth_c.h:860
h
h
Definition: vp9dsp_template.c:2038
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:397
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
Definition: imgutils.c:282
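The decoder uses av_image_check_size() to reject dimensions whose total byte count could overflow before any planes are allocated. A small usage sketch with the signature quoted above; avctx is assumed to be the decoder's AVCodecContext and is only used as the logging context (it may also be NULL):

#include "libavutil/imgutils.h"

/* Validate picture dimensions before allocating buffers for them. */
int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
if (ret < 0)
    return ret;   /* dimensions invalid or too large to address */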
huffyuv.h
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:176
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:395
re
float re
Definition: fft.c:82
read_old_huffman_tables
static int read_old_huffman_tables(HYuvContext *s)
Definition: huffyuvdec.c:237