FFmpeg: libavcodec/huffyuvdec.c
1 /*
2  * huffyuv decoder
3  *
4  * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7  * the algorithm used
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  *
25  * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
26  */
27 
28 /**
29  * @file
30  * huffyuv decoder
31  */
32 
33 #define UNCHECKED_BITSTREAM_READER 1
34 
35 #include "avcodec.h"
36 #include "get_bits.h"
37 #include "huffyuv.h"
38 #include "huffyuvdsp.h"
39 #include "lossless_videodsp.h"
40 #include "thread.h"
41 #include "libavutil/imgutils.h"
42 #include "libavutil/pixdesc.h"
43 
44 #define classic_shift_luma_table_size 42
45 static const unsigned char classic_shift_luma[classic_shift_luma_table_size + AV_INPUT_BUFFER_PADDING_SIZE] = {
46  34, 36, 35, 69, 135, 232, 9, 16, 10, 24, 11, 23, 12, 16, 13, 10,
47  14, 8, 15, 8, 16, 8, 17, 20, 16, 10, 207, 206, 205, 236, 11, 8,
48  10, 21, 9, 23, 8, 8, 199, 70, 69, 68, 0,
49  0,0,0,0,0,0,0,0,
50 };
51 
52 #define classic_shift_chroma_table_size 59
53 static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + AV_INPUT_BUFFER_PADDING_SIZE] = {
54  66, 36, 37, 38, 39, 40, 41, 75, 76, 77, 110, 239, 144, 81, 82, 83,
55  84, 85, 118, 183, 56, 57, 88, 89, 56, 89, 154, 57, 58, 57, 26, 141,
56  57, 56, 58, 57, 58, 57, 184, 119, 214, 245, 116, 83, 82, 49, 80, 79,
57  78, 77, 44, 75, 41, 40, 39, 38, 37, 36, 34, 0,
58  0,0,0,0,0,0,0,0,
59 };
60 
61 static const unsigned char classic_add_luma[256] = {
62  3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
63  73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
64  68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
65  35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
66  37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
67  35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
68  27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
69  15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
70  12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
71  12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
72  18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
73  28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
74  28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
75  62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
76  54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
77  46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
78 };
79 
80 static const unsigned char classic_add_chroma[256] = {
81  3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
82  7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
83  11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
84  43, 45, 76, 81, 46, 82, 75, 55, 56, 144, 58, 80, 60, 74, 147, 63,
85  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
86  80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
87  17, 14, 5, 6, 100, 54, 47, 50, 51, 53, 106, 107, 108, 109, 110, 111,
88  112, 113, 114, 115, 4, 117, 118, 92, 94, 121, 122, 3, 124, 103, 2, 1,
89  0, 129, 130, 131, 120, 119, 126, 125, 136, 137, 138, 139, 140, 141, 142, 134,
90  135, 132, 133, 104, 64, 101, 62, 57, 102, 95, 93, 59, 61, 28, 97, 96,
91  52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
92  19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
93  7, 128, 127, 105, 123, 116, 35, 34, 33, 145, 31, 79, 42, 146, 78, 26,
94  83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
95  14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
96  6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
97 };
98 
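/* The code-length table is stored run-length encoded: each entry is a
 * 3-bit repeat count followed by a 5-bit length value; a repeat count of
 * 0 means an explicit 8-bit repeat count follows. Reading fails if the
 * runs overflow the n-entry table or the bitstream runs out. */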
99 static int read_len_table(uint8_t *dst, GetBitContext *gb, int n)
100 {
101  int i, val, repeat;
102 
103  for (i = 0; i < n;) {
104  repeat = get_bits(gb, 3);
105  val = get_bits(gb, 5);
106  if (repeat == 0)
107  repeat = get_bits(gb, 8);
108  if (i + repeat > n || get_bits_left(gb) < 0) {
109  av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
110  return AVERROR_INVALIDDATA;
111  }
112  while (repeat--)
113  dst[i++] = val;
114  }
115  return 0;
116 }
117 
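/* Build the "joint" VLC tables s->vlc[4..7]: each entry decodes two symbols
 * at once (or, on the packed RGB path, a whole B,G,R triple via pix_bgr_map)
 * in a single VLC_BITS lookup, pairing only codes whose combined length
 * still fits in VLC_BITS. */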
118 static int generate_joint_tables(HYuvContext *s)
119 {
120  int ret;
121  uint16_t *symbols = av_mallocz(5 << VLC_BITS);
122  uint16_t *bits;
123  uint8_t *len;
124  if (!symbols)
125  return AVERROR(ENOMEM);
126  bits = symbols + (1 << VLC_BITS);
127  len = (uint8_t *)(bits + (1 << VLC_BITS));
128 
129  if (s->bitstream_bpp < 24 || s->version > 2) {
130  int p, i, y, u;
131  for (p = 0; p < 4; p++) {
132  int p0 = s->version > 2 ? p : 0;
133  for (i = y = 0; y < s->vlc_n; y++) {
134  int len0 = s->len[p0][y];
135  int limit = VLC_BITS - len0;
136  if (limit <= 0 || !len0)
137  continue;
138  if ((sign_extend(y, 8) & (s->vlc_n-1)) != y)
139  continue;
140  for (u = 0; u < s->vlc_n; u++) {
141  int len1 = s->len[p][u];
142  if (len1 > limit || !len1)
143  continue;
144  if ((sign_extend(u, 8) & (s->vlc_n-1)) != u)
145  continue;
146  av_assert0(i < (1 << VLC_BITS));
147  len[i] = len0 + len1;
148  bits[i] = (s->bits[p0][y] << len1) + s->bits[p][u];
149  symbols[i] = (y << 8) + (u & 0xFF);
150  i++;
151  }
152  }
153  ff_free_vlc(&s->vlc[4 + p]);
154  if ((ret = ff_init_vlc_sparse(&s->vlc[4 + p], VLC_BITS, i, len, 1, 1,
155  bits, 2, 2, symbols, 2, 2, 0)) < 0)
156  goto out;
157  }
158  } else {
159  uint8_t (*map)[4] = (uint8_t(*)[4]) s->pix_bgr_map;
160  int i, b, g, r, code;
161  int p0 = s->decorrelate;
162  int p1 = !s->decorrelate;
163  /* Restrict the range to +/-16 because that's pretty much guaranteed
164  * to cover all the combinations that fit in 11 bits total, and it
165  * does not matter if we miss a few rare codes. */
166  for (i = 0, g = -16; g < 16; g++) {
167  int len0 = s->len[p0][g & 255];
168  int limit0 = VLC_BITS - len0;
169  if (limit0 < 2 || !len0)
170  continue;
171  for (b = -16; b < 16; b++) {
172  int len1 = s->len[p1][b & 255];
173  int limit1 = limit0 - len1;
174  if (limit1 < 1 || !len1)
175  continue;
176  code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
177  for (r = -16; r < 16; r++) {
178  int len2 = s->len[2][r & 255];
179  if (len2 > limit1 || !len2)
180  continue;
181  av_assert0(i < (1 << VLC_BITS));
182  len[i] = len0 + len1 + len2;
183  bits[i] = (code << len2) + s->bits[2][r & 255];
184  if (s->decorrelate) {
185  map[i][G] = g;
186  map[i][B] = g + b;
187  map[i][R] = g + r;
188  } else {
189  map[i][B] = g;
190  map[i][G] = b;
191  map[i][R] = r;
192  }
193  i++;
194  }
195  }
196  }
197  ff_free_vlc(&s->vlc[4]);
198  if ((ret = init_vlc(&s->vlc[4], VLC_BITS, i, len, 1, 1,
199  bits, 2, 2, 0)) < 0)
200  goto out;
201  }
202  ret = 0;
203 out:
204  av_freep(&symbols);
205  return ret;
206 }
207 
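/* Parse the Huffman tables that precede the frame data: for each coding
 * plane, read the run-length coded length table, derive the code values
 * from the lengths with ff_huffyuv_generate_bits_table(), build the
 * per-symbol VLC, then the joint tables. Returns the number of bytes
 * consumed. */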
208 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
209 {
210  GetBitContext gb;
211  int i, ret;
212  int count = 3;
213 
214  if ((ret = init_get_bits(&gb, src, length * 8)) < 0)
215  return ret;
216 
217  if (s->version > 2)
218  count = 1 + s->alpha + 2*s->chroma;
219 
220  for (i = 0; i < count; i++) {
221  if ((ret = read_len_table(s->len[i], &gb, s->vlc_n)) < 0)
222  return ret;
223  if ((ret = ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n)) < 0)
224  return ret;
225  ff_free_vlc(&s->vlc[i]);
226  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, s->vlc_n, s->len[i], 1, 1,
227  s->bits[i], 4, 4, 0)) < 0)
228  return ret;
229  }
230 
231  if ((ret = generate_joint_tables(s)) < 0)
232  return ret;
233 
234  return (get_bits_count(&gb) + 7) / 8;
235 }
236 
237 static int read_old_huffman_tables(HYuvContext *s)
238 {
239  GetBitContext gb;
240  int i, ret;
241 
242  init_get_bits(&gb, classic_shift_luma,
243  classic_shift_luma_table_size * 8);
244  if ((ret = read_len_table(s->len[0], &gb, 256)) < 0)
245  return ret;
246 
247  init_get_bits(&gb, classic_shift_chroma,
248  classic_shift_chroma_table_size * 8);
249  if ((ret = read_len_table(s->len[1], &gb, 256)) < 0)
250  return ret;
251 
252  for (i = 0; i < 256; i++)
253  s->bits[0][i] = classic_add_luma[i];
254  for (i = 0; i < 256; i++)
255  s->bits[1][i] = classic_add_chroma[i];
256 
257  if (s->bitstream_bpp >= 24) {
258  memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
259  memcpy(s->len[1], s->len[0], 256 * sizeof(uint8_t));
260  }
261  memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
262  memcpy(s->len[2], s->len[1], 256 * sizeof(uint8_t));
263 
264  for (i = 0; i < 4; i++) {
265  ff_free_vlc(&s->vlc[i]);
266  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
267  s->bits[i], 4, 4, 0)) < 0)
268  return ret;
269  }
270 
271  if ((ret = generate_joint_tables(s)) < 0)
272  return ret;
273 
274  return 0;
275 }
276 
277 static av_cold int decode_end(AVCodecContext *avctx)
278 {
279  HYuvContext *s = avctx->priv_data;
280  int i;
281 
282  ff_huffyuv_common_end(s);
283  av_freep(&s->bitstream_buffer);
284 
285  for (i = 0; i < 8; i++)
286  ff_free_vlc(&s->vlc[i]);
287 
288  return 0;
289 }
290 
291 static av_cold int decode_init(AVCodecContext *avctx)
292 {
293  HYuvContext *s = avctx->priv_data;
294  int ret;
295 
296  ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
297  if (ret < 0)
298  return ret;
299 
300  ff_huffyuvdsp_init(&s->hdsp, avctx->pix_fmt);
301  ff_llviddsp_init(&s->llviddsp);
302  memset(s->vlc, 0, 4 * sizeof(VLC));
303 
304  s->interlaced = avctx->height > 288;
305  s->bgr32 = 1;
306 
307  if (avctx->extradata_size) {
308  if ((avctx->bits_per_coded_sample & 7) &&
309  avctx->bits_per_coded_sample != 12)
310  s->version = 1; // do such files exist at all?
311  else if (avctx->extradata_size > 3 && avctx->extradata[3] == 0)
312  s->version = 2;
313  else
314  s->version = 3;
315  } else
316  s->version = 0;
317 
318  s->bps = 8;
319  s->n = 1<<s->bps;
320  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
321  s->chroma = 1;
322  if (s->version >= 2) {
323  int method, interlace;
324 
325  if (avctx->extradata_size < 4)
326  return AVERROR_INVALIDDATA;
327 
328  method = avctx->extradata[0];
329  s->decorrelate = method & 64 ? 1 : 0;
330  s->predictor = method & 63;
331  if (s->version == 2) {
332  s->bitstream_bpp = avctx->extradata[1];
333  if (s->bitstream_bpp == 0)
334  s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
335  } else {
336  s->bps = (avctx->extradata[1] >> 4) + 1;
337  s->n = 1<<s->bps;
338  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
339  s->chroma_h_shift = avctx->extradata[1] & 3;
340  s->chroma_v_shift = (avctx->extradata[1] >> 2) & 3;
341  s->yuv = !!(avctx->extradata[2] & 1);
342  s->chroma= !!(avctx->extradata[2] & 3);
343  s->alpha = !!(avctx->extradata[2] & 4);
344  }
345  interlace = (avctx->extradata[2] & 0x30) >> 4;
346  s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
347  s->context = avctx->extradata[2] & 0x40 ? 1 : 0;
348 
349  if ((ret = read_huffman_tables(s, avctx->extradata + 4,
350  avctx->extradata_size - 4)) < 0)
351  goto error;
352  } else {
353  switch (avctx->bits_per_coded_sample & 7) {
354  case 1:
355  s->predictor = LEFT;
356  s->decorrelate = 0;
357  break;
358  case 2:
359  s->predictor = LEFT;
360  s->decorrelate = 1;
361  break;
362  case 3:
363  s->predictor = PLANE;
364  s->decorrelate = avctx->bits_per_coded_sample >= 24;
365  break;
366  case 4:
367  s->predictor = MEDIAN;
368  s->decorrelate = 0;
369  break;
370  default:
371  s->predictor = LEFT; // OLD
372  s->decorrelate = 0;
373  break;
374  }
375  s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
376  s->context = 0;
377 
378  if ((ret = read_old_huffman_tables(s)) < 0)
379  goto error;
380  }
381 
382  if (s->version <= 2) {
383  switch (s->bitstream_bpp) {
384  case 12:
385  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
386  s->yuv = 1;
387  break;
388  case 16:
389  if (s->yuy2)
390  avctx->pix_fmt = AV_PIX_FMT_YUYV422;
391  else
392  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
393  s->yuv = 1;
394  break;
395  case 24:
396  if (s->bgr32)
397  avctx->pix_fmt = AV_PIX_FMT_0RGB32;
398  else
399  avctx->pix_fmt = AV_PIX_FMT_BGR24;
400  break;
401  case 32:
402  av_assert0(s->bgr32);
403  avctx->pix_fmt = AV_PIX_FMT_RGB32;
404  s->alpha = 1;
405  break;
406  default:
407  ret = AVERROR_INVALIDDATA;
408  goto error;
409  }
410  av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
411  &s->chroma_h_shift,
412  &s->chroma_v_shift);
413  } else {
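/* For version 3 the pixel format is selected from a packed key:
 * bit 10 = chroma present, bit 9 = YUV, bit 8 = alpha, bits 4-7 = bps-1,
 * bits 0-1 = chroma_h_shift, bits 2-3 = chroma_v_shift.
 * e.g. 0x675 = chroma + YUV, 8 bits, h/v shift 1 -> YUV420P. */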
414  switch ( (s->chroma<<10) | (s->yuv<<9) | (s->alpha<<8) | ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2)) {
415  case 0x070:
416  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
417  break;
418  case 0x0F0:
419  avctx->pix_fmt = AV_PIX_FMT_GRAY16;
420  break;
421  case 0x470:
422  avctx->pix_fmt = AV_PIX_FMT_GBRP;
423  break;
424  case 0x480:
425  avctx->pix_fmt = AV_PIX_FMT_GBRP9;
426  break;
427  case 0x490:
428  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
429  break;
430  case 0x4B0:
431  avctx->pix_fmt = AV_PIX_FMT_GBRP12;
432  break;
433  case 0x4D0:
434  avctx->pix_fmt = AV_PIX_FMT_GBRP14;
435  break;
436  case 0x4F0:
437  avctx->pix_fmt = AV_PIX_FMT_GBRP16;
438  break;
439  case 0x570:
440  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
441  break;
442  case 0x670:
443  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
444  break;
445  case 0x680:
446  avctx->pix_fmt = AV_PIX_FMT_YUV444P9;
447  break;
448  case 0x690:
449  avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
450  break;
451  case 0x6B0:
452  avctx->pix_fmt = AV_PIX_FMT_YUV444P12;
453  break;
454  case 0x6D0:
455  avctx->pix_fmt = AV_PIX_FMT_YUV444P14;
456  break;
457  case 0x6F0:
458  avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
459  break;
460  case 0x671:
461  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
462  break;
463  case 0x681:
464  avctx->pix_fmt = AV_PIX_FMT_YUV422P9;
465  break;
466  case 0x691:
467  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
468  break;
469  case 0x6B1:
470  avctx->pix_fmt = AV_PIX_FMT_YUV422P12;
471  break;
472  case 0x6D1:
473  avctx->pix_fmt = AV_PIX_FMT_YUV422P14;
474  break;
475  case 0x6F1:
476  avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
477  break;
478  case 0x672:
479  avctx->pix_fmt = AV_PIX_FMT_YUV411P;
480  break;
481  case 0x674:
482  avctx->pix_fmt = AV_PIX_FMT_YUV440P;
483  break;
484  case 0x675:
485  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
486  break;
487  case 0x685:
488  avctx->pix_fmt = AV_PIX_FMT_YUV420P9;
489  break;
490  case 0x695:
491  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
492  break;
493  case 0x6B5:
494  avctx->pix_fmt = AV_PIX_FMT_YUV420P12;
495  break;
496  case 0x6D5:
497  avctx->pix_fmt = AV_PIX_FMT_YUV420P14;
498  break;
499  case 0x6F5:
500  avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
501  break;
502  case 0x67A:
503  avctx->pix_fmt = AV_PIX_FMT_YUV410P;
504  break;
505  case 0x770:
506  avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
507  break;
508  case 0x780:
509  avctx->pix_fmt = AV_PIX_FMT_YUVA444P9;
510  break;
511  case 0x790:
512  avctx->pix_fmt = AV_PIX_FMT_YUVA444P10;
513  break;
514  case 0x7F0:
515  avctx->pix_fmt = AV_PIX_FMT_YUVA444P16;
516  break;
517  case 0x771:
518  avctx->pix_fmt = AV_PIX_FMT_YUVA422P;
519  break;
520  case 0x781:
521  avctx->pix_fmt = AV_PIX_FMT_YUVA422P9;
522  break;
523  case 0x791:
524  avctx->pix_fmt = AV_PIX_FMT_YUVA422P10;
525  break;
526  case 0x7F1:
527  avctx->pix_fmt = AV_PIX_FMT_YUVA422P16;
528  break;
529  case 0x775:
530  avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
531  break;
532  case 0x785:
533  avctx->pix_fmt = AV_PIX_FMT_YUVA420P9;
534  break;
535  case 0x795:
536  avctx->pix_fmt = AV_PIX_FMT_YUVA420P10;
537  break;
538  case 0x7F5:
539  avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
540  break;
541  default:
542  ret = AVERROR_INVALIDDATA;
543  goto error;
544  }
545  }
546 
547  ff_huffyuv_common_init(avctx);
548 
549  if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P || avctx->pix_fmt == AV_PIX_FMT_YUV420P) && avctx->width & 1) {
550  av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
551  ret = AVERROR_INVALIDDATA;
552  goto error;
553  }
554  if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P &&
555  avctx->width % 4) {
556  av_log(avctx, AV_LOG_ERROR, "width must be a multiple of 4 "
557  "for this combination of colorspace and predictor type.\n");
558  ret = AVERROR_INVALIDDATA;
559  goto error;
560  }
561 
562  if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
564  goto error;
565  }
566 
567  return 0;
568  error:
569  decode_end(avctx);
570  return ret;
571 }
572 
573 #if HAVE_THREADS
574 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
575 {
576  HYuvContext *s = avctx->priv_data;
577  int i, ret;
578 
579  s->avctx = avctx;
580 
581  if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
583  return ret;
584  }
585 
586  for (i = 0; i < 8; i++)
587  s->vlc[i].table = NULL;
588 
589  if (s->version >= 2) {
590  if ((ret = read_huffman_tables(s, avctx->extradata + 4,
591  avctx->extradata_size)) < 0)
592  return ret;
593  } else {
594  if ((ret = read_old_huffman_tables(s)) < 0)
595  return ret;
596  }
597 
598  return 0;
599 }
600 #endif
601 
602 /** Subset of GET_VLC for use in hand-rolled VLC code */
603 #define VLC_INTERN(dst, table, gb, name, bits, max_depth) \
604  code = table[index][0]; \
605  n = table[index][1]; \
606  if (max_depth > 1 && n < 0) { \
607  LAST_SKIP_BITS(name, gb, bits); \
608  UPDATE_CACHE(name, gb); \
609  \
610  nb_bits = -n; \
611  index = SHOW_UBITS(name, gb, nb_bits) + code; \
612  code = table[index][0]; \
613  n = table[index][1]; \
614  if (max_depth > 2 && n < 0) { \
615  LAST_SKIP_BITS(name, gb, nb_bits); \
616  UPDATE_CACHE(name, gb); \
617  \
618  nb_bits = -n; \
619  index = SHOW_UBITS(name, gb, nb_bits) + code; \
620  code = table[index][0]; \
621  n = table[index][1]; \
622  } \
623  } \
624  dst = code; \
625  LAST_SKIP_BITS(name, gb, n)
626 
627 
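/* GET_VLC_DUAL first probes dtable, a joint table whose entries carry two
 * symbols packed into one code word (split by OP); if the joint lookup
 * misses (n <= 0), it falls back to two independent reads from table1 and
 * table2 via VLC_INTERN. */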
628 #define GET_VLC_DUAL(dst0, dst1, name, gb, dtable, table1, table2, \
629  bits, max_depth, OP) \
630  do { \
631  unsigned int index = SHOW_UBITS(name, gb, bits); \
632  int code, n = dtable[index][1]; \
633  \
634  if (n<=0) { \
635  int nb_bits; \
636  VLC_INTERN(dst0, table1, gb, name, bits, max_depth); \
637  \
638  UPDATE_CACHE(re, gb); \
639  index = SHOW_UBITS(name, gb, bits); \
640  VLC_INTERN(dst1, table2, gb, name, bits, max_depth); \
641  } else { \
642  code = dtable[index][0]; \
643  OP(dst0, dst1, code); \
644  LAST_SKIP_BITS(name, gb, n); \
645  } \
646  } while (0)
647 
648 #define OP8bits(dst0, dst1, code) dst0 = code>>8; dst1 = code
649 
650 #define READ_2PIX(dst0, dst1, plane1) \
651  UPDATE_CACHE(re, &s->gb); \
652  GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane1].table, \
653  s->vlc[0].table, s->vlc[plane1].table, VLC_BITS, 3, OP8bits)
654 
655 static void decode_422_bitstream(HYuvContext *s, int count)
656 {
657  int i, icount;
658  OPEN_READER(re, &s->gb);
659  count /= 2;
660 
661  icount = get_bits_left(&s->gb) / (32 * 4);
662  if (count >= icount) {
663  for (i = 0; i < icount; i++) {
664  READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
665  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
666  }
667  for (; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
668  READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
669  if (BITS_LEFT(re, &s->gb) <= 0) break;
670  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
671  }
672  for (; i < count; i++)
673  s->temp[0][2 * i ] = s->temp[1][i] =
674  s->temp[0][2 * i + 1] = s->temp[2][i] = 0;
675  } else {
676  for (i = 0; i < count; i++) {
677  READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
678  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
679  }
680  }
681  CLOSE_READER(re, &s->gb);
682 }
683 
684 #define READ_2PIX_PLANE(dst0, dst1, plane, OP) \
685  UPDATE_CACHE(re, &s->gb); \
686  GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane].table, \
687  s->vlc[plane].table, s->vlc[plane].table, VLC_BITS, 3, OP)
688 
689 #define OP14bits(dst0, dst1, code) dst0 = code>>8; dst1 = sign_extend(code, 8)
690 
691 /* TODO instead of restarting the read when the code isn't in the first level
692  * of the joint table, jump into the 2nd level of the individual table. */
693 #define READ_2PIX_PLANE16(dst0, dst1, plane){\
694  dst0 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
695  dst0 += get_bits(&s->gb, 2);\
696  dst1 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
697  dst1 += get_bits(&s->gb, 2);\
698 }
699 static void decode_plane_bitstream(HYuvContext *s, int width, int plane)
700 {
701  int i, count = width/2;
702 
703  if (s->bps <= 8) {
704  OPEN_READER(re, &s->gb);
705  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
706  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
707  READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
708  }
709  } else {
710  for(i=0; i<count; i++){
711  READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
712  }
713  }
714  if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
715  unsigned int index;
716  int nb_bits, code, n;
717  UPDATE_CACHE(re, &s->gb);
718  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
719  VLC_INTERN(s->temp[0][width-1], s->vlc[plane].table,
720  &s->gb, re, VLC_BITS, 3);
721  }
722  CLOSE_READER(re, &s->gb);
723  } else if (s->bps <= 14) {
724  OPEN_READER(re, &s->gb);
725  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
726  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
727  READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
728  }
729  } else {
730  for(i=0; i<count; i++){
731  READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
732  }
733  }
734  if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
735  unsigned int index;
736  int nb_bits, code, n;
737  UPDATE_CACHE(re, &s->gb);
738  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
739  VLC_INTERN(s->temp16[0][width-1], s->vlc[plane].table,
740  &s->gb, re, VLC_BITS, 3);
741  }
742  CLOSE_READER(re, &s->gb);
743  } else {
744  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
745  for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
746  READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
747  }
748  } else {
749  for(i=0; i<count; i++){
750  READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
751  }
752  }
753  if( width&1 && get_bits_left(&s->gb)>0 ) {
754  int dst = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;
755  s->temp16[0][width-1] = dst + get_bits(&s->gb, 2);
756  }
757  }
758 }
759 
760 static void decode_gray_bitstream(HYuvContext *s, int count)
761 {
762  int i;
763  OPEN_READER(re, &s->gb);
764  count /= 2;
765 
766  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
767  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
768  READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
769  }
770  } else {
771  for (i = 0; i < count; i++) {
772  READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
773  }
774  }
775  CLOSE_READER(re, &s->gb);
776 }
777 
778 static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
779  int decorrelate, int alpha)
780 {
781  int i;
782  OPEN_READER(re, &s->gb);
783 
784  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
785  unsigned int index;
786  int code, n, nb_bits;
787 
788  UPDATE_CACHE(re, &s->gb);
789  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
790  n = s->vlc[4].table[index][1];
791 
792  if (n>0) {
793  code = s->vlc[4].table[index][0];
794  *(uint32_t *) &s->temp[0][4 * i] = s->pix_bgr_map[code];
795  LAST_SKIP_BITS(re, &s->gb, n);
796  } else {
797  if (decorrelate) {
798  VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
799  &s->gb, re, VLC_BITS, 3);
800 
801  UPDATE_CACHE(re, &s->gb);
802  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
803  VLC_INTERN(code, s->vlc[0].table, &s->gb, re, VLC_BITS, 3);
804  s->temp[0][4 * i + B] = code + s->temp[0][4 * i + G];
805 
806  UPDATE_CACHE(re, &s->gb);
807  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
808  VLC_INTERN(code, s->vlc[2].table, &s->gb, re, VLC_BITS, 3);
809  s->temp[0][4 * i + R] = code + s->temp[0][4 * i + G];
810  } else {
811  VLC_INTERN(s->temp[0][4 * i + B], s->vlc[0].table,
812  &s->gb, re, VLC_BITS, 3);
813 
814  UPDATE_CACHE(re, &s->gb);
815  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
816  VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
817  &s->gb, re, VLC_BITS, 3);
818 
819  UPDATE_CACHE(re, &s->gb);
820  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
821  VLC_INTERN(s->temp[0][4 * i + R], s->vlc[2].table,
822  &s->gb, re, VLC_BITS, 3);
823  }
824  }
825  if (alpha) {
826  UPDATE_CACHE(re, &s->gb);
827  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
828  VLC_INTERN(s->temp[0][4 * i + A], s->vlc[2].table,
829  &s->gb, re, VLC_BITS, 3);
830  } else
831  s->temp[0][4 * i + A] = 0;
832  }
833  CLOSE_READER(re, &s->gb);
834 }
835 
836 static void decode_bgr_bitstream(HYuvContext *s, int count)
837 {
838  if (s->decorrelate) {
839  if (s->bitstream_bpp == 24)
840  decode_bgr_1(s, count, 1, 0);
841  else
842  decode_bgr_1(s, count, 1, 1);
843  } else {
844  if (s->bitstream_bpp == 24)
845  decode_bgr_1(s, count, 0, 0);
846  else
847  decode_bgr_1(s, count, 0, 1);
848  }
849 }
850 
851 static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
852 {
853  int h, cy, i;
854  int offset[AV_NUM_DATA_POINTERS];
855 
856  if (!s->avctx->draw_horiz_band)
857  return;
858 
859  h = y - s->last_slice_end;
860  y -= h;
861 
862  if (s->bitstream_bpp == 12)
863  cy = y >> 1;
864  else
865  cy = y;
866 
867  offset[0] = frame->linesize[0] * y;
868  offset[1] = frame->linesize[1] * cy;
869  offset[2] = frame->linesize[2] * cy;
870  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
871  offset[i] = 0;
872  emms_c();
873 
874  s->avctx->draw_horiz_band(s->avctx, frame, offset, y, 3, h);
875 
876  s->last_slice_end = y + h;
877 }
878 
879 static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
880 {
881  if (s->bps <= 8) {
882  return s->llviddsp.add_left_pred(dst, src, w, acc);
883  } else {
884  return s->llviddsp.add_left_pred_int16(( uint16_t *)dst, (const uint16_t *)src, s->n-1, w, acc);
885  }
886 }
887 
888 static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
889 {
890  if (s->bps <= 8) {
891  s->llviddsp.add_bytes(dst, src, w);
892  } else {
893  s->hdsp.add_int16((uint16_t*)dst, (const uint16_t*)src, s->n - 1, w);
894  }
895 }
896 
897 static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
898 {
899  if (s->bps <= 8) {
900  s->llviddsp.add_median_pred(dst, src, diff, w, left, left_top);
901  } else {
902  s->hdsp.add_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src, (const uint16_t *)diff, s->n-1, w, left, left_top);
903  }
904 }
905 
906 static int decode_slice(AVCodecContext *avctx, AVFrame *p, int height,
907  int buf_size, int y_offset, int table_size)
908 {
909  HYuvContext *s = avctx->priv_data;
910  int fake_ystride, fake_ustride, fake_vstride;
911  const int width = s->width;
912  const int width2 = s->width >> 1;
913  int ret;
914 
915  if ((ret = init_get_bits8(&s->gb, s->bitstream_buffer + table_size, buf_size - table_size)) < 0)
916  return ret;
917 
918  fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
919  fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
920  fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];
921 
922  if (s->version > 2) {
923  int plane;
924  for(plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
925  int left, lefttop, y;
926  int w = width;
927  int h = height;
928  int fake_stride = fake_ystride;
929 
930  if (s->chroma && (plane == 1 || plane == 2)) {
931  w >>= s->chroma_h_shift;
932  h >>= s->chroma_v_shift;
933  fake_stride = plane == 1 ? fake_ustride : fake_vstride;
934  }
935 
936  switch (s->predictor) {
937  case LEFT:
938  case PLANE:
939  decode_plane_bitstream(s, w, plane);
940  left = left_prediction(s, p->data[plane], s->temp[0], w, 0);
941 
942  for (y = 1; y < h; y++) {
943  uint8_t *dst = p->data[plane] + p->linesize[plane]*y;
944 
945  decode_plane_bitstream(s, w, plane);
946  left = left_prediction(s, dst, s->temp[0], w, left);
947  if (s->predictor == PLANE) {
948  if (y > s->interlaced) {
949  add_bytes(s, dst, dst - fake_stride, w);
950  }
951  }
952  }
953 
954  break;
955  case MEDIAN:
956  decode_plane_bitstream(s, w, plane);
957  left= left_prediction(s, p->data[plane], s->temp[0], w, 0);
958 
959  y = 1;
960 
961  /* second line is left predicted for interlaced case */
962  if (s->interlaced) {
963  decode_plane_bitstream(s, w, plane);
964  left = left_prediction(s, p->data[plane] + p->linesize[plane], s->temp[0], w, left);
965  y++;
966  }
967 
968  lefttop = p->data[plane][0];
969  decode_plane_bitstream(s, w, plane);
970  add_median_prediction(s, p->data[plane] + fake_stride, p->data[plane], s->temp[0], w, &left, &lefttop);
971  y++;
972 
973  for (; y<h; y++) {
974  uint8_t *dst;
975 
976  decode_plane_bitstream(s, w, plane);
977 
978  dst = p->data[plane] + p->linesize[plane] * y;
979 
980  add_median_prediction(s, dst, dst - fake_stride, s->temp[0], w, &left, &lefttop);
981  }
982 
983  break;
984  }
985  }
986  draw_slice(s, p, height);
987  } else if (s->bitstream_bpp < 24) {
988  int y, cy;
989  int lefty, leftu, leftv;
990  int lefttopy, lefttopu, lefttopv;
991 
992  if (s->yuy2) {
993  p->data[0][3] = get_bits(&s->gb, 8);
994  p->data[0][2] = get_bits(&s->gb, 8);
995  p->data[0][1] = get_bits(&s->gb, 8);
996  p->data[0][0] = get_bits(&s->gb, 8);
997 
998  av_log(avctx, AV_LOG_ERROR,
999  "YUY2 output is not implemented yet\n");
1000  return AVERROR_PATCHWELCOME;
1001  } else {
1002  leftv =
1003  p->data[2][0 + y_offset * p->linesize[2]] = get_bits(&s->gb, 8);
1004  lefty =
1005  p->data[0][1 + y_offset * p->linesize[0]] = get_bits(&s->gb, 8);
1006  leftu =
1007  p->data[1][0 + y_offset * p->linesize[1]] = get_bits(&s->gb, 8);
1008  p->data[0][0 + y_offset * p->linesize[0]] = get_bits(&s->gb, 8);
1009 
1010  switch (s->predictor) {
1011  case LEFT:
1012  case PLANE:
1013  decode_422_bitstream(s, width - 2);
1014  lefty = s->llviddsp.add_left_pred(p->data[0] + p->linesize[0] * y_offset + 2, s->temp[0],
1015  width - 2, lefty);
1016  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1017  leftu = s->llviddsp.add_left_pred(p->data[1] + p->linesize[1] * y_offset + 1, s->temp[1], width2 - 1, leftu);
1018  leftv = s->llviddsp.add_left_pred(p->data[2] + p->linesize[2] * y_offset + 1, s->temp[2], width2 - 1, leftv);
1019  }
1020 
1021  for (cy = y = 1; y < height; y++, cy++) {
1022  uint8_t *ydst, *udst, *vdst;
1023 
1024  if (s->bitstream_bpp == 12) {
1025  decode_gray_bitstream(s, width);
1026 
1027  ydst = p->data[0] + p->linesize[0] * (y + y_offset);
1028 
1029  lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
1030  width, lefty);
1031  if (s->predictor == PLANE) {
1032  if (y > s->interlaced)
1033  s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
1034  }
1035  y++;
1036  if (y >= height)
1037  break;
1038  }
1039 
1040  draw_slice(s, p, y);
1041 
1042  ydst = p->data[0] + p->linesize[0] * (y + y_offset);
1043  udst = p->data[1] + p->linesize[1] * (cy + y_offset);
1044  vdst = p->data[2] + p->linesize[2] * (cy + y_offset);
1045 
1046  decode_422_bitstream(s, width);
1047  lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
1048  width, lefty);
1049  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1050  leftu = s->llviddsp.add_left_pred(udst, s->temp[1], width2, leftu);
1051  leftv = s->llviddsp.add_left_pred(vdst, s->temp[2], width2, leftv);
1052  }
1053  if (s->predictor == PLANE) {
1054  if (cy > s->interlaced) {
1055  s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
1056  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1057  s->llviddsp.add_bytes(udst, udst - fake_ustride, width2);
1058  s->llviddsp.add_bytes(vdst, vdst - fake_vstride, width2);
1059  }
1060  }
1061  }
1062  }
1063  draw_slice(s, p, height);
1064 
1065  break;
1066  case MEDIAN:
1067  /* first line except first 2 pixels is left predicted */
1068  decode_422_bitstream(s, width - 2);
1069  lefty = s->llviddsp.add_left_pred(p->data[0] + 2, s->temp[0],
1070  width - 2, lefty);
1071  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1072  leftu = s->llviddsp.add_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1073  leftv = s->llviddsp.add_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1074  }
1075 
1076  cy = y = 1;
1077 
1078  /* second line is left predicted for interlaced case */
1079  if (s->interlaced) {
1080  decode_422_bitstream(s, width);
1081  lefty = s->llviddsp.add_left_pred(p->data[0] + p->linesize[0],
1082  s->temp[0], width, lefty);
1083  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1084  leftu = s->llviddsp.add_left_pred(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1085  leftv = s->llviddsp.add_left_pred(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1086  }
1087  y++;
1088  cy++;
1089  }
1090 
1091  /* next 4 pixels are left predicted too */
1092  decode_422_bitstream(s, 4);
1093  lefty = s->llviddsp.add_left_pred(p->data[0] + fake_ystride,
1094  s->temp[0], 4, lefty);
1095  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1096  leftu = s->llviddsp.add_left_pred(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1097  leftv = s->llviddsp.add_left_pred(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1098  }
1099 
1100  /* next line except the first 4 pixels is median predicted */
1101  lefttopy = p->data[0][3];
1102  decode_422_bitstream(s, width - 4);
1103  s->llviddsp.add_median_pred(p->data[0] + fake_ystride + 4,
1104  p->data[0] + 4, s->temp[0],
1105  width - 4, &lefty, &lefttopy);
1106  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1107  lefttopu = p->data[1][1];
1108  lefttopv = p->data[2][1];
1109  s->llviddsp.add_median_pred(p->data[1] + fake_ustride + 2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
1110  s->llviddsp.add_median_pred(p->data[2] + fake_vstride + 2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
1111  }
1112  y++;
1113  cy++;
1114 
1115  for (; y < height; y++, cy++) {
1116  uint8_t *ydst, *udst, *vdst;
1117 
1118  if (s->bitstream_bpp == 12) {
1119  while (2 * cy > y) {
1120  decode_gray_bitstream(s, width);
1121  ydst = p->data[0] + p->linesize[0] * y;
1122  s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
1123  s->temp[0], width,
1124  &lefty, &lefttopy);
1125  y++;
1126  }
1127  if (y >= height)
1128  break;
1129  }
1130  draw_slice(s, p, y);
1131 
1132  decode_422_bitstream(s, width);
1133 
1134  ydst = p->data[0] + p->linesize[0] * y;
1135  udst = p->data[1] + p->linesize[1] * cy;
1136  vdst = p->data[2] + p->linesize[2] * cy;
1137 
1138  s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
1139  s->temp[0], width,
1140  &lefty, &lefttopy);
1141  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1142  s->llviddsp.add_median_pred(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1143  s->llviddsp.add_median_pred(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1144  }
1145  }
1146 
1147  draw_slice(s, p, height);
1148  break;
1149  }
1150  }
1151  } else {
1152  int y;
1153  uint8_t left[4];
1154  const int last_line = (y_offset + height - 1) * p->linesize[0];
1155 
1156  if (s->bitstream_bpp == 32) {
1157  left[A] = p->data[0][last_line + A] = get_bits(&s->gb, 8);
1158  left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
1159  left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
1160  left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
1161  } else {
1162  left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
1163  left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
1164  left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
1165  left[A] = p->data[0][last_line + A] = 255;
1166  skip_bits(&s->gb, 8);
1167  }
1168 
1169  if (s->bgr32) {
1170  switch (s->predictor) {
1171  case LEFT:
1172  case PLANE:
1173  decode_bgr_bitstream(s, width - 1);
1174  s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + last_line + 4,
1175  s->temp[0], width - 1, left);
1176 
1177  for (y = height - 2; y >= 0; y--) { // Yes it is stored upside down.
1178  decode_bgr_bitstream(s, width);
1179 
1180  s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + p->linesize[0] * (y + y_offset),
1181  s->temp[0], width, left);
1182  if (s->predictor == PLANE) {
1183  if (s->bitstream_bpp != 32)
1184  left[A] = 0;
1185  if (y < height - 1 - s->interlaced) {
1186  s->llviddsp.add_bytes(p->data[0] + p->linesize[0] * (y + y_offset),
1187  p->data[0] + p->linesize[0] * (y + y_offset) +
1188  fake_ystride, 4 * width);
1189  }
1190  }
1191  }
1192  // just 1 large slice as this is not possible in reverse order
1193  draw_slice(s, p, height);
1194  break;
1195  default:
1196  av_log(avctx, AV_LOG_ERROR,
1197  "prediction type not supported!\n");
1198  }
1199  } else {
1200  av_log(avctx, AV_LOG_ERROR,
1201  "BGR24 output is not implemented yet\n");
1202  return AVERROR_PATCHWELCOME;
1203  }
1204  }
1205 
1206  return 0;
1207 }
1208 
1209 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
1210  AVPacket *avpkt)
1211 {
1212  const uint8_t *buf = avpkt->data;
1213  int buf_size = avpkt->size;
1214  HYuvContext *s = avctx->priv_data;
1215  const int width = s->width;
1216  const int height = s->height;
1217  ThreadFrame frame = { .f = data };
1218  AVFrame *const p = data;
1219  int slice, table_size = 0, ret, nb_slices;
1220  unsigned slices_info_offset;
1221  int slice_height;
1222 
1223  if (buf_size < (width * height + 7)/8)
1224  return AVERROR_INVALIDDATA;
1225 
1225 
1226  av_fast_padded_malloc(&s->bitstream_buffer,
1227  &s->bitstream_buffer_size,
1228  buf_size);
1229  if (!s->bitstream_buffer)
1230  return AVERROR(ENOMEM);
1231 
1232  s->bdsp.bswap_buf((uint32_t *) s->bitstream_buffer,
1233  (const uint32_t *) buf, buf_size / 4);
1234 
1235  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
1236  return ret;
1237 
1238  if (s->context) {
1239  table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
1240  if (table_size < 0)
1241  return table_size;
1242  }
1243 
1244  if ((unsigned) (buf_size - table_size) >= INT_MAX / 8)
1245  return AVERROR_INVALIDDATA;
1246 
1247  s->last_slice_end = 0;
1248 
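/* HYMT packets may carry multiple slices: the last 16 bytes hold, counting
 * from the end, the offset of the slice-info table, the slice height, the
 * slice count and a zero marker; the slice-info table stores an
 * (offset, size) pair of 32-bit LE values per slice, and the slices are
 * laid out bottom-up. */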
1249  if (avctx->codec_id == AV_CODEC_ID_HYMT &&
1250  (buf_size > 32 && AV_RL32(avpkt->data + buf_size - 16) == 0)) {
1251  slices_info_offset = AV_RL32(avpkt->data + buf_size - 4);
1252  slice_height = AV_RL32(avpkt->data + buf_size - 8);
1253  nb_slices = AV_RL32(avpkt->data + buf_size - 12);
1254  if (nb_slices * 8LL + slices_info_offset > buf_size - 16 ||
1255  s->chroma_v_shift ||
1256  slice_height <= 0 || nb_slices * (uint64_t)slice_height > height)
1257  return AVERROR_INVALIDDATA;
1258  } else {
1259  slice_height = height;
1260  nb_slices = 1;
1261  }
1262 
1263  for (slice = 0; slice < nb_slices; slice++) {
1264  int y_offset, slice_offset, slice_size;
1265 
1266  if (nb_slices > 1) {
1267  slice_offset = AV_RL32(avpkt->data + slices_info_offset + slice * 8);
1268  slice_size = AV_RL32(avpkt->data + slices_info_offset + slice * 8 + 4);
1269 
1270  if (slice_offset < 0 || slice_size <= 0 || (slice_offset&3) ||
1271  slice_offset + (int64_t)slice_size > buf_size)
1272  return AVERROR_INVALIDDATA;
1273 
1274  y_offset = height - (slice + 1) * slice_height;
1275  s->bdsp.bswap_buf((uint32_t *)s->bitstream_buffer,
1276  (const uint32_t *)(buf + slice_offset), slice_size / 4);
1277  } else {
1278  y_offset = 0;
1279  slice_offset = 0;
1280  slice_size = buf_size;
1281  }
1282 
1283  ret = decode_slice(avctx, p, slice_height, slice_size, y_offset, table_size);
1284  emms_c();
1285  if (ret < 0)
1286  return ret;
1287  }
1288 
1289  *got_frame = 1;
1290 
1291  return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
1292 }
1293 
1294 AVCodec ff_huffyuv_decoder = {
1295  .name = "huffyuv",
1296  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1297  .type = AVMEDIA_TYPE_VIDEO,
1298  .id = AV_CODEC_ID_HUFFYUV,
1299  .priv_data_size = sizeof(HYuvContext),
1300  .init = decode_init,
1301  .close = decode_end,
1302  .decode = decode_frame,
1303  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
1304  AV_CODEC_CAP_FRAME_THREADS,
1305  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1306 };
1307 
1308 #if CONFIG_FFVHUFF_DECODER
1309 AVCodec ff_ffvhuff_decoder = {
1310  .name = "ffvhuff",
1311  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1312  .type = AVMEDIA_TYPE_VIDEO,
1313  .id = AV_CODEC_ID_FFVHUFF,
1314  .priv_data_size = sizeof(HYuvContext),
1315  .init = decode_init,
1316  .close = decode_end,
1317  .decode = decode_frame,
1318  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
1319  AV_CODEC_CAP_FRAME_THREADS,
1320  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1321 };
1322 #endif /* CONFIG_FFVHUFF_DECODER */
1323 
1324 #if CONFIG_HYMT_DECODER
1325 AVCodec ff_hymt_decoder = {
1326  .name = "hymt",
1327  .long_name = NULL_IF_CONFIG_SMALL("HuffYUV MT"),
1328  .type = AVMEDIA_TYPE_VIDEO,
1329  .id = AV_CODEC_ID_HYMT,
1330  .priv_data_size = sizeof(HYuvContext),
1331  .init = decode_init,
1332  .close = decode_end,
1333  .decode = decode_frame,
1334  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
1335  AV_CODEC_CAP_FRAME_THREADS,
1336  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1337 };
1338 #endif /* CONFIG_HYMT_DECODER */
int plane
Definition: avisynth_c.h:384
static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
Definition: huffyuvdec.c:897
#define OP14bits(dst0, dst1, code)
Definition: huffyuvdec.c:689
#define NULL
Definition: coverity.c:32
const char const char void * val
Definition: avisynth_c.h:863
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:430
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int(* add_left_pred)(uint8_t *dst, const uint8_t *src, ptrdiff_t w, int left)
#define AV_NUM_DATA_POINTERS
Definition: frame.h:296
static void decode_plane_bitstream(HYuvContext *s, int width, int plane)
Definition: huffyuvdec.c:699
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:422
#define READ_2PIX_PLANE(dst0, dst1, plane, OP)
Definition: huffyuvdec.c:684
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:424
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:397
float re
Definition: fft.c:82
int yuy2
Definition: huffyuv.h:65
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:425
int bitstream_bpp
Definition: huffyuv.h:63
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
const char * g
Definition: vf_curves.c:115
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an init_thread_copy() which re-allocates them for other threads.Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities.There will be very little speed gain at this point but it should work.If there are inter-frame dependencies
int acc
Definition: yuv2rgb.c:554
static void decode_422_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:655
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:273
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
int size
Definition: avcodec.h:1481
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:403
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha)
Definition: huffyuvdec.c:778
#define VLC_BITS
Definition: asvdec.c:37
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1778
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:391
#define MAX_VLC_N
Definition: huffyuv.h:47
int context
Definition: huffyuv.h:77
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
#define READ_2PIX_PLANE16(dst0, dst1, plane)
Definition: huffyuvdec.c:693
unsigned int bitstream_buffer_size
Definition: huffyuv.h:88
#define src
Definition: vp8dsp.c:254
static int generate_joint_tables(HYuvContext *s)
Definition: huffyuvdec.c:118
AVCodec.
Definition: avcodec.h:3492
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
int height
Definition: huffyuv.h:75
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, &#39;draw_horiz_band&#39; is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:1803
#define READ_2PIX(dst0, dst1, plane1)
Definition: huffyuvdec.c:650
static int read_old_huffman_tables(HYuvContext *s)
Definition: huffyuvdec.c:237
static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
Definition: huffyuvdec.c:851
void(* add_int16)(uint16_t *dst, const uint16_t *src, unsigned mask, int w)
Definition: huffyuvdsp.h:39
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
uint32_t pix_bgr_map[1<< VLC_BITS]
Definition: huffyuv.h:85
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
uint8_t
#define av_cold
Definition: attributes.h:82
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
Definition: bswapdsp.h:25
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
static void decode_bgr_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:836
int bps
Definition: huffyuv.h:67
Multithreading support functions.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1669
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:252
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:421
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:402
#define height
uint8_t * data
Definition: avcodec.h:1480
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
void(* add_median_pred)(uint8_t *dst, const uint8_t *top, const uint8_t *diff, ptrdiff_t w, int *left, int *left_top)
bitstream reader API header.
int vlc_n
Definition: huffyuv.h:69
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:400
int chroma_h_shift
Definition: huffyuv.h:73
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:2792
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:392
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:429
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:886
#define A(x)
Definition: vp56_arith.h:28
uint16_t * temp16[3]
identical to temp but 16bit type
Definition: huffyuv.h:81
#define av_log(a,...)
uint8_t len[4][MAX_VLC_N]
Definition: huffyuv.h:83
av_cold int ff_huffyuv_alloc_temp(HYuvContext *s)
Definition: huffyuv.c:58
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:176
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define R
Definition: huffyuvdsp.h:34
int chroma_v_shift
Definition: huffyuv.h:74
Definition: huffyuv.h:51
av_cold void ff_huffyuv_common_end(HYuvContext *s)
Definition: huffyuv.c:86
#define init_vlc(vlc, nb_bits, nb_codes,bits, bits_wrap, bits_size,codes, codes_wrap, codes_size,flags)
Definition: vlc.h:38
static const unsigned char classic_add_luma[256]
Definition: huffyuvdec.c:61
AVCodec ff_hymt_decoder
uint8_t * bitstream_buffer
Definition: huffyuv.h:87
int flags
Definition: huffyuv.h:76
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2550
#define B
Definition: huffyuvdsp.h:32
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
const char * r
Definition: vf_curves.c:114
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:431
int bgr32
Definition: huffyuv.h:66
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: avcodec.h:3499
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
uint8_t bits
Definition: vp3data.h:202
int chroma
Definition: huffyuv.h:71
VLC vlc[8]
Definition: huffyuv.h:86
GLsizei count
Definition: opengl_enc.c:108
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
huffyuv codec for libavcodec.
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1040
#define classic_shift_chroma_table_size
Definition: huffyuvdec.c:52
Definition: vlc.h:26
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:225
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:385
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: avcodec.h:978
#define b
Definition: input.c:41
int decorrelate
Definition: huffyuv.h:62
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:406
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:371
#define FFMIN(a, b)
Definition: common.h:96
int width
Definition: huffyuv.h:75
int last_slice_end
Definition: huffyuv.h:79
uint8_t interlaced
Definition: mxfenc.c:2225
#define VLC_INTERN(dst, table, gb, name, bits, max_depth)
Subset of GET_VLC for use in hand-roller VLC code.
Definition: huffyuvdec.c:603
#define width
int width
picture width / height.
Definition: avcodec.h:1741
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size+AV_INPUT_BUFFER_PADDING_SIZE]
Definition: huffyuvdec.c:53
uint8_t w
Definition: llviddspenc.c:38
int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int n)
Definition: huffyuv.c:39
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
int n
Definition: avisynth_c.h:760
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:426
uint8_t * temp[3]
Definition: huffyuv.h:80
AVCodec ff_ffvhuff_decoder
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:386
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:405
static void error(const char *err)
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
int alpha
Definition: huffyuv.h:70
if(ret)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:398
static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
Definition: huffyuvdec.c:888
int(* add_left_pred_int16)(uint16_t *dst, const uint16_t *src, unsigned mask, ptrdiff_t w, unsigned left)
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:395
AVCodec ff_huffyuv_decoder
Definition: huffyuvdec.c:1294
Libavcodec external API header.
enum AVCodecID codec_id
Definition: avcodec.h:1578
int yuv
Definition: huffyuv.h:72
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
LLVidDSPContext llviddsp
Definition: huffyuv.h:92
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
HuffYUVDSPContext hdsp
Definition: huffyuv.h:90
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
static const int16_t alpha[]
Definition: ilbcdata.h:55
main external API structure.
Definition: avcodec.h:1568
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:360
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new 
state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
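A minimal sketch of that hcoeff[0] recovery rule, assuming hcoeff[1..htaps/2-1] already carry their alternating signs; recover_hcoeff0 is an illustrative helper name, not part of the decoder:

static int recover_hcoeff0(const int *hcoeff, int htaps)
{
    /* hcoeff[0] = 32 - hcoeff[1] - hcoeff[2] - ...
     * e.g. htaps = 6, stored {-10, 2}:  32 - (-10) - 2 = 40
     *      htaps = 8, stored {-14, 6, -2}: 42, matching snow.txt */
    int i, sum = 0;
    for (i = 1; i < htaps / 2; i++)
        sum += hcoeff[i];
    return 32 - sum;
}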
void * buf
Definition: avisynth_c.h:766
int interlaced
Definition: huffyuv.h:61
int extradata_size
Definition: avcodec.h:1670
void ff_llviddsp_init(LLVidDSPContext *c)
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
int index
Definition: gxfenc.c:89
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
av_cold void ff_huffyuvdsp_init(HuffYUVDSPContext *c, enum AVPixelFormat pix_fmt)
Definition: huffyuvdsp.c:83
static void decode_gray_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:760
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:384
#define classic_shift_luma_table_size
Definition: huffyuvdec.c:44
const VDPAUPixFmtMap * map
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:130
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:396
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:404
int version
Definition: huffyuv.h:64
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:388
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:394
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
Predictor predictor
Definition: huffyuv.h:58
static av_cold int decode_init(AVCodecContext *avctx)
Definition: huffyuvdec.c:291
void(* add_hfyu_left_pred_bgr32)(uint8_t *dst, const uint8_t *src, intptr_t w, uint8_t *left)
Definition: huffyuvdsp.h:45
AVCodecContext * avctx
Definition: huffyuv.h:57
GetBitContext gb
Definition: huffyuv.h:59
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
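As a hedged illustration of how the subsampling factors in these pixel-format descriptions translate into chroma plane sizes, assuming libavutil/pixdesc.h; chroma_plane_size is an illustrative helper, not FFmpeg API:

static void chroma_plane_size(enum AVPixelFormat fmt, int w, int h,
                              int *cw, int *ch)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
    if (!desc)
        return;
    /* e.g. AV_PIX_FMT_YUV420P: log2_chroma_w = log2_chroma_h = 1,
     * so a 2x2 block of Y samples shares one Cb and one Cr sample */
    *cw = AV_CEIL_RSHIFT(w, desc->log2_chroma_w);
    *ch = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
}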
Y, 8bpp.
Definition: pixfmt.h:74
Definition: huffyuv.h:52
#define G
Definition: huffyuvdsp.h:33
#define BITS_LEFT(name, gb)
Definition: get_bits.h:191
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
void(* add_hfyu_median_pred_int16)(uint16_t *dst, const uint16_t *top, const uint16_t *diff, unsigned mask, int w, int *left, int *left_top)
Definition: huffyuvdsp.h:42
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:423
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: huffyuvdec.c:1209
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
av_cold void ff_huffyuv_common_init(AVCodecContext *avctx)
Definition: huffyuv.c:71
static av_cold int decode_end(AVCodecContext *avctx)
Definition: huffyuvdec.c:277
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:793
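A minimal sketch of the padding convention, assuming string.h, libavutil/mem.h and the decoder's get_bits.h are available; init_padded_gb is an illustrative helper, not FFmpeg API. It mirrors why classic_shift_luma below is declared with classic_shift_luma_table_size + AV_INPUT_BUFFER_PADDING_SIZE entries and zero-filled at the end:

static int init_padded_gb(GetBitContext *gb, const uint8_t *src, int len)
{
    /* over-allocate so the (unchecked) bitstream reader can never read
     * uninitialized bytes past the payload; av_mallocz() zero-fills */
    uint8_t *padded = av_mallocz(len + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!padded)
        return AVERROR(ENOMEM);
    memcpy(padded, src, len);
    /* gb->buffer keeps the pointer; the caller frees it after decoding */
    return init_get_bits(gb, padded, 8 * len);
}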
void * priv_data
Definition: avcodec.h:1595
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int len
static const unsigned char classic_add_chroma[256]
Definition: huffyuvdec.c:80
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
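A hedged sketch of how such a table is consumed with get_vlc2() from get_bits.h: each entry stores a (code, bits) pair, and the reader resolves a symbol in at most max_depth table lookups. VLC_BITS_SKETCH and read_symbol are illustrative names, not the decoder's own constants or helpers:

#define VLC_BITS_SKETCH 11
static int read_symbol(GetBitContext *gb, VLC *vlc)
{
    /* peek VLC_BITS_SKETCH bits, index the table, skip the consumed bits */
    return get_vlc2(gb, vlc->table, VLC_BITS_SKETCH, 3);
}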
static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
Definition: huffyuvdec.c:879
static const unsigned char classic_shift_luma[classic_shift_luma_table_size+AV_INPUT_BUFFER_PADDING_SIZE]
Definition: huffyuvdec.c:45
FILE * out
Definition: movenc.c:54
#define av_freep(p)
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
static int decode_slice(AVCodecContext *avctx, AVFrame *p, int height, int buf_size, int y_offset, int table_size)
Definition: huffyuvdec.c:906
#define av_always_inline
Definition: attributes.h:39
uint32_t bits[4][MAX_VLC_N]
Definition: huffyuv.h:84
#define LEFT
Definition: cdgraphics.c:166
void(* add_bytes)(uint8_t *dst, uint8_t *src, ptrdiff_t w)
AV_RL32
Definition: bytestream.h:87
This structure stores compressed data.
Definition: avcodec.h:1457
static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median)
Definition: snowenc.c:1329
BswapDSPContext bdsp
Definition: huffyuv.h:89
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:359
static int read_len_table(uint8_t *dst, GetBitContext *gb, int n)
Definition: huffyuvdec.c:99
#define OP8bits(dst0, dst1, code)
Definition: huffyuvdec.c:648
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:984
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:399
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
Definition: huffyuvdec.c:208
for(j=16;j >0;--j)
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:364