FFmpeg
iff.c
1 /*
2  * IFF ACBM/ANIM/DEEP/ILBM/PBM/RGB8/RGBN bitmap decoder
3  * Copyright (c) 2010 Peter Ross <pross@xvid.org>
4  * Copyright (c) 2010 Sebastian Vater <cdgs.basty@googlemail.com>
5  * Copyright (c) 2016 Paul B Mahol
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 /**
25  * @file
26  * IFF ACBM/ANIM/DEEP/ILBM/PBM/RGB8/RGBN bitmap decoder
27  */
28 
29 #include <stdint.h>
30 
31 #include "libavutil/imgutils.h"
32 
33 #include "bytestream.h"
34 #include "avcodec.h"
35 #include "internal.h"
36 #include "mathops.h"
37 
38 // TODO: masking bits
39 typedef enum {
40     MASK_NONE,
41     MASK_HAS_MASK,
42     MASK_HAS_TRANSPARENT_COLOR,
43     MASK_LASSO
44 } mask_type;
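/* A note on these values (an assumption based on the ILBM BMHD masking field,
 * not stated elsewhere in this file): 0 = no mask, 1 = an extra mask plane is
 * interleaved with the bitplanes, 2 = one palette entry marks transparent
 * pixels, 3 = lasso masking, which this decoder rejects via the
 * "Masking not supported" path in extract_header(). */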
45 
46 typedef struct IffContext {
47  AVFrame *frame;
48  int planesize;
49  uint8_t * planebuf;
50  uint8_t * ham_buf; ///< temporary buffer for planar to chunky conversion
51  uint32_t *ham_palbuf; ///< HAM decode table
52  uint32_t *mask_buf; ///< temporary buffer for palette indices
53  uint32_t *mask_palbuf; ///< masking palette table
54  unsigned compression; ///< delta compression method used
55  unsigned is_short; ///< short compression method used
56  unsigned is_interlaced;///< video is interlaced
57  unsigned is_brush; ///< video is in ANBR format
58  unsigned bpp; ///< bits per plane to decode (differs from bits_per_coded_sample if HAM)
59  unsigned ham; ///< 0 if non-HAM or number of hold bits (6 for bpp > 6, 4 otherwise)
60  unsigned flags; ///< 1 for EHB (extra halfbrite), 0 otherwise
61  unsigned transparency; ///< TODO: transparency color index in palette
62  unsigned masking; ///< TODO: masking method used
63  int init; // 1 if buffer and palette data already initialized, 0 otherwise
64  int16_t tvdc[16]; ///< TVDC lookup table
65  GetByteContext gb;
66  uint8_t *video[2];
67  unsigned video_size;
68  uint32_t *pal;
69 } IffContext;
70 
71 #define LUT8_PART(plane, v) \
72  AV_LE2NE64C(UINT64_C(0x0000000)<<32 | v) << plane, \
73  AV_LE2NE64C(UINT64_C(0x1000000)<<32 | v) << plane, \
74  AV_LE2NE64C(UINT64_C(0x0010000)<<32 | v) << plane, \
75  AV_LE2NE64C(UINT64_C(0x1010000)<<32 | v) << plane, \
76  AV_LE2NE64C(UINT64_C(0x0000100)<<32 | v) << plane, \
77  AV_LE2NE64C(UINT64_C(0x1000100)<<32 | v) << plane, \
78  AV_LE2NE64C(UINT64_C(0x0010100)<<32 | v) << plane, \
79  AV_LE2NE64C(UINT64_C(0x1010100)<<32 | v) << plane, \
80  AV_LE2NE64C(UINT64_C(0x0000001)<<32 | v) << plane, \
81  AV_LE2NE64C(UINT64_C(0x1000001)<<32 | v) << plane, \
82  AV_LE2NE64C(UINT64_C(0x0010001)<<32 | v) << plane, \
83  AV_LE2NE64C(UINT64_C(0x1010001)<<32 | v) << plane, \
84  AV_LE2NE64C(UINT64_C(0x0000101)<<32 | v) << plane, \
85  AV_LE2NE64C(UINT64_C(0x1000101)<<32 | v) << plane, \
86  AV_LE2NE64C(UINT64_C(0x0010101)<<32 | v) << plane, \
87  AV_LE2NE64C(UINT64_C(0x1010101)<<32 | v) << plane
88 
89 #define LUT8(plane) { \
90  LUT8_PART(plane, 0x0000000), \
91  LUT8_PART(plane, 0x1000000), \
92  LUT8_PART(plane, 0x0010000), \
93  LUT8_PART(plane, 0x1010000), \
94  LUT8_PART(plane, 0x0000100), \
95  LUT8_PART(plane, 0x1000100), \
96  LUT8_PART(plane, 0x0010100), \
97  LUT8_PART(plane, 0x1010100), \
98  LUT8_PART(plane, 0x0000001), \
99  LUT8_PART(plane, 0x1000001), \
100  LUT8_PART(plane, 0x0010001), \
101  LUT8_PART(plane, 0x1010001), \
102  LUT8_PART(plane, 0x0000101), \
103  LUT8_PART(plane, 0x1000101), \
104  LUT8_PART(plane, 0x0010101), \
105  LUT8_PART(plane, 0x1010101), \
106 }
107 
108 // 8 planes * 8-bit mask
109 static const uint64_t plane8_lut[8][256] = {
110  LUT8(0), LUT8(1), LUT8(2), LUT8(3),
111  LUT8(4), LUT8(5), LUT8(6), LUT8(7),
112 };
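/* Illustration of how decodeplane8() uses this table: plane8_lut[p][b]
 * expands one byte b taken from bitplane p into eight chunky bytes, setting
 * bit p in every output byte whose corresponding source bit is 1 (MSB first,
 * so the MSB maps to the leftmost pixel, dst[0]). For example, in plane 0 the
 * source byte 0xC1 (binary 11000001) ORs 0x01 into dst[0], dst[1] and dst[7]
 * and leaves the other bytes untouched. */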
113 
114 #define LUT32(plane) { \
115  0, 0, 0, 0, \
116  0, 0, 0, 1U << plane, \
117  0, 0, 1U << plane, 0, \
118  0, 0, 1U << plane, 1U << plane, \
119  0, 1U << plane, 0, 0, \
120  0, 1U << plane, 0, 1U << plane, \
121  0, 1U << plane, 1U << plane, 0, \
122  0, 1U << plane, 1U << plane, 1U << plane, \
123  1U << plane, 0, 0, 0, \
124  1U << plane, 0, 0, 1U << plane, \
125  1U << plane, 0, 1U << plane, 0, \
126  1U << plane, 0, 1U << plane, 1U << plane, \
127  1U << plane, 1U << plane, 0, 0, \
128  1U << plane, 1U << plane, 0, 1U << plane, \
129  1U << plane, 1U << plane, 1U << plane, 0, \
130  1U << plane, 1U << plane, 1U << plane, 1U << plane, \
131 }
132 
133 // 32 planes * 4-bit mask * 4 lookup tables each
134 static const uint32_t plane32_lut[32][16*4] = {
135  LUT32( 0), LUT32( 1), LUT32( 2), LUT32( 3),
136  LUT32( 4), LUT32( 5), LUT32( 6), LUT32( 7),
137  LUT32( 8), LUT32( 9), LUT32(10), LUT32(11),
138  LUT32(12), LUT32(13), LUT32(14), LUT32(15),
139  LUT32(16), LUT32(17), LUT32(18), LUT32(19),
140  LUT32(20), LUT32(21), LUT32(22), LUT32(23),
141  LUT32(24), LUT32(25), LUT32(26), LUT32(27),
142  LUT32(28), LUT32(29), LUT32(30), LUT32(31),
143 };
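/* plane32_lut is the >8bpp counterpart consumed by decodeplane32(): each
 * 4-bit group of a source byte selects four consecutive uint32_t entries,
 * i.e. four chunky 32-bit pixels that get bit <plane> ORed in wherever the
 * corresponding source bit is set. */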
144 
145 // Gray to RGB, required for palette table of grayscale images with bpp < 8
146 static av_always_inline uint32_t gray2rgb(const uint32_t x) {
147  return x << 16 | x << 8 | x;
148 }
149 
150 /**
151  * Convert CMAP buffer (stored in extradata) to lavc palette format
152  */
153 static int cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
154 {
155  IffContext *s = avctx->priv_data;
156  int count, i;
157  const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);
158  int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
159 
160  if (avctx->bits_per_coded_sample > 8) {
161  av_log(avctx, AV_LOG_ERROR, "bits_per_coded_sample > 8 not supported\n");
162  return AVERROR_INVALIDDATA;
163  }
164 
165  count = 1 << avctx->bits_per_coded_sample;
166  // If extradata is smaller than actually needed, fill the remaining with black.
167  count = FFMIN(palette_size / 3, count);
168  if (count) {
169  for (i = 0; i < count; i++)
170  pal[i] = 0xFF000000 | AV_RB24(palette + i*3);
171  if (s->flags && count >= 32) { // EHB
172  for (i = 0; i < 32; i++)
173  pal[i + 32] = 0xFF000000 | (AV_RB24(palette + i*3) & 0xFEFEFE) >> 1;
174  count = FFMAX(count, 64);
175  }
176  } else { // Create gray-scale color palette for bps < 8
177  count = 1 << avctx->bits_per_coded_sample;
178 
179  for (i = 0; i < count; i++)
180  pal[i] = 0xFF000000 | gray2rgb((i * 255) >> avctx->bits_per_coded_sample);
181  }
182  if (s->masking == MASK_HAS_MASK) {
183  if ((1 << avctx->bits_per_coded_sample) < count) {
184  avpriv_request_sample(avctx, "overlapping mask");
185  return AVERROR_PATCHWELCOME;
186  }
187  memcpy(pal + (1 << avctx->bits_per_coded_sample), pal, count * 4);
188  for (i = 0; i < count; i++)
189  pal[i] &= 0xFFFFFF;
190  } else if (s->masking == MASK_HAS_TRANSPARENT_COLOR &&
191  s->transparency < 1 << avctx->bits_per_coded_sample)
192  pal[s->transparency] &= 0xFFFFFF;
193  return 0;
194 }
195 
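/* Layout of avctx->extradata as consumed by cmap_read_palette() and
 * extract_header() (a sketch, inferred from the reads below): the first
 * big-endian 16-bit word gives the offset of the CMAP palette data from the
 * start of extradata; when that header part is at least 41 bytes long it
 * carries compression, bpp, ham and flags (one byte each), transparency
 * (be16), masking (byte) and the 16 big-endian TVDC table entries; the
 * palette itself is a list of 3-byte big-endian RGB triplets. */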
196 /**
197  * Extracts the IFF extra context and updates internal
198  * decoder structures.
199  *
200  * @param avctx the AVCodecContext whose private decoder context gets updated
201  * @param avpkt the AVPacket to extract extra context from or NULL to use avctx
202  * @return >= 0 in case of success, a negative error code otherwise
203  */
204 static int extract_header(AVCodecContext *const avctx,
205  const AVPacket *const avpkt)
206 {
207  IffContext *s = avctx->priv_data;
208  const uint8_t *buf;
209  unsigned buf_size = 0;
210  int i, palette_size;
211 
212  if (avctx->extradata_size < 2) {
213  av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
214  return AVERROR_INVALIDDATA;
215  }
216  palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
217 
218  if (avpkt && avctx->codec_tag == MKTAG('A', 'N', 'I', 'M')) {
219  uint32_t chunk_id;
220  uint64_t data_size;
221  GetByteContext *gb = &s->gb;
222 
223  bytestream2_skip(gb, 4);
224  while (bytestream2_get_bytes_left(gb) >= 1) {
225  chunk_id = bytestream2_get_le32(gb);
226  data_size = bytestream2_get_be32(gb);
227 
228  if (chunk_id == MKTAG('B', 'M', 'H', 'D')) {
229  bytestream2_skip(gb, data_size + (data_size & 1));
230  } else if (chunk_id == MKTAG('A', 'N', 'H', 'D')) {
231  unsigned extra;
232  if (data_size < 40)
233  return AVERROR_INVALIDDATA;
234 
235  s->compression = (bytestream2_get_byte(gb) << 8) | (s->compression & 0xFF);
236  bytestream2_skip(gb, 19);
237  extra = bytestream2_get_be32(gb);
238  s->is_short = !(extra & 1);
239  s->is_brush = extra == 2;
240  s->is_interlaced = !!(extra & 0x40);
241  data_size -= 24;
242  bytestream2_skip(gb, data_size + (data_size & 1));
243  } else if (chunk_id == MKTAG('D', 'L', 'T', 'A') ||
244  chunk_id == MKTAG('B', 'O', 'D', 'Y')) {
245  if (chunk_id == MKTAG('B','O','D','Y'))
246  s->compression &= 0xFF;
247  break;
248  } else if (chunk_id == MKTAG('C', 'M', 'A', 'P')) {
249  int count = data_size / 3;
250  uint32_t *pal = s->pal;
251 
252  if (count > 256)
253  return AVERROR_INVALIDDATA;
254  if (s->ham) {
255  for (i = 0; i < count; i++)
256  pal[i] = 0xFF000000 | bytestream2_get_le24(gb);
257  } else {
258  for (i = 0; i < count; i++)
259  pal[i] = 0xFF000000 | bytestream2_get_be24(gb);
260  }
261  bytestream2_skip(gb, data_size & 1);
262  } else {
263  bytestream2_skip(gb, data_size + (data_size&1));
264  }
265  }
266  } else if (!avpkt) {
267  buf = avctx->extradata;
268  buf_size = bytestream_get_be16(&buf);
269  if (buf_size <= 1 || palette_size < 0) {
270  av_log(avctx, AV_LOG_ERROR,
271  "Invalid palette size received: %u -> palette data offset: %d\n",
272  buf_size, palette_size);
273  return AVERROR_INVALIDDATA;
274  }
275  }
276 
277  if (buf_size >= 41) {
278  s->compression = bytestream_get_byte(&buf);
279  s->bpp = bytestream_get_byte(&buf);
280  s->ham = bytestream_get_byte(&buf);
281  s->flags = bytestream_get_byte(&buf);
282  s->transparency = bytestream_get_be16(&buf);
283  s->masking = bytestream_get_byte(&buf);
284  for (i = 0; i < 16; i++)
285  s->tvdc[i] = bytestream_get_be16(&buf);
286 
287  if (s->ham) {
288  if (s->bpp > 8) {
289  av_log(avctx, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u\n", s->ham);
290  return AVERROR_INVALIDDATA;
291  } else if (s->ham != (s->bpp > 6 ? 6 : 4)) {
292  av_log(avctx, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u, BPP: %u\n", s->ham, s->bpp);
293  return AVERROR_INVALIDDATA;
294  }
295  }
296 
297  if (s->masking == MASK_HAS_MASK) {
298  if (s->bpp >= 8 && !s->ham) {
299  avctx->pix_fmt = AV_PIX_FMT_RGB32;
300  av_freep(&s->mask_buf);
301  av_freep(&s->mask_palbuf);
302  s->mask_buf = av_malloc((s->planesize * 32) + AV_INPUT_BUFFER_PADDING_SIZE);
303  if (!s->mask_buf)
304  return AVERROR(ENOMEM);
305  if (s->bpp > 16) {
306  av_log(avctx, AV_LOG_ERROR, "bpp %d too large for palette\n", s->bpp);
307  av_freep(&s->mask_buf);
308  return AVERROR(ENOMEM);
309  }
310  s->mask_palbuf = av_malloc((2 << s->bpp) * sizeof(uint32_t) + AV_INPUT_BUFFER_PADDING_SIZE);
311  if (!s->mask_palbuf) {
312  av_freep(&s->mask_buf);
313  return AVERROR(ENOMEM);
314  }
315  }
316  s->bpp++;
317  } else if (s->masking != MASK_NONE && s->masking != MASK_HAS_TRANSPARENT_COLOR) {
318  av_log(avctx, AV_LOG_ERROR, "Masking not supported\n");
319  return AVERROR_PATCHWELCOME;
320  }
321  if (!s->bpp || s->bpp > 32) {
322  av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
323  return AVERROR_INVALIDDATA;
324  }
325  if (s->video_size && s->planesize * s->bpp * avctx->height > s->video_size)
326  return AVERROR_INVALIDDATA;
327 
328  av_freep(&s->ham_buf);
329  av_freep(&s->ham_palbuf);
330 
331  if (s->ham) {
332  int i, count = FFMIN(palette_size / 3, 1 << s->ham);
333  int ham_count;
334  const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);
335  int extra_space = 1;
336 
337  if (avctx->codec_tag == MKTAG('P', 'B', 'M', ' ') && s->ham == 4)
338  extra_space = 4;
339 
340  s->ham_buf = av_malloc((s->planesize * 8) + AV_INPUT_BUFFER_PADDING_SIZE);
341  if (!s->ham_buf)
342  return AVERROR(ENOMEM);
343 
344  ham_count = 8 * (1 << s->ham);
345  s->ham_palbuf = av_malloc(extra_space * (ham_count << !!(s->masking == MASK_HAS_MASK)) * sizeof (uint32_t) + AV_INPUT_BUFFER_PADDING_SIZE);
346  if (!s->ham_palbuf) {
347  av_freep(&s->ham_buf);
348  return AVERROR(ENOMEM);
349  }
350 
351  if (count) { // HAM with color palette attached
352  // prefill with black and palette and set HAM take direct value mask to zero
353  memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof (uint32_t));
354  for (i=0; i < count; i++) {
355  s->ham_palbuf[i*2+1] = 0xFF000000 | AV_RL24(palette + i*3);
356  }
357  count = 1 << s->ham;
358  } else { // HAM with grayscale color palette
359  count = 1 << s->ham;
360  for (i=0; i < count; i++) {
361  s->ham_palbuf[i*2] = 0xFF000000; // take direct color value from palette
362  s->ham_palbuf[i*2+1] = 0xFF000000 | av_le2ne32(gray2rgb((i * 255) >> s->ham));
363  }
364  }
365  for (i=0; i < count; i++) {
366  uint32_t tmp = i << (8 - s->ham);
367  tmp |= tmp >> s->ham;
368  s->ham_palbuf[(i+count)*2] = 0xFF00FFFF; // just modify blue color component
369  s->ham_palbuf[(i+count*2)*2] = 0xFFFFFF00; // just modify red color component
370  s->ham_palbuf[(i+count*3)*2] = 0xFFFF00FF; // just modify green color component
371  s->ham_palbuf[(i+count)*2+1] = 0xFF000000 | tmp << 16;
372  s->ham_palbuf[(i+count*2)*2+1] = 0xFF000000 | tmp;
373  s->ham_palbuf[(i+count*3)*2+1] = 0xFF000000 | tmp << 8;
374  }
375  if (s->masking == MASK_HAS_MASK) {
376  for (i = 0; i < ham_count; i++)
377  s->ham_palbuf[(1 << s->bpp) + i] = s->ham_palbuf[i] | 0xFF000000;
378  }
379  }
380  }
381 
382  return 0;
383 }
384 
385 static av_cold int decode_end(AVCodecContext *avctx)
386 {
387  IffContext *s = avctx->priv_data;
388  av_freep(&s->planebuf);
389  av_freep(&s->ham_buf);
390  av_freep(&s->ham_palbuf);
391  av_freep(&s->mask_buf);
392  av_freep(&s->mask_palbuf);
393  av_freep(&s->video[0]);
394  av_freep(&s->video[1]);
395  av_freep(&s->pal);
396  return 0;
397 }
398 
399 static av_cold int decode_init(AVCodecContext *avctx)
400 {
401  IffContext *s = avctx->priv_data;
402  int err;
403 
404  if (avctx->bits_per_coded_sample <= 8) {
405  int palette_size;
406 
407  if (avctx->extradata_size >= 2)
408  palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
409  else
410  palette_size = 0;
411  avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
412  (avctx->extradata_size >= 2 && palette_size) ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
413  } else if (avctx->bits_per_coded_sample <= 32) {
414  if (avctx->codec_tag == MKTAG('R', 'G', 'B', '8')) {
415  avctx->pix_fmt = AV_PIX_FMT_RGB32;
416  } else if (avctx->codec_tag == MKTAG('R', 'G', 'B', 'N')) {
417  avctx->pix_fmt = AV_PIX_FMT_RGB444;
418  } else if (avctx->codec_tag != MKTAG('D', 'E', 'E', 'P')) {
419  if (avctx->bits_per_coded_sample == 24) {
420  avctx->pix_fmt = AV_PIX_FMT_0BGR32;
421  } else if (avctx->bits_per_coded_sample == 32) {
422  avctx->pix_fmt = AV_PIX_FMT_BGR32;
423  } else {
424  avpriv_request_sample(avctx, "unknown bits_per_coded_sample");
425  return AVERROR_PATCHWELCOME;
426  }
427  }
428  } else {
429  return AVERROR_INVALIDDATA;
430  }
431 
432  if ((err = av_image_check_size(avctx->width, avctx->height, 0, avctx)))
433  return err;
434  s->planesize = FFALIGN(avctx->width, 16) >> 3; // Align plane size in bits to word-boundary
435  s->planebuf = av_malloc(s->planesize * avctx->height + AV_INPUT_BUFFER_PADDING_SIZE);
436  if (!s->planebuf)
437  return AVERROR(ENOMEM);
438 
439  s->bpp = avctx->bits_per_coded_sample;
440 
441  if (avctx->codec_tag == MKTAG('A', 'N', 'I', 'M')) {
442  s->video_size = FFALIGN(avctx->width, 2) * avctx->height * s->bpp;
443  if (!s->video_size)
444  return AVERROR_INVALIDDATA;
445  s->video[0] = av_calloc(FFALIGN(avctx->width, 2) * avctx->height, s->bpp);
446  s->video[1] = av_calloc(FFALIGN(avctx->width, 2) * avctx->height, s->bpp);
447  s->pal = av_calloc(256, sizeof(*s->pal));
448  if (!s->video[0] || !s->video[1] || !s->pal)
449  return AVERROR(ENOMEM);
450  }
451 
452  if ((err = extract_header(avctx, NULL)) < 0)
453  return err;
454 
455  return 0;
456 }
457 
458 /**
459  * Decode interleaved plane buffer up to 8bpp
460  * @param dst Destination buffer
461  * @param buf Source buffer
462  * @param buf_size
463  * @param plane plane number to decode as
464  */
465 static void decodeplane8(uint8_t *dst, const uint8_t *buf, int buf_size, int plane)
466 {
467  const uint64_t *lut;
468  if (plane >= 8) {
469  av_log(NULL, AV_LOG_WARNING, "Ignoring extra planes beyond 8\n");
470  return;
471  }
472  lut = plane8_lut[plane];
473  do {
474  uint64_t v = AV_RN64A(dst) | lut[*buf++];
475  AV_WN64A(dst, v);
476  dst += 8;
477  } while (--buf_size);
478 }
479 
480 /**
481  * Decode interleaved plane buffer up to 24bpp
482  * @param dst Destination buffer
483  * @param buf Source buffer
484  * @param buf_size
485  * @param plane plane number to decode as
486  */
487 static void decodeplane32(uint32_t *dst, const uint8_t *buf, int buf_size, int plane)
488 {
489  const uint32_t *lut = plane32_lut[plane];
490  do {
491  unsigned mask = (*buf >> 2) & ~3;
492  dst[0] |= lut[mask++];
493  dst[1] |= lut[mask++];
494  dst[2] |= lut[mask++];
495  dst[3] |= lut[mask];
496  mask = (*buf++ << 2) & 0x3F;
497  dst[4] |= lut[mask++];
498  dst[5] |= lut[mask++];
499  dst[6] |= lut[mask++];
500  dst[7] |= lut[mask];
501  dst += 8;
502  } while (--buf_size);
503 }
504 
505 #define DECODE_HAM_PLANE32(x) \
506  first = buf[x] << 1; \
507  second = buf[(x)+1] << 1; \
508  delta &= pal[first++]; \
509  delta |= pal[first]; \
510  dst[x] = delta; \
511  delta &= pal[second++]; \
512  delta |= pal[second]; \
513  dst[(x)+1] = delta
514 
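/* Hold-And-Modify (HAM) in short, as implemented by the macro above and by
 * decode_ham_plane32() below: ham_palbuf stores a (mask, value) pair for each
 * chunky index, and every output pixel is derived from the previous one as
 * new = (prev & mask) | value. Indices within the base palette range reset
 * the pixel to a palette colour (mask 0), while the next three index groups
 * keep the previous pixel and overwrite only one colour component each
 * (blue, red, green respectively; see the table built in extract_header()). */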
515 /**
516  * Converts one line of HAM6/8-encoded chunky buffer to 24bpp.
517  *
518  * @param dst the destination 24bpp buffer
519  * @param buf the source 8bpp chunky buffer
520  * @param pal the HAM decode table
521  * @param buf_size the plane size in bytes
522  */
523 static void decode_ham_plane32(uint32_t *dst, const uint8_t *buf,
524  const uint32_t *const pal, unsigned buf_size)
525 {
526  uint32_t delta = pal[1]; /* first palette entry */
527  do {
528  uint32_t first, second;
529  DECODE_HAM_PLANE32(0);
530  DECODE_HAM_PLANE32(2);
531  DECODE_HAM_PLANE32(4);
532  DECODE_HAM_PLANE32(6);
533  buf += 8;
534  dst += 8;
535  } while (--buf_size);
536 }
537 
538 static void lookup_pal_indicies(uint32_t *dst, const uint32_t *buf,
539  const uint32_t *const pal, unsigned width)
540 {
541  do {
542  *dst++ = pal[*buf++];
543  } while (--width);
544 }
545 
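/* byterun1 (a.k.a. PackBits) run-length coding, as handled below: each
 * control byte n is followed either by n+1 literal bytes (n >= 0) or by a
 * single byte that is repeated -n+1 times (-127 <= n < 0); n == -128 is a
 * no-op. For example, the stream 02 41 42 43 FD 61 expands to 'A' 'B' 'C'
 * followed by four 0x61 bytes. */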
546 /**
547  * Decode one complete byterun1 encoded line.
548  *
549  * @param dst the destination buffer where to store decompressed bitstream
550  * @param dst_size the destination plane size in bytes
551  * @param gb a GetByteContext positioned at the start of the source
552  *        byterun1 compressed bitstream
553  * @return the current byte position in gb after the line has been decoded
554  */
555 static int decode_byterun(uint8_t *dst, int dst_size,
556  GetByteContext *gb)
557 {
558  unsigned x;
559  for (x = 0; x < dst_size && bytestream2_get_bytes_left(gb) > 0;) {
560  unsigned length;
561  const int8_t value = bytestream2_get_byte(gb);
562  if (value >= 0) {
563  length = FFMIN3(value + 1, dst_size - x, bytestream2_get_bytes_left(gb));
564  bytestream2_get_buffer(gb, dst + x, length);
565  if (length < value + 1)
566  bytestream2_skip(gb, value + 1 - length);
567  } else if (value > -128) {
568  length = FFMIN(-value + 1, dst_size - x);
569  memset(dst + x, bytestream2_get_byte(gb), length);
570  } else { // noop
571  continue;
572  }
573  x += length;
574  }
575  if (x < dst_size) {
576  av_log(NULL, AV_LOG_WARNING, "decode_byterun ended before plane size\n");
577  memset(dst+x, 0, dst_size - x);
578  }
579  return bytestream2_tell(gb);
580 }
581 
582 static int decode_byterun2(uint8_t *dst, int height, int line_size,
583  GetByteContext *gb)
584 {
585  GetByteContext cmds;
586  unsigned count;
587  int i, y_pos = 0, x_pos = 0;
588 
589  if (bytestream2_get_be32(gb) != MKBETAG('V', 'D', 'A', 'T'))
590  return 0;
591 
592  bytestream2_skip(gb, 4);
593  count = bytestream2_get_be16(gb) - 2;
594  if (bytestream2_get_bytes_left(gb) < count)
595  return 0;
596 
597  bytestream2_init(&cmds, gb->buffer, count);
598  bytestream2_skip(gb, count);
599 
600  for (i = 0; i < count && x_pos < line_size; i++) {
601  int8_t cmd = bytestream2_get_byte(&cmds);
602  int l, r;
603 
604  if (cmd == 0) {
605  l = bytestream2_get_be16(gb);
606  while (l-- > 0 && x_pos < line_size) {
607  dst[x_pos + y_pos * line_size ] = bytestream2_get_byte(gb);
608  dst[x_pos + y_pos++ * line_size + 1] = bytestream2_get_byte(gb);
609  if (y_pos >= height) {
610  y_pos = 0;
611  x_pos += 2;
612  }
613  }
614  } else if (cmd < 0) {
615  l = -cmd;
616  while (l-- > 0 && x_pos < line_size) {
617  dst[x_pos + y_pos * line_size ] = bytestream2_get_byte(gb);
618  dst[x_pos + y_pos++ * line_size + 1] = bytestream2_get_byte(gb);
619  if (y_pos >= height) {
620  y_pos = 0;
621  x_pos += 2;
622  }
623  }
624  } else if (cmd == 1) {
625  l = bytestream2_get_be16(gb);
626  r = bytestream2_get_be16(gb);
627  while (l-- > 0 && x_pos < line_size) {
628  dst[x_pos + y_pos * line_size ] = r >> 8;
629  dst[x_pos + y_pos++ * line_size + 1] = r & 0xFF;
630  if (y_pos >= height) {
631  y_pos = 0;
632  x_pos += 2;
633  }
634  }
635  } else {
636  l = cmd;
637  r = bytestream2_get_be16(gb);
638  while (l-- > 0 && x_pos < line_size) {
639  dst[x_pos + y_pos * line_size ] = r >> 8;
640  dst[x_pos + y_pos++ * line_size + 1] = r & 0xFF;
641  if (y_pos >= height) {
642  y_pos = 0;
643  x_pos += 2;
644  }
645  }
646  }
647  }
648 
649  return bytestream2_tell(gb);
650 }
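/* The VDAT payload parsed above splits commands from data: after the 'VDAT'
 * tag and chunk size comes a be16 whose value minus 2 is the number of
 * command bytes, then the command stream, then be16 data words. cmd == 0
 * copies a be16-counted number of data words, cmd == 1 repeats one data word
 * a be16-counted number of times, a negative cmd copies -cmd words, and any
 * other positive cmd repeats the next data word cmd times. Output is written
 * column by column, two bytes per column, wrapping every <height> rows. */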
651 
652 #define DECODE_RGBX_COMMON(type) \
653  if (!length) { \
654  length = bytestream2_get_byte(gb); \
655  if (!length) { \
656  length = bytestream2_get_be16(gb); \
657  if (!length) \
658  return; \
659  } \
660  } \
661  for (i = 0; i < length; i++) { \
662  *(type *)(dst + y*linesize + x * sizeof(type)) = pixel; \
663  x += 1; \
664  if (x >= width) { \
665  y += 1; \
666  if (y >= height) \
667  return; \
668  x = 0; \
669  } \
670  }
671 
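/* Run lengths shared by the two RLE variants below (see DECODE_RGBX_COMMON):
 * a length of 0 means an extended count follows, first as one byte and, if
 * that is also 0, as a be16. RGB8 stores a be24 RGB value plus one byte whose
 * low 7 bits are the run length; RGBN packs everything into a be16 whose top
 * 12 bits are a 4:4:4 colour and whose low 3 bits are the run length (the
 * remaining bit is discarded by the >> 4). */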
672 /**
673  * Decode RGB8 buffer
674  * @param[out] dst Destination buffer
675  * @param width Width of destination buffer (pixels)
676  * @param height Height of destination buffer (pixels)
677  * @param linesize Line size of destination buffer (bytes)
678  */
679 static void decode_rgb8(GetByteContext *gb, uint8_t *dst, int width, int height, int linesize)
680 {
681  int x = 0, y = 0, i, length;
682  while (bytestream2_get_bytes_left(gb) >= 4) {
683  uint32_t pixel = 0xFF000000 | bytestream2_get_be24(gb);
684  length = bytestream2_get_byte(gb) & 0x7F;
685  DECODE_RGBX_COMMON(uint32_t)
686  }
687 }
688 
689 /**
690  * Decode RGBN buffer
691  * @param[out] dst Destination buffer
692  * @param width Width of destination buffer (pixels)
693  * @param height Height of destination buffer (pixels)
694  * @param linesize Line size of destination buffer (bytes)
695  */
696 static void decode_rgbn(GetByteContext *gb, uint8_t *dst, int width, int height, int linesize)
697 {
698  int x = 0, y = 0, i, length;
699  while (bytestream2_get_bytes_left(gb) >= 2) {
700  uint32_t pixel = bytestream2_get_be16u(gb);
701  length = pixel & 0x7;
702  pixel >>= 4;
703  DECODE_RGBX_COMMON(uint16_t)
704  }
705 }
706 
707 /**
708  * Decode DEEP RLE 32-bit buffer
709  * @param[out] dst Destination buffer
710  * @param[in] src Source buffer
711  * @param src_size Source buffer size (bytes)
712  * @param width Width of destination buffer (pixels)
713  * @param height Height of destination buffer (pixels)
714  * @param linesize Line size of destination buffer (bytes)
715  */
716 static void decode_deep_rle32(uint8_t *dst, const uint8_t *src, int src_size, int width, int height, int linesize)
717 {
718  const uint8_t *src_end = src + src_size;
719  int x = 0, y = 0, i;
720  while (src_end - src >= 5) {
721  int opcode;
722  opcode = *(int8_t *)src++;
723  if (opcode >= 0) {
724  int size = opcode + 1;
725  for (i = 0; i < size; i++) {
726  int length = FFMIN(size - i, width - x);
727  if (src_end - src < length * 4)
728  return;
729  memcpy(dst + y*linesize + x * 4, src, length * 4);
730  src += length * 4;
731  x += length;
732  i += length;
733  if (x >= width) {
734  x = 0;
735  y += 1;
736  if (y >= height)
737  return;
738  }
739  }
740  } else {
741  int size = -opcode + 1;
742  uint32_t pixel = AV_RN32(src);
743  for (i = 0; i < size; i++) {
744  *(uint32_t *)(dst + y*linesize + x * 4) = pixel;
745  x += 1;
746  if (x >= width) {
747  x = 0;
748  y += 1;
749  if (y >= height)
750  return;
751  }
752  }
753  src += 4;
754  }
755  }
756 }
757 
758 /**
759  * Decode DEEP TVDC 32-bit buffer
760  * @param[out] dst Destination buffer
761  * @param[in] src Source buffer
762  * @param src_size Source buffer size (bytes)
763  * @param width Width of destination buffer (pixels)
764  * @param height Height of destination buffer (pixels)
765  * @param linesize Line size of destination buffer (bytes)
766  * @param[in] tvdc TVDC lookup table
767  */
768 static void decode_deep_tvdc32(uint8_t *dst, const uint8_t *src, int src_size, int width, int height, int linesize, const int16_t *tvdc)
769 {
770  int x = 0, y = 0, plane = 0;
771  int8_t pixel = 0;
772  int i, j;
773 
774  for (i = 0; i < src_size * 2;) {
775 #define GETNIBBLE ((i & 1) ? (src[i>>1] & 0xF) : (src[i>>1] >> 4))
776  int d = tvdc[GETNIBBLE];
777  i++;
778  if (d) {
779  pixel += d;
780  dst[y * linesize + x*4 + plane] = pixel;
781  x++;
782  } else {
783  if (i >= src_size * 2)
784  return;
785  d = GETNIBBLE + 1;
786  i++;
787  d = FFMIN(d, width - x);
788  for (j = 0; j < d; j++) {
789  dst[y * linesize + x*4 + plane] = pixel;
790  x++;
791  }
792  }
793  if (x >= width) {
794  plane++;
795  if (plane >= 4) {
796  y++;
797  if (y >= height)
798  return;
799  plane = 0;
800  }
801  x = 0;
802  pixel = 0;
803  i = (i + 1) & ~1;
804  }
805  }
806 }
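/* decode_deep_tvdc32() above consumes a stream of 4-bit indices into the
 * 16-entry signed tvdc[] table: a non-zero delta is added to the running
 * 8-bit value and one byte of the current plane is written, while a zero
 * delta means the next nibble (+1) gives a run length repeating the current
 * value. Planes rotate every <width> bytes, and the nibble stream is padded
 * to a byte boundary at each plane change. */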
807 
808 static void decode_short_horizontal_delta(uint8_t *dst,
809  const uint8_t *buf, const uint8_t *buf_end,
810  int w, int bpp, int dst_size)
811 {
812  int planepitch = FFALIGN(w, 16) >> 3;
813  int pitch = planepitch * bpp;
814  GetByteContext ptrs, gb;
815  PutByteContext pb;
816  unsigned ofssrc, pos;
817  int i, k;
818 
819  bytestream2_init(&ptrs, buf, buf_end - buf);
820  bytestream2_init_writer(&pb, dst, dst_size);
821 
822  for (k = 0; k < bpp; k++) {
823  ofssrc = bytestream2_get_be32(&ptrs);
824  pos = 0;
825 
826  if (!ofssrc)
827  continue;
828 
829  if (ofssrc >= buf_end - buf)
830  continue;
831 
832  bytestream2_init(&gb, buf + ofssrc, buf_end - (buf + ofssrc));
833  while (bytestream2_peek_be16(&gb) != 0xFFFF && bytestream2_get_bytes_left(&gb) > 3) {
834  int16_t offset = bytestream2_get_be16(&gb);
835  unsigned noffset;
836 
837  if (offset >= 0) {
838  unsigned data = bytestream2_get_be16(&gb);
839 
840  pos += offset * 2;
841  noffset = (pos / planepitch) * pitch + (pos % planepitch) + k * planepitch;
842  bytestream2_seek_p(&pb, noffset, SEEK_SET);
843  bytestream2_put_be16(&pb, data);
844  } else {
845  uint16_t count = bytestream2_get_be16(&gb);
846 
847  pos += 2 * -(offset + 2);
848  for (i = 0; i < count; i++) {
849  uint16_t data = bytestream2_get_be16(&gb);
850 
851  pos += 2;
852  noffset = (pos / planepitch) * pitch + (pos % planepitch) + k * planepitch;
853  bytestream2_seek_p(&pb, noffset, SEEK_SET);
854  bytestream2_put_be16(&pb, data);
855  }
856  }
857  }
858  }
859 }
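/* decode_short_horizontal_delta() above and the DLTA decoders that follow
 * share a common framing, visible in how they read the chunk: it starts with
 * a table of be32 offsets, one per bitplane (only the first bpp entries are
 * read), each locating that plane's opcode stream inside the chunk, with 0
 * meaning the plane is unchanged. The variants that keep opcodes and data
 * separate (the long/short vertical deltas and decode_delta_l) read a second
 * per-plane offset table starting 32 bytes in, which is why they require at
 * least 64 bytes of input. */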
860 
861 static void decode_byte_vertical_delta(uint8_t *dst,
862  const uint8_t *buf, const uint8_t *buf_end,
863  int w, int xor, int bpp, int dst_size)
864 {
865  int ncolumns = ((w + 15) / 16) * 2;
866  int dstpitch = ncolumns * bpp;
867  unsigned ofsdst, ofssrc, opcode, x;
868  GetByteContext ptrs, gb;
869  PutByteContext pb;
870  int i, j, k;
871 
872  bytestream2_init(&ptrs, buf, buf_end - buf);
873  bytestream2_init_writer(&pb, dst, dst_size);
874 
875  for (k = 0; k < bpp; k++) {
876  ofssrc = bytestream2_get_be32(&ptrs);
877 
878  if (!ofssrc)
879  continue;
880 
881  if (ofssrc >= buf_end - buf)
882  continue;
883 
884  bytestream2_init(&gb, buf + ofssrc, buf_end - (buf + ofssrc));
885  for (j = 0; j < ncolumns; j++) {
886  ofsdst = j + k * ncolumns;
887 
888  i = bytestream2_get_byte(&gb);
889  while (i > 0) {
890  opcode = bytestream2_get_byte(&gb);
891 
892  if (opcode == 0) {
893  opcode = bytestream2_get_byte(&gb);
894  x = bytestream2_get_byte(&gb);
895 
896  while (opcode) {
897  bytestream2_seek_p(&pb, ofsdst, SEEK_SET);
898  if (xor && ofsdst < dst_size) {
899  bytestream2_put_byte(&pb, dst[ofsdst] ^ x);
900  } else {
901  bytestream2_put_byte(&pb, x);
902  }
903  ofsdst += dstpitch;
904  opcode--;
905  }
906  } else if (opcode < 0x80) {
907  ofsdst += opcode * dstpitch;
908  } else {
909  opcode &= 0x7f;
910 
911  while (opcode) {
912  bytestream2_seek_p(&pb, ofsdst, SEEK_SET);
913  if (xor && ofsdst < dst_size) {
914  bytestream2_put_byte(&pb, dst[ofsdst] ^ bytestream2_get_byte(&gb));
915  } else {
916  bytestream2_put_byte(&pb, bytestream2_get_byte(&gb));
917  }
918  ofsdst += dstpitch;
919  opcode--;
920  }
921  }
922  i--;
923  }
924  }
925  }
926 }
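/* Opcode scheme used per column by decode_byte_vertical_delta() above and,
 * with 16/32-bit quantities, by the short/long vertical delta variants below:
 * each column starts with an op count, then op == 0 means "run" (a count and
 * a value follow and the value is written down the column), op below the sign
 * threshold skips that many rows, and op above it copies (op & mask) literal
 * values down the column. The byte variant can additionally XOR the values
 * into the previous frame when its xor flag is set. */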
927 
928 static void decode_delta_j(uint8_t *dst,
929  const uint8_t *buf, const uint8_t *buf_end,
930  int w, int h, int bpp, int dst_size)
931 {
932  int32_t pitch;
933  uint8_t *ptr;
934  uint32_t type, flag, cols, groups, rows, bytes;
935  uint32_t offset;
936  int planepitch_byte = (w + 7) / 8;
937  int planepitch = ((w + 15) / 16) * 2;
938  int kludge_j, b, g, r, d;
939  GetByteContext gb;
940 
941  pitch = planepitch * bpp;
942  kludge_j = w < 320 ? (320 - w) / 8 / 2 : 0;
943 
944  bytestream2_init(&gb, buf, buf_end - buf);
945 
946  while (bytestream2_get_bytes_left(&gb) >= 2) {
947  type = bytestream2_get_be16(&gb);
948 
949  switch (type) {
950  case 0:
951  return;
952  case 1:
953  flag = bytestream2_get_be16(&gb);
954  cols = bytestream2_get_be16(&gb);
955  groups = bytestream2_get_be16(&gb);
956 
957  for (g = 0; g < groups; g++) {
958  offset = bytestream2_get_be16(&gb);
959 
960  if (cols * bpp == 0 || bytestream2_get_bytes_left(&gb) < cols * bpp) {
961  av_log(NULL, AV_LOG_ERROR, "cols*bpp is invalid (%"PRId32"*%d)", cols, bpp);
962  return;
963  }
964 
965  if (kludge_j)
966  offset = ((offset / (320 / 8)) * pitch) + (offset % (320 / 8)) - kludge_j;
967  else
968  offset = ((offset / planepitch_byte) * pitch) + (offset % planepitch_byte);
969 
970  for (b = 0; b < cols; b++) {
971  for (d = 0; d < bpp; d++) {
972  uint8_t value = bytestream2_get_byte(&gb);
973 
974  if (offset >= dst_size)
975  return;
976  ptr = dst + offset;
977 
978  if (flag)
979  ptr[0] ^= value;
980  else
981  ptr[0] = value;
982 
983  offset += planepitch;
984  }
985  }
986  if ((cols * bpp) & 1)
987  bytestream2_skip(&gb, 1);
988  }
989  break;
990  case 2:
991  flag = bytestream2_get_be16(&gb);
992  rows = bytestream2_get_be16(&gb);
993  bytes = bytestream2_get_be16(&gb);
994  groups = bytestream2_get_be16(&gb);
995 
996  for (g = 0; g < groups; g++) {
997  offset = bytestream2_get_be16(&gb);
998 
999  if (kludge_j)
1000  offset = ((offset / (320 / 8)) * pitch) + (offset % (320/ 8)) - kludge_j;
1001  else
1002  offset = ((offset / planepitch_byte) * pitch) + (offset % planepitch_byte);
1003 
1004  for (r = 0; r < rows; r++) {
1005  for (d = 0; d < bpp; d++) {
1006  unsigned noffset = offset + (r * pitch) + d * planepitch;
1007 
1008  if (!bytes || bytestream2_get_bytes_left(&gb) < bytes) {
1009  av_log(NULL, AV_LOG_ERROR, "bytes %"PRId32" is invalid", bytes);
1010  return;
1011  }
1012 
1013  for (b = 0; b < bytes; b++) {
1014  uint8_t value = bytestream2_get_byte(&gb);
1015 
1016  if (noffset >= dst_size)
1017  return;
1018  ptr = dst + noffset;
1019 
1020  if (flag)
1021  ptr[0] ^= value;
1022  else
1023  ptr[0] = value;
1024 
1025  noffset++;
1026  }
1027  }
1028  }
1029  if ((rows * bytes * bpp) & 1)
1030  bytestream2_skip(&gb, 1);
1031  }
1032  break;
1033  default:
1034  return;
1035  }
1036  }
1037 }
1038 
1039 static void decode_short_vertical_delta(uint8_t *dst,
1040  const uint8_t *buf, const uint8_t *buf_end,
1041  int w, int bpp, int dst_size)
1042 {
1043  int ncolumns = (w + 15) >> 4;
1044  int dstpitch = ncolumns * bpp * 2;
1045  unsigned ofsdst, ofssrc, ofsdata, opcode, x;
1046  GetByteContext ptrs, gb, dptrs, dgb;
1047  PutByteContext pb;
1048  int i, j, k;
1049 
1050  if (buf_end - buf <= 64)
1051  return;
1052 
1053  bytestream2_init(&ptrs, buf, buf_end - buf);
1054  bytestream2_init(&dptrs, buf + 32, (buf_end - buf) - 32);
1055  bytestream2_init_writer(&pb, dst, dst_size);
1056 
1057  for (k = 0; k < bpp; k++) {
1058  ofssrc = bytestream2_get_be32(&ptrs);
1059  ofsdata = bytestream2_get_be32(&dptrs);
1060 
1061  if (!ofssrc)
1062  continue;
1063 
1064  if (ofssrc >= buf_end - buf)
1065  return;
1066 
1067  if (ofsdata >= buf_end - buf)
1068  return;
1069 
1070  bytestream2_init(&gb, buf + ofssrc, buf_end - (buf + ofssrc));
1071  bytestream2_init(&dgb, buf + ofsdata, buf_end - (buf + ofsdata));
1072  for (j = 0; j < ncolumns; j++) {
1073  ofsdst = (j + k * ncolumns) * 2;
1074 
1075  i = bytestream2_get_byte(&gb);
1076  while (i > 0) {
1077  opcode = bytestream2_get_byte(&gb);
1078 
1079  if (opcode == 0) {
1080  opcode = bytestream2_get_byte(&gb);
1081  x = bytestream2_get_be16(&dgb);
1082 
1083  while (opcode) {
1084  bytestream2_seek_p(&pb, ofsdst, SEEK_SET);
1085  bytestream2_put_be16(&pb, x);
1086  ofsdst += dstpitch;
1087  opcode--;
1088  }
1089  } else if (opcode < 0x80) {
1090  ofsdst += opcode * dstpitch;
1091  } else {
1092  opcode &= 0x7f;
1093 
1094  while (opcode) {
1095  bytestream2_seek_p(&pb, ofsdst, SEEK_SET);
1096  bytestream2_put_be16(&pb, bytestream2_get_be16(&dgb));
1097  ofsdst += dstpitch;
1098  opcode--;
1099  }
1100  }
1101  i--;
1102  }
1103  }
1104  }
1105 }
1106 
1107 static void decode_long_vertical_delta(uint8_t *dst,
1108  const uint8_t *buf, const uint8_t *buf_end,
1109  int w, int bpp, int dst_size)
1110 {
1111  int ncolumns = (w + 31) >> 5;
1112  int dstpitch = ((w + 15) / 16 * 2) * bpp;
1113  unsigned ofsdst, ofssrc, ofsdata, opcode, x;
1114  GetByteContext ptrs, gb, dptrs, dgb;
1115  PutByteContext pb;
1116  int i, j, k, h;
1117 
1118  if (buf_end - buf <= 64)
1119  return;
1120 
1121  h = (((w + 15) / 16 * 2) != ((w + 31) / 32 * 4)) ? 1 : 0;
1122  bytestream2_init(&ptrs, buf, buf_end - buf);
1123  bytestream2_init(&dptrs, buf + 32, (buf_end - buf) - 32);
1124  bytestream2_init_writer(&pb, dst, dst_size);
1125 
1126  for (k = 0; k < bpp; k++) {
1127  ofssrc = bytestream2_get_be32(&ptrs);
1128  ofsdata = bytestream2_get_be32(&dptrs);
1129 
1130  if (!ofssrc)
1131  continue;
1132 
1133  if (ofssrc >= buf_end - buf)
1134  return;
1135 
1136  if (ofsdata >= buf_end - buf)
1137  return;
1138 
1139  bytestream2_init(&gb, buf + ofssrc, buf_end - (buf + ofssrc));
1140  bytestream2_init(&dgb, buf + ofsdata, buf_end - (buf + ofsdata));
1141  for (j = 0; j < ncolumns; j++) {
1142  ofsdst = (j + k * ncolumns) * 4 - h * (2 * k);
1143 
1144  i = bytestream2_get_byte(&gb);
1145  while (i > 0) {
1146  opcode = bytestream2_get_byte(&gb);
1147 
1148  if (opcode == 0) {
1149  opcode = bytestream2_get_byte(&gb);
1150  if (h && (j == (ncolumns - 1))) {
1151  x = bytestream2_get_be16(&dgb);
1152  bytestream2_skip(&dgb, 2);
1153  } else {
1154  x = bytestream2_get_be32(&dgb);
1155  }
1156 
1157  if (ofsdst + (opcode - 1LL) * dstpitch > bytestream2_size_p(&pb))
1158  return;
1159 
1160  while (opcode) {
1161  bytestream2_seek_p(&pb, ofsdst, SEEK_SET);
1162  if (h && (j == (ncolumns - 1))) {
1163  bytestream2_put_be16(&pb, x);
1164  } else {
1165  bytestream2_put_be32(&pb, x);
1166  }
1167  ofsdst += dstpitch;
1168  opcode--;
1169  }
1170  } else if (opcode < 0x80) {
1171  ofsdst += opcode * dstpitch;
1172  } else {
1173  opcode &= 0x7f;
1174 
1175  while (opcode) {
1176  bytestream2_seek_p(&pb, ofsdst, SEEK_SET);
1177  if (h && (j == (ncolumns - 1))) {
1178  bytestream2_put_be16(&pb, bytestream2_get_be16(&dgb));
1179  bytestream2_skip(&dgb, 2);
1180  } else {
1181  bytestream2_put_be32(&pb, bytestream2_get_be32(&dgb));
1182  }
1183  ofsdst += dstpitch;
1184  opcode--;
1185  }
1186  }
1187  i--;
1188  }
1189  }
1190  }
1191 }
1192 
1193 static void decode_short_vertical_delta2(uint8_t *dst,
1194  const uint8_t *buf, const uint8_t *buf_end,
1195  int w, int bpp, int dst_size)
1196 {
1197  int ncolumns = (w + 15) >> 4;
1198  int dstpitch = ncolumns * bpp * 2;
1199  unsigned ofsdst, ofssrc, opcode, x;
1200  GetByteContext ptrs, gb;
1201  PutByteContext pb;
1202  int i, j, k;
1203 
1204  bytestream2_init(&ptrs, buf, buf_end - buf);
1205  bytestream2_init_writer(&pb, dst, dst_size);
1206 
1207  for (k = 0; k < bpp; k++) {
1208  ofssrc = bytestream2_get_be32(&ptrs);
1209 
1210  if (!ofssrc)
1211  continue;
1212 
1213  if (ofssrc >= buf_end - buf)
1214  continue;
1215 
1216  bytestream2_init(&gb, buf + ofssrc, buf_end - (buf + ofssrc));
1217  for (j = 0; j < ncolumns; j++) {
1218  ofsdst = (j + k * ncolumns) * 2;
1219 
1220  i = bytestream2_get_be16(&gb);
1221  while (i > 0 && bytestream2_get_bytes_left(&gb) > 4) {
1222  opcode = bytestream2_get_be16(&gb);
1223 
1224  if (opcode == 0) {
1225  opcode = bytestream2_get_be16(&gb);
1226  x = bytestream2_get_be16(&gb);
1227 
1228  while (opcode && bytestream2_get_bytes_left_p(&pb) > 1) {
1229  bytestream2_seek_p(&pb, ofsdst, SEEK_SET);
1230  bytestream2_put_be16(&pb, x);
1231  ofsdst += dstpitch;
1232  opcode--;
1233  }
1234  } else if (opcode < 0x8000) {
1235  ofsdst += opcode * dstpitch;
1236  } else {
1237  opcode &= 0x7fff;
1238 
1239  while (opcode && bytestream2_get_bytes_left(&gb) > 1 &&
1240  bytestream2_get_bytes_left_p(&pb) > 1) {
1241  bytestream2_seek_p(&pb, ofsdst, SEEK_SET);
1242  bytestream2_put_be16(&pb, bytestream2_get_be16(&gb));
1243  ofsdst += dstpitch;
1244  opcode--;
1245  }
1246  }
1247  i--;
1248  }
1249  }
1250  }
1251 }
1252 
1253 static void decode_long_vertical_delta2(uint8_t *dst,
1254  const uint8_t *buf, const uint8_t *buf_end,
1255  int w, int bpp, int dst_size)
1256 {
1257  int ncolumns = (w + 31) >> 5;
1258  int dstpitch = ((w + 15) / 16 * 2) * bpp;
1259  unsigned ofsdst, ofssrc, opcode, x;
1260  unsigned skip = 0x80000000, mask = skip - 1;
1261  GetByteContext ptrs, gb;
1262  PutByteContext pb;
1263  int i, j, k, h;
1264 
1265  h = (((w + 15) / 16 * 2) != ((w + 31) / 32 * 4)) ? 1 : 0;
1266  bytestream2_init(&ptrs, buf, buf_end - buf);
1267  bytestream2_init_writer(&pb, dst, dst_size);
1268 
1269  for (k = 0; k < bpp; k++) {
1270  ofssrc = bytestream2_get_be32(&ptrs);
1271 
1272  if (!ofssrc)
1273  continue;
1274 
1275  if (ofssrc >= buf_end - buf)
1276  continue;
1277 
1278  bytestream2_init(&gb, buf + ofssrc, buf_end - (buf + ofssrc));
1279  for (j = 0; j < ncolumns; j++) {
1280  ofsdst = (j + k * ncolumns) * 4 - h * (2 * k);
1281 
1282  if (h && (j == (ncolumns - 1))) {
1283  skip = 0x8000;
1284  mask = skip - 1;
1285  }
1286 
1287  i = bytestream2_get_be32(&gb);
1288  while (i > 0 && bytestream2_get_bytes_left(&gb) > 4) {
1289  opcode = bytestream2_get_be32(&gb);
1290 
1291  if (opcode == 0) {
1292  if (h && (j == ncolumns - 1)) {
1293  opcode = bytestream2_get_be16(&gb);
1294  x = bytestream2_get_be16(&gb);
1295  } else {
1296  opcode = bytestream2_get_be32(&gb);
1297  x = bytestream2_get_be32(&gb);
1298  }
1299 
1300  if (ofsdst + (opcode - 1LL) * dstpitch > bytestream2_size_p(&pb))
1301  return;
1302 
1303  while (opcode && bytestream2_get_bytes_left_p(&pb) > 1) {
1304  bytestream2_seek_p(&pb, ofsdst, SEEK_SET);
1305  if (h && (j == ncolumns - 1))
1306  bytestream2_put_be16(&pb, x);
1307  else
1308  bytestream2_put_be32(&pb, x);
1309  ofsdst += dstpitch;
1310  opcode--;
1311  }
1312  } else if (opcode < skip) {
1313  ofsdst += opcode * dstpitch;
1314  } else {
1315  opcode &= mask;
1316 
1317  while (opcode && bytestream2_get_bytes_left(&gb) > 1 &&
1318  bytestream2_get_bytes_left_p(&pb) > 1) {
1319  bytestream2_seek_p(&pb, ofsdst, SEEK_SET);
1320  if (h && (j == ncolumns - 1)) {
1321  bytestream2_put_be16(&pb, bytestream2_get_be16(&gb));
1322  } else {
1323  bytestream2_put_be32(&pb, bytestream2_get_be32(&gb));
1324  }
1325  ofsdst += dstpitch;
1326  opcode--;
1327  }
1328  }
1329  i--;
1330  }
1331  }
1332  }
1333 }
1334 
1335 static void decode_delta_d(uint8_t *dst,
1336  const uint8_t *buf, const uint8_t *buf_end,
1337  int w, int flag, int bpp, int dst_size)
1338 {
1339  int planepitch = FFALIGN(w, 16) >> 3;
1340  int pitch = planepitch * bpp;
1341  int planepitch_byte = (w + 7) / 8;
1342  unsigned entries, ofssrc;
1343  GetByteContext gb, ptrs;
1344  PutByteContext pb;
1345  int k;
1346 
1347  if (buf_end - buf <= 4 * bpp)
1348  return;
1349 
1350  bytestream2_init_writer(&pb, dst, dst_size);
1351  bytestream2_init(&ptrs, buf, bpp * 4);
1352 
1353  for (k = 0; k < bpp; k++) {
1354  ofssrc = bytestream2_get_be32(&ptrs);
1355 
1356  if (!ofssrc)
1357  continue;
1358 
1359  if (ofssrc >= buf_end - buf)
1360  continue;
1361 
1362  bytestream2_init(&gb, buf + ofssrc, buf_end - (buf + ofssrc));
1363 
1364  entries = bytestream2_get_be32(&gb);
1365  while (entries && bytestream2_get_bytes_left(&gb) >= 8) {
1366  int32_t opcode = bytestream2_get_be32(&gb);
1367  unsigned offset = bytestream2_get_be32(&gb);
1368 
1369  bytestream2_seek_p(&pb, (offset / planepitch_byte) * pitch + (offset % planepitch_byte) + k * planepitch, SEEK_SET);
1370  if (opcode >= 0) {
1371  uint32_t x = bytestream2_get_be32(&gb);
1372  if (opcode && 4 + (opcode - 1LL) * pitch > bytestream2_get_bytes_left_p(&pb))
1373  continue;
1374  while (opcode && bytestream2_get_bytes_left_p(&pb) > 0) {
1375  bytestream2_put_be32(&pb, x);
1376  bytestream2_skip_p(&pb, pitch - 4);
1377  opcode--;
1378  }
1379  } else {
1380  while (opcode && bytestream2_get_bytes_left(&gb) > 0) {
1381  bytestream2_put_be32(&pb, bytestream2_get_be32(&gb));
1382  bytestream2_skip_p(&pb, pitch - 4);
1383  opcode++;
1384  }
1385  }
1386  entries--;
1387  }
1388  }
1389 }
1390 
1391 static void decode_delta_e(uint8_t *dst,
1392  const uint8_t *buf, const uint8_t *buf_end,
1393  int w, int flag, int bpp, int dst_size)
1394 {
1395  int planepitch = FFALIGN(w, 16) >> 3;
1396  int pitch = planepitch * bpp;
1397  int planepitch_byte = (w + 7) / 8;
1398  unsigned entries, ofssrc;
1399  GetByteContext gb, ptrs;
1400  PutByteContext pb;
1401  int k;
1402 
1403  if (buf_end - buf <= 4 * bpp)
1404  return;
1405 
1406  bytestream2_init_writer(&pb, dst, dst_size);
1407  bytestream2_init(&ptrs, buf, bpp * 4);
1408 
1409  for (k = 0; k < bpp; k++) {
1410  ofssrc = bytestream2_get_be32(&ptrs);
1411 
1412  if (!ofssrc)
1413  continue;
1414 
1415  if (ofssrc >= buf_end - buf)
1416  continue;
1417 
1418  bytestream2_init(&gb, buf + ofssrc, buf_end - (buf + ofssrc));
1419 
1420  entries = bytestream2_get_be16(&gb);
1421  while (entries && bytestream2_get_bytes_left(&gb) >= 6) {
1422  int16_t opcode = bytestream2_get_be16(&gb);
1423  unsigned offset = bytestream2_get_be32(&gb);
1424 
1425  bytestream2_seek_p(&pb, (offset / planepitch_byte) * pitch + (offset % planepitch_byte) + k * planepitch, SEEK_SET);
1426  if (opcode >= 0) {
1427  uint16_t x = bytestream2_get_be16(&gb);
1428  while (opcode && bytestream2_get_bytes_left_p(&pb) > 0) {
1429  bytestream2_put_be16(&pb, x);
1430  bytestream2_skip_p(&pb, pitch - 2);
1431  opcode--;
1432  }
1433  } else {
1434  opcode = -opcode;
1435  while (opcode && bytestream2_get_bytes_left(&gb) > 0) {
1436  bytestream2_put_be16(&pb, bytestream2_get_be16(&gb));
1437  bytestream2_skip_p(&pb, pitch - 2);
1438  opcode--;
1439  }
1440  }
1441  entries--;
1442  }
1443  }
1444 }
1445 
1446 static void decode_delta_l(uint8_t *dst,
1447  const uint8_t *buf, const uint8_t *buf_end,
1448  int w, int flag, int bpp, int dst_size)
1449 {
1450  GetByteContext off0, off1, dgb, ogb;
1451  PutByteContext pb;
1452  unsigned poff0, poff1;
1453  int i, k, dstpitch;
1454  int planepitch_byte = (w + 7) / 8;
1455  int planepitch = ((w + 15) / 16) * 2;
1456  int pitch = planepitch * bpp;
1457 
1458  if (buf_end - buf <= 64)
1459  return;
1460 
1461  bytestream2_init(&off0, buf, buf_end - buf);
1462  bytestream2_init(&off1, buf + 32, buf_end - (buf + 32));
1463  bytestream2_init_writer(&pb, dst, dst_size);
1464 
1465  dstpitch = flag ? (((w + 7) / 8) * bpp): 2;
1466 
1467  for (k = 0; k < bpp; k++) {
1468  poff0 = bytestream2_get_be32(&off0);
1469  poff1 = bytestream2_get_be32(&off1);
1470 
1471  if (!poff0)
1472  continue;
1473 
1474  if (2LL * poff0 >= buf_end - buf)
1475  return;
1476 
1477  if (2LL * poff1 >= buf_end - buf)
1478  return;
1479 
1480  bytestream2_init(&dgb, buf + 2 * poff0, buf_end - (buf + 2 * poff0));
1481  bytestream2_init(&ogb, buf + 2 * poff1, buf_end - (buf + 2 * poff1));
1482 
1483  while (bytestream2_peek_be16(&ogb) != 0xFFFF && bytestream2_get_bytes_left(&ogb) >= 4) {
1484  uint32_t offset = bytestream2_get_be16(&ogb);
1485  int16_t cnt = bytestream2_get_be16(&ogb);
1486  uint16_t data;
1487 
1488  offset = ((2 * offset) / planepitch_byte) * pitch + ((2 * offset) % planepitch_byte) + k * planepitch;
1489  if (cnt < 0) {
1490  if (bytestream2_get_bytes_left(&dgb) < 2)
1491  break;
1492  bytestream2_seek_p(&pb, offset, SEEK_SET);
1493  cnt = -cnt;
1494  data = bytestream2_get_be16(&dgb);
1495  for (i = 0; i < cnt; i++) {
1496  bytestream2_put_be16(&pb, data);
1497  bytestream2_skip_p(&pb, dstpitch - 2);
1498  }
1499  } else {
1500  if (bytestream2_get_bytes_left(&dgb) < 2*cnt)
1501  break;
1502  bytestream2_seek_p(&pb, offset, SEEK_SET);
1503  for (i = 0; i < cnt; i++) {
1504  data = bytestream2_get_be16(&dgb);
1505  bytestream2_put_be16(&pb, data);
1506  bytestream2_skip_p(&pb, dstpitch - 2);
1507  }
1508  }
1509  }
1510  }
1511 }
1512 
1513 static int unsupported(AVCodecContext *avctx)
1514 {
1515  IffContext *s = avctx->priv_data;
1516  avpriv_request_sample(avctx, "bitmap (compression 0x%0x, bpp %i, ham %i, interlaced %i)", s->compression, s->bpp, s->ham, s->is_interlaced);
1517  return AVERROR_INVALIDDATA;
1518 }
1519 
1520 static int decode_frame(AVCodecContext *avctx,
1521  void *data, int *got_frame,
1522  AVPacket *avpkt)
1523 {
1524  IffContext *s = avctx->priv_data;
1525  AVFrame *frame = data;
1526  const uint8_t *buf = avpkt->data;
1527  int buf_size = avpkt->size;
1528  const uint8_t *buf_end = buf + buf_size;
1529  int y, plane, res;
1530  GetByteContext *gb = &s->gb;
1531  const AVPixFmtDescriptor *desc;
1532 
1533  bytestream2_init(gb, avpkt->data, avpkt->size);
1534 
1535  if ((res = extract_header(avctx, avpkt)) < 0)
1536  return res;
1537 
1538  if ((res = ff_get_buffer(avctx, frame, 0)) < 0)
1539  return res;
1540  s->frame = frame;
1541 
1542  buf += bytestream2_tell(gb);
1543  buf_size -= bytestream2_tell(gb);
1544  desc = av_pix_fmt_desc_get(avctx->pix_fmt);
1545 
1546  if (!s->init && avctx->bits_per_coded_sample <= 8 - (s->masking == MASK_HAS_MASK) &&
1547  avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1548  if ((res = cmap_read_palette(avctx, (uint32_t *)frame->data[1])) < 0)
1549  return res;
1550  } else if (!s->init && avctx->bits_per_coded_sample <= 8 &&
1551  avctx->pix_fmt == AV_PIX_FMT_RGB32) {
1552  if ((res = cmap_read_palette(avctx, s->mask_palbuf)) < 0)
1553  return res;
1554  }
1555  s->init = 1;
1556 
1557  if (s->compression <= 0xff && (avctx->codec_tag == MKTAG('A', 'N', 'I', 'M'))) {
1558  if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
1559  memcpy(s->pal, s->frame->data[1], 256 * 4);
1560  }
1561 
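/* For still images s->compression is simply the BMHD compression byte
 * (0 raw, 1 byterun1, 2 VDAT, 4 RGB8/RGBN RLE, 5 TVDC). For ANIM streams
 * extract_header() ORs the ANHD operation code into the high byte, so e.g.
 * 0x500/0x501 below is ANIM op 5 (byte vertical delta) on top of a BMHD
 * value of 0 or 1, 0x4a00 is op 0x4a ('J'), and so on for the later cases. */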
1562  switch (s->compression) {
1563  case 0x0:
1564  if (avctx->codec_tag == MKTAG('A', 'C', 'B', 'M')) {
1565  if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
1566  memset(frame->data[0], 0, avctx->height * frame->linesize[0]);
1567  for (plane = 0; plane < s->bpp; plane++) {
1568  for (y = 0; y < avctx->height && buf < buf_end; y++) {
1569  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1570  decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
1571  buf += s->planesize;
1572  }
1573  }
1574  } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
1575  memset(frame->data[0], 0, avctx->height * frame->linesize[0]);
1576  for (y = 0; y < avctx->height; y++) {
1577  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1578  memset(s->ham_buf, 0, s->planesize * 8);
1579  for (plane = 0; plane < s->bpp; plane++) {
1580  const uint8_t * start = buf + (plane * avctx->height + y) * s->planesize;
1581  if (start >= buf_end)
1582  break;
1583  decodeplane8(s->ham_buf, start, FFMIN(s->planesize, buf_end - start), plane);
1584  }
1585  decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
1586  }
1587  } else
1588  return unsupported(avctx);
1589  } else if (avctx->codec_tag == MKTAG('D', 'E', 'E', 'P')) {
1590  int raw_width = avctx->width * (av_get_bits_per_pixel(desc) >> 3);
1591  int x;
1592  for (y = 0; y < avctx->height && buf < buf_end; y++) {
1593  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1594  memcpy(row, buf, FFMIN(raw_width, buf_end - buf));
1595  buf += raw_width;
1596  if (avctx->pix_fmt == AV_PIX_FMT_BGR32) {
1597  for (x = 0; x < avctx->width; x++)
1598  row[4 * x + 3] = row[4 * x + 3] & 0xF0 | (row[4 * x + 3] >> 4);
1599  }
1600  }
1601  } else if (avctx->codec_tag == MKTAG('I', 'L', 'B', 'M') || // interleaved
1602  avctx->codec_tag == MKTAG('A', 'N', 'I', 'M')) {
1603  if (avctx->codec_tag == MKTAG('A', 'N', 'I', 'M'))
1604  memcpy(s->video[0], buf, FFMIN(buf_end - buf, s->video_size));
1605  if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
1606  for (y = 0; y < avctx->height; y++) {
1607  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1608  memset(row, 0, avctx->width);
1609  for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
1610  decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
1611  buf += s->planesize;
1612  }
1613  }
1614  } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
1615  for (y = 0; y < avctx->height; y++) {
1616  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1617  memset(s->ham_buf, 0, s->planesize * 8);
1618  for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
1619  decodeplane8(s->ham_buf, buf, FFMIN(s->planesize, buf_end - buf), plane);
1620  buf += s->planesize;
1621  }
1622  decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
1623  }
1624  } else { // AV_PIX_FMT_BGR32
1625  for (y = 0; y < avctx->height; y++) {
1626  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1627  memset(row, 0, avctx->width << 2);
1628  for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
1629  decodeplane32((uint32_t *)row, buf,
1630  FFMIN(s->planesize, buf_end - buf), plane);
1631  buf += s->planesize;
1632  }
1633  }
1634  }
1635  } else if (avctx->codec_tag == MKTAG('P', 'B', 'M', ' ')) { // IFF-PBM
1636  if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
1637  for (y = 0; y < avctx->height && buf_end > buf; y++) {
1638  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1639  memcpy(row, buf, FFMIN(avctx->width, buf_end - buf));
1640  buf += avctx->width + (avctx->width % 2); // padding if odd
1641  }
1642  } else if (s->ham) { // IFF-PBM: HAM to AV_PIX_FMT_BGR32
1643  for (y = 0; y < avctx->height && buf_end > buf; y++) {
1644  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1645  memcpy(s->ham_buf, buf, FFMIN(avctx->width, buf_end - buf));
1646  buf += avctx->width + (avctx->width & 1); // padding if odd
1647  decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
1648  }
1649  } else
1650  return unsupported(avctx);
1651  } else {
1652  return unsupported(avctx);
1653  }
1654  break;
1655  case 0x1:
1656  if (avctx->codec_tag == MKTAG('I', 'L', 'B', 'M') || // interleaved
1657  avctx->codec_tag == MKTAG('A', 'N', 'I', 'M')) {
1658  if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
1659  uint8_t *video = s->video[0];
1660 
1661  for (y = 0; y < avctx->height; y++) {
1662  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1663  memset(row, 0, avctx->width);
1664  for (plane = 0; plane < s->bpp; plane++) {
1665  buf += decode_byterun(s->planebuf, s->planesize, gb);
1666  if (avctx->codec_tag == MKTAG('A', 'N', 'I', 'M')) {
1667  memcpy(video, s->planebuf, s->planesize);
1668  video += s->planesize;
1669  }
1670  decodeplane8(row, s->planebuf, s->planesize, plane);
1671  }
1672  }
1673  } else if (avctx->bits_per_coded_sample <= 8) { //8-bit (+ mask) to AV_PIX_FMT_BGR32
1674  for (y = 0; y < avctx->height; y++) {
1675  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1676  memset(s->mask_buf, 0, avctx->width * sizeof(uint32_t));
1677  for (plane = 0; plane < s->bpp; plane++) {
1678  buf += decode_byterun(s->planebuf, s->planesize, gb);
1679  decodeplane32(s->mask_buf, s->planebuf, s->planesize, plane);
1680  }
1681  lookup_pal_indicies((uint32_t *)row, s->mask_buf, s->mask_palbuf, avctx->width);
1682  }
1683  } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
1684  uint8_t *video = s->video[0];
1685  for (y = 0; y < avctx->height; y++) {
1686  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1687  memset(s->ham_buf, 0, s->planesize * 8);
1688  for (plane = 0; plane < s->bpp; plane++) {
1689  buf += decode_byterun(s->planebuf, s->planesize, gb);
1690  if (avctx->codec_tag == MKTAG('A', 'N', 'I', 'M')) {
1691  memcpy(video, s->planebuf, s->planesize);
1692  video += s->planesize;
1693  }
1694  decodeplane8(s->ham_buf, s->planebuf, s->planesize, plane);
1695  }
1696  decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
1697  }
1698  } else { // AV_PIX_FMT_BGR32
1699  for (y = 0; y < avctx->height; y++) {
1700  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1701  memset(row, 0, avctx->width << 2);
1702  for (plane = 0; plane < s->bpp; plane++) {
1703  buf += decode_byterun(s->planebuf, s->planesize, gb);
1704  decodeplane32((uint32_t *)row, s->planebuf, s->planesize, plane);
1705  }
1706  }
1707  }
1708  } else if (avctx->codec_tag == MKTAG('P', 'B', 'M', ' ')) { // IFF-PBM
1709  if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
1710  for (y = 0; y < avctx->height; y++) {
1711  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1712  buf += decode_byterun(row, avctx->width, gb);
1713  }
1714  } else if (s->ham) { // IFF-PBM: HAM to AV_PIX_FMT_BGR32
1715  for (y = 0; y < avctx->height; y++) {
1716  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1717  buf += decode_byterun(s->ham_buf, avctx->width, gb);
1718  decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
1719  }
1720  } else
1721  return unsupported(avctx);
1722  } else if (avctx->codec_tag == MKTAG('D', 'E', 'E', 'P')) { // IFF-DEEP
1723  if (av_get_bits_per_pixel(desc) == 32)
1724  decode_deep_rle32(frame->data[0], buf, buf_size, avctx->width, avctx->height, frame->linesize[0]);
1725  else
1726  return unsupported(avctx);
1727  } else if (avctx->codec_tag == MKTAG('A', 'C', 'B', 'M')) {
1728  if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
1729  memset(frame->data[0], 0, avctx->height * frame->linesize[0]);
1730  for (plane = 0; plane < s->bpp; plane++) {
1731  for (y = 0; y < avctx->height && buf < buf_end; y++) {
1732  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1733  decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
1734  buf += s->planesize;
1735  }
1736  }
1737  } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
1738  memset(frame->data[0], 0, avctx->height * frame->linesize[0]);
1739  for (y = 0; y < avctx->height; y++) {
1740  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1741  memset(s->ham_buf, 0, s->planesize * 8);
1742  for (plane = 0; plane < s->bpp; plane++) {
1743  const uint8_t * start = buf + (plane * avctx->height + y) * s->planesize;
1744  if (start >= buf_end)
1745  break;
1746  decodeplane8(s->ham_buf, start, FFMIN(s->planesize, buf_end - start), plane);
1747  }
1748  decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
1749  }
1750  } else {
1751  return unsupported(avctx);
1752  }
1753  } else {
1754  return unsupported(avctx);
1755  }
1756  break;
1757  case 0x2:
1758  if (avctx->codec_tag == MKTAG('I', 'L', 'B', 'M') && avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1759  for (plane = 0; plane < s->bpp; plane++) {
1760  decode_byterun2(s->planebuf, avctx->height, s->planesize, gb);
1761  for (y = 0; y < avctx->height; y++) {
1762  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1763  decodeplane8(row, s->planebuf + s->planesize * y, s->planesize, plane);
1764  }
1765  }
1766  } else {
1767  return unsupported(avctx);
1768  }
1769  break;
1770  case 0x4:
1771  if (avctx->codec_tag == MKTAG('R', 'G', 'B', '8') && avctx->pix_fmt == AV_PIX_FMT_RGB32)
1772  decode_rgb8(gb, frame->data[0], avctx->width, avctx->height, frame->linesize[0]);
1773  else if (avctx->codec_tag == MKTAG('R', 'G', 'B', 'N') && avctx->pix_fmt == AV_PIX_FMT_RGB444)
1774  decode_rgbn(gb, frame->data[0], avctx->width, avctx->height, frame->linesize[0]);
1775  else
1776  return unsupported(avctx);
1777  break;
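     /* compression 5: DEEP TVDC coding into a 32-bit buffer, driven by
      * the 16-entry tvdc table parsed from the header. */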
1778  case 0x5:
1779  if (avctx->codec_tag == MKTAG('D', 'E', 'E', 'P')) {
1780  if (av_get_bits_per_pixel(desc) == 32)
1781  decode_deep_tvdc32(frame->data[0], buf, buf_size, avctx->width, avctx->height, frame->linesize[0], s->tvdc);
1782  else
1783  return unsupported(avctx);
1784  } else
1785  return unsupported(avctx);
1786  break;
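     /* ANIM delta ops follow: each op decodes into the planar work buffer
      * s->video[0], which is converted to output pixels further down.
      * This one is the short-word horizontal delta. */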
1787  case 0x300:
1788  case 0x301:
1789  decode_short_horizontal_delta(s->video[0], buf, buf_end, avctx->width, s->bpp, s->video_size);
1790  break;
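     /* byte vertical delta; s->is_brush selects the XOR (ANBR) variant */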
1791  case 0x500:
1792  case 0x501:
1793  decode_byte_vertical_delta(s->video[0], buf, buf_end, avctx->width, s->is_brush, s->bpp, s->video_size);
1794  break;
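     /* vertical delta, using short or long words depending on the header flag */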
1795  case 0x700:
1796  case 0x701:
1797  if (s->is_short)
1798  decode_short_vertical_delta(s->video[0], buf, buf_end, avctx->width, s->bpp, s->video_size);
1799  else
1800  decode_long_vertical_delta(s->video[0], buf, buf_end, avctx->width, s->bpp, s->video_size);
1801  break;
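     /* second vertical-delta variant, again short or long words */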
1802  case 0x800:
1803  case 0x801:
1804  if (s->is_short)
1805  decode_short_vertical_delta2(s->video[0], buf, buf_end, avctx->width, s->bpp, s->video_size);
1806  else
1807  decode_long_vertical_delta2(s->video[0], buf, buf_end, avctx->width, s->bpp, s->video_size);
1808  break;
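     /* op 0x4a ('J'): ANIM-J style delta */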
1809  case 0x4a00:
1810  case 0x4a01:
1811  decode_delta_j(s->video[0], buf, buf_end, avctx->width, avctx->height, s->bpp, s->video_size);
1812  break;
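     /* op 0x64 ('d'): delta format; interlaced input is not supported */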
1813  case 0x6400:
1814  case 0x6401:
1815  if (s->is_interlaced)
1816  return unsupported(avctx);
1817  decode_delta_d(s->video[0], buf, buf_end, avctx->width, s->is_interlaced, s->bpp, s->video_size);
1818  break;
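     /* op 0x65 ('e'): delta format; interlaced input is not supported */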
1819  case 0x6500:
1820  case 0x6501:
1821  if (s->is_interlaced)
1822  return unsupported(avctx);
1823  decode_delta_e(s->video[0], buf, buf_end, avctx->width, s->is_interlaced, s->bpp, s->video_size);
1824  break;
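     /* op 0x6c ('l'): delta format, word size chosen by s->is_short */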
1825  case 0x6c00:
1826  case 0x6c01:
1827  decode_delta_l(s->video[0], buf, buf_end, avctx->width, s->is_short, s->bpp, s->video_size);
1828  break;
1829  default:
1830  return unsupported(avctx);
1831  }
1832 
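     /* Intra (ILBM-style) methods also serve as ANIM key frames: keep a
      * copy of the decoded planes as the reference for later delta frames. */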
1833  if (s->compression <= 0xff && (avctx->codec_tag == MKTAG('A', 'N', 'I', 'M'))) {
1834  memcpy(s->video[1], s->video[0], s->video_size);
1835  }
1836 
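     /* Delta-compressed frames were decoded into the planar work buffer;
      * convert planar->chunky into the output picture here. */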
1837  if (s->compression > 0xff) {
1838  if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
1839  buf = s->video[0];
1840  for (y = 0; y < avctx->height; y++) {
1841  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1842  memset(row, 0, avctx->width);
1843  for (plane = 0; plane < s->bpp; plane++) {
1844  decodeplane8(row, buf, s->planesize, plane);
1845  buf += s->planesize;
1846  }
1847  }
1848  if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
1849  memcpy(frame->data[1], s->pal, 256 * 4);
1850  } else if (s->ham) {
1851  int i, count = 1 << s->ham;
1852 
1853  buf = s->video[0];
1854  memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof(uint32_t));
1855  for (i = 0; i < count; i++) {
1856  s->ham_palbuf[i*2+1] = s->pal[i];
1857  }
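             /* Build the HAM (mask, value) pair table: entries [0, count)
              * replace the whole pixel with a base palette colour, while
              * entries [count, 4*count) hold the previous pixel and replace
              * a single 8-bit channel with the ham-bit value replicated up
              * to 8 bits (tmp). */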
1858  for (i = 0; i < count; i++) {
1859  uint32_t tmp = i << (8 - s->ham);
1860  tmp |= tmp >> s->ham;
1861  s->ham_palbuf[(i+count)*2] = 0xFF00FFFF;
1862  s->ham_palbuf[(i+count*2)*2] = 0xFFFFFF00;
1863  s->ham_palbuf[(i+count*3)*2] = 0xFFFF00FF;
1864  s->ham_palbuf[(i+count)*2+1] = 0xFF000000 | tmp << 16;
1865  s->ham_palbuf[(i+count*2)*2+1] = 0xFF000000 | tmp;
1866  s->ham_palbuf[(i+count*3)*2+1] = 0xFF000000 | tmp << 8;
1867  }
1868  if (s->masking == MASK_HAS_MASK) {
1869  for (i = 0; i < 8 * (1 << s->ham); i++)
1870  s->ham_palbuf[(1 << s->bpp) + i] = s->ham_palbuf[i] | 0xFF000000;
1871  }
1872  for (y = 0; y < avctx->height; y++) {
1873  uint8_t *row = &frame->data[0][y * frame->linesize[0]];
1874  memset(s->ham_buf, 0, s->planesize * 8);
1875  for (plane = 0; plane < s->bpp; plane++) {
1876  decodeplane8(s->ham_buf, buf, s->planesize, plane);
1877  buf += s->planesize;
1878  }
1879  decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
1880  }
1881  } else {
1882  return unsupported(avctx);
1883  }
1884 
1885  if (!s->is_brush) {
1886  FFSWAP(uint8_t *, s->video[0], s->video[1]);
1887  }
1888  }
1889 
1890  if (avpkt->flags & AV_PKT_FLAG_KEY) {
1891  frame->key_frame = 1;
1892  frame->pict_type = AV_PICTURE_TYPE_I;
1893  } else {
1894  frame->key_frame = 0;
1895  frame->pict_type = AV_PICTURE_TYPE_P;
1896  }
1897 
1898  *got_frame = 1;
1899 
1900  return buf_size;
1901 }
1902 
1903 #if CONFIG_IFF_ILBM_DECODER
1904 AVCodec ff_iff_ilbm_decoder = {
1905  .name = "iff",
1906  .long_name = NULL_IF_CONFIG_SMALL("IFF ACBM/ANIM/DEEP/ILBM/PBM/RGB8/RGBN"),
1907  .type = AVMEDIA_TYPE_VIDEO,
1908  .id = AV_CODEC_ID_IFF_ILBM,
1909  .priv_data_size = sizeof(IffContext),
1910  .init = decode_init,
1911  .close = decode_end,
1912  .decode = decode_frame,
1913  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1914  .capabilities = AV_CODEC_CAP_DR1,
1915 };
1916 #endif