FFmpeg
xan.c
1 /*
2  * Wing Commander/Xan Video Decoder
3  * Copyright (C) 2003 The FFmpeg project
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Xan video decoder for Wing Commander III computer game
25  * by Mario Brito (mbrito@student.dei.uc.pt)
26  * and Mike Melanson (melanson@pcisys.net)
27  *
28  * The xan_wc3 decoder outputs PAL8 data.
29  */
30 
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <string.h>
34 
35 #include "libavutil/intreadwrite.h"
36 #include "libavutil/mem.h"
37 
38 #define BITSTREAM_READER_LE
39 #include "avcodec.h"
40 #include "bytestream.h"
41 #include "get_bits.h"
42 #include "internal.h"
43 
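/* When RUNTIME_GAMMA is non-zero, the palette gamma correction is computed at
 * runtime with the fixed-point code below; otherwise the precomputed
 * gamma_lookup table is used. */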
44 #define RUNTIME_GAMMA 0
45 
46 #define VGA__TAG MKTAG('V', 'G', 'A', ' ')
47 #define PALT_TAG MKTAG('P', 'A', 'L', 'T')
48 #define SHOT_TAG MKTAG('S', 'H', 'O', 'T')
49 #define PALETTE_COUNT 256
50 #define PALETTE_SIZE (PALETTE_COUNT * 3)
51 #define PALETTES_MAX 256
52 
53 typedef struct XanContext {
54 
55  AVCodecContext *avctx;
56  AVFrame *last_frame;
57 
58  const uint8_t *buf;
59  int size;
60 
61  /* scratch space */
62  uint8_t *buffer1;
63  int buffer1_size;
64  uint8_t *buffer2;
65  int buffer2_size;
66 
67  unsigned *palettes;
68  int palettes_count;
69  int cur_palette;
70 
71  int frame_size;
72 
73 } XanContext;
74 
75 static av_cold int xan_decode_end(AVCodecContext *avctx)
76 {
77  XanContext *s = avctx->priv_data;
78 
79  av_frame_free(&s->last_frame);
80 
81  av_freep(&s->buffer1);
82  av_freep(&s->buffer2);
83  av_freep(&s->palettes);
84 
85  return 0;
86 }
87 
88 static av_cold int xan_decode_init(AVCodecContext *avctx)
89 {
90  XanContext *s = avctx->priv_data;
91 
92  s->avctx = avctx;
93  s->frame_size = 0;
94 
95  avctx->pix_fmt = AV_PIX_FMT_PAL8;
96 
97  s->buffer1_size = avctx->width * avctx->height;
98  s->buffer1 = av_malloc(s->buffer1_size);
99  if (!s->buffer1)
100  return AVERROR(ENOMEM);
101  s->buffer2_size = avctx->width * avctx->height;
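 /* xan_unpack() requires its destination to be padded by at least 130 bytes,
  * hence the extra allocation below */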
102  s->buffer2 = av_malloc(s->buffer2_size + 130);
103  if (!s->buffer2)
104  return AVERROR(ENOMEM);
105 
106  s->last_frame = av_frame_alloc();
107  if (!s->last_frame)
108  return AVERROR(ENOMEM);
109 
110  return 0;
111 }
112 
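/*
 * Decode a Huffman-coded segment: the first src byte gives the number of
 * internal tree nodes, followed by the node table (two child entries per
 * node) and then the little-endian bitstream.  Symbol values below 0x16 are
 * literal output bytes, 0x16 terminates the stream, and larger values index
 * further internal nodes.
 */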
113 static int xan_huffman_decode(uint8_t *dest, int dest_len,
114  const uint8_t *src, int src_len)
115 {
116  uint8_t byte = *src++;
117  uint8_t ival = byte + 0x16;
118  const uint8_t * ptr = src + byte*2;
119  int ptr_len = src_len - 1 - byte*2;
120  uint8_t val = ival;
121  uint8_t *dest_end = dest + dest_len;
122  uint8_t *dest_start = dest;
123  int ret;
124  GetBitContext gb;
125 
126  if ((ret = init_get_bits8(&gb, ptr, ptr_len)) < 0)
127  return ret;
128 
129  while (val != 0x16) {
130  unsigned idx;
131  if (get_bits_left(&gb) < 1)
132  return AVERROR_INVALIDDATA;
133  idx = val - 0x17 + get_bits1(&gb) * byte;
134  if (idx >= 2 * byte)
135  return AVERROR_INVALIDDATA;
136  val = src[idx];
137 
138  if (val < 0x16) {
139  if (dest >= dest_end)
140  return dest_len;
141  *dest++ = val;
142  val = ival;
143  }
144  }
145 
146  return dest - dest_start;
147 }
148 
149 /**
150  * unpack simple compression
151  *
152  * @param dest destination buffer of dest_len, must be padded with at least 130 bytes
153  */
154 static void xan_unpack(uint8_t *dest, int dest_len,
155  const uint8_t *src, int src_len)
156 {
157  uint8_t opcode;
158  int size;
159  uint8_t *dest_org = dest;
160  uint8_t *dest_end = dest + dest_len;
161  GetByteContext ctx;
162 
163  bytestream2_init(&ctx, src, src_len);
164  while (dest < dest_end && bytestream2_get_bytes_left(&ctx)) {
165  opcode = bytestream2_get_byte(&ctx);
166 
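 /* Opcodes below 0xe0 encode a short literal run of size bytes followed by a
  * back-reference copy of size2 bytes from offset -back; the three
  * sub-formats only differ in how size2 and back are packed.  Opcodes
  * 0xe0..0xfb encode a longer literal run, and 0xfc..0xff a final short
  * literal run that also terminates unpacking. */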
167  if (opcode < 0xe0) {
168  int size2, back;
169  if ((opcode & 0x80) == 0) {
170  size = opcode & 3;
171 
172  back = ((opcode & 0x60) << 3) + bytestream2_get_byte(&ctx) + 1;
173  size2 = ((opcode & 0x1c) >> 2) + 3;
174  } else if ((opcode & 0x40) == 0) {
175  size = bytestream2_peek_byte(&ctx) >> 6;
176 
177  back = (bytestream2_get_be16(&ctx) & 0x3fff) + 1;
178  size2 = (opcode & 0x3f) + 4;
179  } else {
180  size = opcode & 3;
181 
182  back = ((opcode & 0x10) << 12) + bytestream2_get_be16(&ctx) + 1;
183  size2 = ((opcode & 0x0c) << 6) + bytestream2_get_byte(&ctx) + 5;
184  }
185 
186  if (dest_end - dest < size + size2 ||
187  dest + size - dest_org < back ||
188  bytestream2_get_bytes_left(&ctx) < size)
189  return;
190  bytestream2_get_buffer(&ctx, dest, size);
191  dest += size;
192  av_memcpy_backptr(dest, back, size2);
193  dest += size2;
194  } else {
195  int finish = opcode >= 0xfc;
196  size = finish ? opcode & 3 : ((opcode & 0x1f) << 2) + 4;
197 
198  if (dest_end - dest < size || bytestream2_get_bytes_left(&ctx) < size)
199  return;
200  bytestream2_get_buffer(&ctx, dest, size);
201  dest += size;
202  if (finish)
203  return;
204  }
205  }
206 }
207 
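/* Copy pixel_count palette indices from pixel_buffer into the frame starting
 * at column x of line y, wrapping to the start of the next line whenever the
 * frame width is reached. */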
208 static void xan_wc3_output_pixel_run(XanContext *s, AVFrame *frame,
209  const uint8_t *pixel_buffer, int x, int y, int pixel_count)
210 {
211  int stride;
212  int line_inc;
213  int index;
214  int current_x;
215  int width = s->avctx->width;
216  uint8_t *palette_plane;
217 
218  palette_plane = frame->data[0];
219  stride = frame->linesize[0];
220  line_inc = stride - width;
221  index = y * stride + x;
222  current_x = x;
223  while (pixel_count && index < s->frame_size) {
224  int count = FFMIN(pixel_count, width - current_x);
225  memcpy(palette_plane + index, pixel_buffer, count);
226  pixel_count -= count;
227  index += count;
228  pixel_buffer += count;
229  current_x += count;
230 
231  if (current_x >= width) {
232  index += line_inc;
233  current_x = 0;
234  }
235  }
236 }
237 
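/* Copy a run of pixel_count pixels from the previous frame (or from the
 * current frame when no previous frame exists), displaced by the motion
 * vector (motion_x, motion_y), wrapping at the frame width like the
 * output run above. */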
238 static void xan_wc3_copy_pixel_run(XanContext *s, AVFrame *frame,
239  int x, int y,
240  int pixel_count, int motion_x,
241  int motion_y)
242 {
243  int stride;
244  int line_inc;
245  int curframe_index, prevframe_index;
246  int curframe_x, prevframe_x;
247  int width = s->avctx->width;
248  uint8_t *palette_plane, *prev_palette_plane;
249 
250  if (y + motion_y < 0 || y + motion_y >= s->avctx->height ||
251  x + motion_x < 0 || x + motion_x >= s->avctx->width)
252  return;
253 
254  palette_plane = frame->data[0];
255  prev_palette_plane = s->last_frame->data[0];
256  if (!prev_palette_plane)
257  prev_palette_plane = palette_plane;
258  stride = frame->linesize[0];
259  line_inc = stride - width;
260  curframe_index = y * stride + x;
261  curframe_x = x;
262  prevframe_index = (y + motion_y) * stride + x + motion_x;
263  prevframe_x = x + motion_x;
264 
265  if (prev_palette_plane == palette_plane && FFABS(motion_x + width*motion_y) < pixel_count) {
266  avpriv_request_sample(s->avctx, "Overlapping copy");
267  return ;
268  }
269 
270  while (pixel_count &&
271  curframe_index < s->frame_size &&
272  prevframe_index < s->frame_size) {
273  int count = FFMIN3(pixel_count, width - curframe_x,
274  width - prevframe_x);
275 
276  memcpy(palette_plane + curframe_index,
277  prev_palette_plane + prevframe_index, count);
278  pixel_count -= count;
279  curframe_index += count;
280  prevframe_index += count;
281  curframe_x += count;
282  prevframe_x += count;
283 
284  if (curframe_x >= width) {
285  curframe_index += line_inc;
286  curframe_x = 0;
287  }
288 
289  if (prevframe_x >= width) {
290  prevframe_index += line_inc;
291  prevframe_x = 0;
292  }
293  }
294 }
295 
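/* A WC3 frame chunk starts with four little-endian 16-bit offsets pointing at
 * the Huffman-coded opcode segment, the run-size segment, the motion-vector
 * segment and the image-data segment; the decoded opcodes determine which
 * segment each run of pixels is taken from. */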
296 static int xan_wc3_decode_frame(XanContext *s, AVFrame *frame)
297 {
298 
299  int width = s->avctx->width;
300  int height = s->avctx->height;
301  int total_pixels = width * height;
302  uint8_t opcode;
303  uint8_t flag = 0;
304  int size = 0;
305  int motion_x, motion_y;
306  int x, y, ret;
307 
308  uint8_t *opcode_buffer = s->buffer1;
309  uint8_t *opcode_buffer_end = s->buffer1 + s->buffer1_size;
310  int opcode_buffer_size = s->buffer1_size;
311  const uint8_t *imagedata_buffer = s->buffer2;
312 
313  /* pointers to segments inside the compressed chunk */
314  const uint8_t *huffman_segment;
315  GetByteContext size_segment;
316  GetByteContext vector_segment;
317  const uint8_t *imagedata_segment;
318  int huffman_offset, size_offset, vector_offset, imagedata_offset,
319  imagedata_size;
320 
321  if (s->size < 8)
322  return AVERROR_INVALIDDATA;
323 
324  huffman_offset = AV_RL16(&s->buf[0]);
325  size_offset = AV_RL16(&s->buf[2]);
326  vector_offset = AV_RL16(&s->buf[4]);
327  imagedata_offset = AV_RL16(&s->buf[6]);
328 
329  if (huffman_offset >= s->size ||
330  size_offset >= s->size ||
331  vector_offset >= s->size ||
332  imagedata_offset >= s->size)
333  return AVERROR_INVALIDDATA;
334 
335  huffman_segment = s->buf + huffman_offset;
336  bytestream2_init(&size_segment, s->buf + size_offset, s->size - size_offset);
337  bytestream2_init(&vector_segment, s->buf + vector_offset, s->size - vector_offset);
338  imagedata_segment = s->buf + imagedata_offset;
339 
340  if ((ret = xan_huffman_decode(opcode_buffer, opcode_buffer_size,
341  huffman_segment, s->size - huffman_offset)) < 0)
342  return AVERROR_INVALIDDATA;
343  opcode_buffer_end = opcode_buffer + ret;
344 
345  if (imagedata_segment[0] == 2) {
346  xan_unpack(s->buffer2, s->buffer2_size,
347  &imagedata_segment[1], s->size - imagedata_offset - 1);
348  imagedata_size = s->buffer2_size;
349  } else {
350  imagedata_size = s->size - imagedata_offset - 1;
351  imagedata_buffer = &imagedata_segment[1];
352  }
353 
354  /* use the decoded data segments to build the frame */
355  x = y = 0;
356  while (total_pixels && opcode_buffer < opcode_buffer_end) {
357 
358  opcode = *opcode_buffer++;
359  size = 0;
360 
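 /* Opcodes 1..8 encode the run length directly (length = opcode) and
  * 12..18 as opcode - 10; 9..11 and 19..21 fetch an 8-, 16- or 24-bit
  * length from the size segment; opcode 0 only toggles the flag.  Runs
  * from opcodes below 12 alternate between "unchanged from the previous
  * frame" and "new pixels from the image data", while 12 and above copy
  * from the previous frame using a motion vector. */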
361  switch (opcode) {
362 
363  case 0:
364  flag ^= 1;
365  continue;
366 
367  case 1:
368  case 2:
369  case 3:
370  case 4:
371  case 5:
372  case 6:
373  case 7:
374  case 8:
375  size = opcode;
376  break;
377 
378  case 12:
379  case 13:
380  case 14:
381  case 15:
382  case 16:
383  case 17:
384  case 18:
385  size += (opcode - 10);
386  break;
387 
388  case 9:
389  case 19:
390  if (bytestream2_get_bytes_left(&size_segment) < 1) {
391  av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n");
392  return AVERROR_INVALIDDATA;
393  }
394  size = bytestream2_get_byte(&size_segment);
395  break;
396 
397  case 10:
398  case 20:
399  if (bytestream2_get_bytes_left(&size_segment) < 2) {
400  av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n");
401  return AVERROR_INVALIDDATA;
402  }
403  size = bytestream2_get_be16(&size_segment);
404  break;
405 
406  case 11:
407  case 21:
408  if (bytestream2_get_bytes_left(&size_segment) < 3) {
409  av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n");
410  return AVERROR_INVALIDDATA;
411  }
412  size = bytestream2_get_be24(&size_segment);
413  break;
414  }
415 
416  if (size > total_pixels)
417  break;
418 
419  if (opcode < 12) {
420  flag ^= 1;
421  if (flag) {
422  /* run of (size) pixels is unchanged from last frame */
423  xan_wc3_copy_pixel_run(s, frame, x, y, size, 0, 0);
424  } else {
425  /* output a run of pixels from imagedata_buffer */
426  if (imagedata_size < size)
427  break;
428  xan_wc3_output_pixel_run(s, frame, imagedata_buffer, x, y, size);
429  imagedata_buffer += size;
430  imagedata_size -= size;
431  }
432  } else {
433  uint8_t vector;
434  if (bytestream2_get_bytes_left(&vector_segment) <= 0) {
435  av_log(s->avctx, AV_LOG_ERROR, "vector_segment overread\n");
436  return AVERROR_INVALIDDATA;
437  }
438  /* run-based motion compensation from last frame */
439  vector = bytestream2_get_byte(&vector_segment);
440  motion_x = sign_extend(vector >> 4, 4);
441  motion_y = sign_extend(vector & 0xF, 4);
442 
443  /* copy a run of pixels from the previous frame */
444  xan_wc3_copy_pixel_run(s, frame, x, y, size, motion_x, motion_y);
445 
446  flag = 0;
447  }
448 
449  /* coordinate accounting */
450  total_pixels -= size;
451  y += (x + size) / width;
452  x = (x + size) % width;
453  }
454  return 0;
455 }
456 
457 #if RUNTIME_GAMMA
458 static inline unsigned mul(unsigned a, unsigned b)
459 {
460  return (a * b) >> 16;
461 }
462 
463 static inline unsigned pow4(unsigned a)
464 {
465  unsigned square = mul(a, a);
466  return mul(square, square);
467 }
468 
469 static inline unsigned pow5(unsigned a)
470 {
471  return mul(pow4(a), a);
472 }
473 
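/* Binary-search for the m with m^5 == x in 16.16 fixed point, then return
 * m^4, i.e. x^(4/5) = x^0.8, after rotating the input left by two bits. */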
474 static uint8_t gamma_corr(uint8_t in) {
475  unsigned lo, hi = 0xff40, target;
476  int i = 15;
477  in = (in << 2) | (in >> 6);
478  /* equivalent float code:
479  if (in >= 252)
480  return 253;
481  return round(pow(in / 256.0, 0.8) * 256);
482  */
483  lo = target = in << 8;
484  do {
485  unsigned mid = (lo + hi) >> 1;
486  unsigned pow = pow5(mid);
487  if (pow > target) hi = mid;
488  else lo = mid;
489  } while (--i);
490  return (pow4((lo + hi) >> 1) + 0x80) >> 8;
491 }
492 #else
493 /**
494  * This is a gamma correction that xan3 applies to all palette entries.
495  *
496  * There is a peculiarity, namely that the values are clamped to 253 -
497  * it seems likely that this table was calculated by a buggy fixed-point
498  * implementation, the one above under RUNTIME_GAMMA behaves like this for
499  * example.
500  * The exponent value of 0.8 can be explained by this as well, since 0.8 = 4/5
501  * and thus pow(x, 0.8) is still easy to calculate.
502  * Also, the input values are first rotated to the left by 2.
503  */
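/* For example, entry 1: the input rotates to 4, and
 * round(pow(4 / 256.0, 0.8) * 256) = 9 = 0x09, the value stored below. */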
504 static const uint8_t gamma_lookup[256] = {
505  0x00, 0x09, 0x10, 0x16, 0x1C, 0x21, 0x27, 0x2C,
506  0x31, 0x35, 0x3A, 0x3F, 0x43, 0x48, 0x4C, 0x50,
507  0x54, 0x59, 0x5D, 0x61, 0x65, 0x69, 0x6D, 0x71,
508  0x75, 0x79, 0x7D, 0x80, 0x84, 0x88, 0x8C, 0x8F,
509  0x93, 0x97, 0x9A, 0x9E, 0xA2, 0xA5, 0xA9, 0xAC,
510  0xB0, 0xB3, 0xB7, 0xBA, 0xBE, 0xC1, 0xC5, 0xC8,
511  0xCB, 0xCF, 0xD2, 0xD5, 0xD9, 0xDC, 0xDF, 0xE3,
512  0xE6, 0xE9, 0xED, 0xF0, 0xF3, 0xF6, 0xFA, 0xFD,
513  0x03, 0x0B, 0x12, 0x18, 0x1D, 0x23, 0x28, 0x2D,
514  0x32, 0x36, 0x3B, 0x40, 0x44, 0x49, 0x4D, 0x51,
515  0x56, 0x5A, 0x5E, 0x62, 0x66, 0x6A, 0x6E, 0x72,
516  0x76, 0x7A, 0x7D, 0x81, 0x85, 0x89, 0x8D, 0x90,
517  0x94, 0x98, 0x9B, 0x9F, 0xA2, 0xA6, 0xAA, 0xAD,
518  0xB1, 0xB4, 0xB8, 0xBB, 0xBF, 0xC2, 0xC5, 0xC9,
519  0xCC, 0xD0, 0xD3, 0xD6, 0xDA, 0xDD, 0xE0, 0xE4,
520  0xE7, 0xEA, 0xED, 0xF1, 0xF4, 0xF7, 0xFA, 0xFD,
521  0x05, 0x0D, 0x13, 0x19, 0x1F, 0x24, 0x29, 0x2E,
522  0x33, 0x38, 0x3C, 0x41, 0x45, 0x4A, 0x4E, 0x52,
523  0x57, 0x5B, 0x5F, 0x63, 0x67, 0x6B, 0x6F, 0x73,
524  0x77, 0x7B, 0x7E, 0x82, 0x86, 0x8A, 0x8D, 0x91,
525  0x95, 0x99, 0x9C, 0xA0, 0xA3, 0xA7, 0xAA, 0xAE,
526  0xB2, 0xB5, 0xB9, 0xBC, 0xBF, 0xC3, 0xC6, 0xCA,
527  0xCD, 0xD0, 0xD4, 0xD7, 0xDA, 0xDE, 0xE1, 0xE4,
528  0xE8, 0xEB, 0xEE, 0xF1, 0xF5, 0xF8, 0xFB, 0xFD,
529  0x07, 0x0E, 0x15, 0x1A, 0x20, 0x25, 0x2A, 0x2F,
530  0x34, 0x39, 0x3D, 0x42, 0x46, 0x4B, 0x4F, 0x53,
531  0x58, 0x5C, 0x60, 0x64, 0x68, 0x6C, 0x70, 0x74,
532  0x78, 0x7C, 0x7F, 0x83, 0x87, 0x8B, 0x8E, 0x92,
533  0x96, 0x99, 0x9D, 0xA1, 0xA4, 0xA8, 0xAB, 0xAF,
534  0xB2, 0xB6, 0xB9, 0xBD, 0xC0, 0xC4, 0xC7, 0xCB,
535  0xCE, 0xD1, 0xD5, 0xD8, 0xDB, 0xDF, 0xE2, 0xE5,
536  0xE9, 0xEC, 0xEF, 0xF2, 0xF6, 0xF9, 0xFC, 0xFD
537 };
538 #endif
539 
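/* A packet is a sequence of tagged chunks (PALT palette data, SHOT palette
 * selection, VGA_ frame data); palettes are accumulated in s->palettes and
 * the remaining VGA_ payload is handed to xan_wc3_decode_frame(). */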
540 static int xan_decode_frame(AVCodecContext *avctx,
541  void *data, int *got_frame,
542  AVPacket *avpkt)
543 {
544  AVFrame *frame = data;
545  const uint8_t *buf = avpkt->data;
546  int ret, buf_size = avpkt->size;
547  XanContext *s = avctx->priv_data;
548  GetByteContext ctx;
549  int tag = 0;
550 
551  bytestream2_init(&ctx, buf, buf_size);
552  while (bytestream2_get_bytes_left(&ctx) > 8 && tag != VGA__TAG) {
553  unsigned *tmpptr;
554  uint32_t new_pal;
555  int size;
556  int i;
557  tag = bytestream2_get_le32(&ctx);
558  size = bytestream2_get_be32(&ctx);
559  if (size < 0) {
560  av_log(avctx, AV_LOG_ERROR, "Invalid tag size %d\n", size);
561  return AVERROR_INVALIDDATA;
562  }
563  size = FFMIN(size, bytestream2_get_bytes_left(&ctx));
564  switch (tag) {
565  case PALT_TAG:
566  if (size < PALETTE_SIZE)
567  return AVERROR_INVALIDDATA;
568  if (s->palettes_count >= PALETTES_MAX)
569  return AVERROR_INVALIDDATA;
570  tmpptr = av_realloc_array(s->palettes,
571  s->palettes_count + 1, AVPALETTE_SIZE);
572  if (!tmpptr)
573  return AVERROR(ENOMEM);
574  s->palettes = tmpptr;
575  tmpptr += s->palettes_count * AVPALETTE_COUNT;
576  for (i = 0; i < PALETTE_COUNT; i++) {
577 #if RUNTIME_GAMMA
578  int r = gamma_corr(bytestream2_get_byteu(&ctx));
579  int g = gamma_corr(bytestream2_get_byteu(&ctx));
580  int b = gamma_corr(bytestream2_get_byteu(&ctx));
581 #else
582  int r = gamma_lookup[bytestream2_get_byteu(&ctx)];
583  int g = gamma_lookup[bytestream2_get_byteu(&ctx)];
584  int b = gamma_lookup[bytestream2_get_byteu(&ctx)];
585 #endif
586  *tmpptr++ = (0xFFU << 24) | (r << 16) | (g << 8) | b;
587  }
588  s->palettes_count++;
589  break;
590  case SHOT_TAG:
591  if (size < 4)
592  return AVERROR_INVALIDDATA;
593  new_pal = bytestream2_get_le32(&ctx);
594  if (new_pal < s->palettes_count) {
595  s->cur_palette = new_pal;
596  } else
597  av_log(avctx, AV_LOG_ERROR, "Invalid palette selected\n");
598  break;
599  case VGA__TAG:
600  break;
601  default:
602  bytestream2_skip(&ctx, size);
603  break;
604  }
605  }
606  buf_size = bytestream2_get_bytes_left(&ctx);
607 
608  if (s->palettes_count <= 0) {
609  av_log(s->avctx, AV_LOG_ERROR, "No palette found\n");
610  return AVERROR_INVALIDDATA;
611  }
612 
613  if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
614  return ret;
615 
616  if (!s->frame_size)
617  s->frame_size = frame->linesize[0] * s->avctx->height;
618 
619  memcpy(frame->data[1],
620  s->palettes + s->cur_palette * AVPALETTE_COUNT, AVPALETTE_SIZE);
621 
622  s->buf = ctx.buffer;
623  s->size = buf_size;
624 
625  if (xan_wc3_decode_frame(s, frame) < 0)
626  return AVERROR_INVALIDDATA;
627 
628  av_frame_unref(s->last_frame);
629  if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
630  return ret;
631 
632  *got_frame = 1;
633 
634  /* always report that the buffer was completely consumed */
635  return buf_size;
636 }
637 
639  .name = "xan_wc3",
640  .long_name = NULL_IF_CONFIG_SMALL("Wing Commander III / Xan"),
641  .type = AVMEDIA_TYPE_VIDEO,
642  .id = AV_CODEC_ID_XAN_WC3,
643  .priv_data_size = sizeof(XanContext),
644  .init = xan_decode_init,
645  .close = xan_decode_end,
646  .decode = xan_decode_frame,
647  .capabilities = AV_CODEC_CAP_DR1,
648  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
649 };