fic.c
/*
 * Mirillis FIC decoder
 *
 * Copyright (c) 2014 Konstantin Shishkov
 * Copyright (c) 2014 Derek Buitenhuis
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
#include "golomb.h"

typedef struct FICThreadContext {
    DECLARE_ALIGNED(16, int16_t, block)[64];
    uint8_t *src;
    int slice_h;
    int src_size;
    int y_off;
    int p_frame;
} FICThreadContext;

typedef struct FICContext {
    AVClass *class;
    AVCodecContext *avctx;
    AVFrame *frame;
    AVFrame *final_frame;

    FICThreadContext *slice_data;
    int slice_data_size;

    const uint8_t *qmat;

    enum AVPictureType cur_frame_type;

    int aligned_width, aligned_height;
    int num_slices, slice_h;

    uint8_t cursor_buf[4096];
    int skip_cursor;
} FICContext;

static const uint8_t fic_qmat_hq[64] = {
    1, 2, 2, 2, 3, 3, 3, 4,
    2, 2, 2, 3, 3, 3, 4, 4,
    2, 2, 3, 3, 3, 4, 4, 4,
    2, 2, 3, 3, 3, 4, 4, 5,
    2, 3, 3, 3, 4, 4, 5, 6,
    3, 3, 3, 4, 4, 5, 6, 7,
    3, 3, 3, 4, 4, 5, 7, 7,
    3, 3, 4, 4, 5, 7, 7, 7,
};

static const uint8_t fic_qmat_lq[64] = {
    1, 5,  6,  7,  8,  9,  9, 11,
    5, 5,  7,  8,  9,  9, 11, 12,
    6, 7,  8,  9,  9, 11, 11, 12,
    7, 7,  8,  9,  9, 11, 12, 13,
    7, 8,  9,  9, 10, 11, 13, 16,
    8, 9,  9, 10, 11, 13, 16, 19,
    8, 9,  9, 11, 12, 15, 18, 23,
    9, 9, 11, 12, 15, 18, 23, 27
};

static const uint8_t fic_header[7] = { 0, 0, 1, 'F', 'I', 'C', 'V' };

#define FIC_HEADER_SIZE 27
#define CURSOR_OFFSET 59

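/*
 * One-dimensional 8-point fixed-point IDCT over the coefficients
 * blk[0 * step] .. blk[7 * step], computed in place.  fic_idct_put()
 * below runs it once per column (step = 8) and once per row (step = 1),
 * with different rounding constants and output shifts for the two passes.
 */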
static av_always_inline void fic_idct(int16_t *blk, int step, int shift, int rnd)
{
    const unsigned t0 = 27246 * blk[3 * step] + 18405 * blk[5 * step];
    const unsigned t1 = 27246 * blk[5 * step] - 18405 * blk[3 * step];
    const unsigned t2 =  6393 * blk[7 * step] + 32139 * blk[1 * step];
    const unsigned t3 =  6393 * blk[1 * step] - 32139 * blk[7 * step];
    const unsigned t4 = 5793U * ((int)(t2 + t0 + 0x800) >> 12);
    const unsigned t5 = 5793U * ((int)(t3 + t1 + 0x800) >> 12);
    const unsigned t6 = t2 - t0;
    const unsigned t7 = t3 - t1;
    const unsigned t8 = 17734 * blk[2 * step] - 42813 * blk[6 * step];
    const unsigned t9 = 17734 * blk[6 * step] + 42814 * blk[2 * step];
    const unsigned tA = (blk[0 * step] - blk[4 * step]) * 32768 + rnd;
    const unsigned tB = (blk[0 * step] + blk[4 * step]) * 32768 + rnd;
    blk[0 * step] = (int)(  t4       + t9 + tB) >> shift;
    blk[1 * step] = (int)(  t6 + t7  + t8 + tA) >> shift;
    blk[2 * step] = (int)(  t6 - t7  - t8 + tA) >> shift;
    blk[3 * step] = (int)(  t5       - t9 + tB) >> shift;
    blk[4 * step] = (int)( -t5       - t9 + tB) >> shift;
    blk[5 * step] = (int)(-(t6 - t7) - t8 + tA) >> shift;
    blk[6 * step] = (int)(-(t6 + t7) + t8 + tA) >> shift;
    blk[7 * step] = (int)( -t4       + t9 + tB) >> shift;
}

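/*
 * Full 8x8 inverse transform of one block: a column pass (the first
 * column gets an extra rounding bias), a row pass, and a final clip of
 * the samples to the 0..255 range before they are stored at dst.
 */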
static void fic_idct_put(uint8_t *dst, int stride, int16_t *block)
{
    int i, j;
    int16_t *ptr;

    ptr = block;
    fic_idct(ptr++, 8, 13, (1 << 12) + (1 << 17));
    for (i = 1; i < 8; i++) {
        fic_idct(ptr, 8, 13, 1 << 12);
        ptr++;
    }

    ptr = block;
    for (i = 0; i < 8; i++) {
        fic_idct(ptr, 1, 20, 0);
        ptr += 8;
    }

    ptr = block;
    for (j = 0; j < 8; j++) {
        for (i = 0; i < 8; i++)
            dst[i] = av_clip_uint8(ptr[i]);
        dst += stride;
        ptr += 8;
    }
}
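
/*
 * Decode a single 8x8 block.  A set skip bit leaves the block untouched
 * and raises *is_p (the caller uses this to flag the frame as predicted);
 * otherwise up to 64 signed exp-Golomb coefficients are read, dequantized
 * with the current quantization matrix in zigzag order, and run through
 * the IDCT.
 */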
static int fic_decode_block(FICContext *ctx, GetBitContext *gb,
                            uint8_t *dst, int stride, int16_t *block, int *is_p)
{
    int i, num_coeff;

    if (get_bits_left(gb) < 8)
        return AVERROR_INVALIDDATA;

    /* Is it a skip block? */
    if (get_bits1(gb)) {
        *is_p = 1;
        return 0;
    }

    memset(block, 0, sizeof(*block) * 64);

    num_coeff = get_bits(gb, 7);
    if (num_coeff > 64)
        return AVERROR_INVALIDDATA;

    for (i = 0; i < num_coeff; i++) {
        int v = get_se_golomb(gb);
        if (v < -2048 || v > 2048)
            return AVERROR_INVALIDDATA;
        block[ff_zigzag_direct[i]] = v *
                                     ctx->qmat[ff_zigzag_direct[i]];
    }

    fic_idct_put(dst, stride, block);

    return 0;
}

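/*
 * Per-slice worker run through avctx->execute().  It walks the luma and
 * chroma planes of the current frame in 8x8 blocks (chroma offsets and
 * dimensions are halved via the >> !!p shifts) and decodes each block
 * from the slice's own bitstream.
 */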
static int fic_decode_slice(AVCodecContext *avctx, void *tdata)
{
    FICContext *ctx        = avctx->priv_data;
    FICThreadContext *tctx = tdata;
    GetBitContext gb;
    uint8_t *src = tctx->src;
    int slice_h  = tctx->slice_h;
    int src_size = tctx->src_size;
    int y_off    = tctx->y_off;
    int x, y, p, ret;

    ret = init_get_bits8(&gb, src, src_size);
    if (ret < 0)
        return ret;

    for (p = 0; p < 3; p++) {
        int stride   = ctx->frame->linesize[p];
        uint8_t *dst = ctx->frame->data[p] + (y_off >> !!p) * stride;

        for (y = 0; y < (slice_h >> !!p); y += 8) {
            for (x = 0; x < (ctx->aligned_width >> !!p); x += 8) {
                int ret;

                if ((ret = fic_decode_block(ctx, &gb, dst + x, stride,
                                            tctx->block, &tctx->p_frame)) != 0)
                    return ret;
            }

            dst += 8 * stride;
        }
    }

    return 0;
}

static av_always_inline void fic_alpha_blend(uint8_t *dst, uint8_t *src,
                                             int size, uint8_t *alpha)
{
    int i;

    for (i = 0; i < size; i++)
        dst[i] += ((src[i] - dst[i]) * alpha[i]) >> 8;
}

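/*
 * Blend the 32x32 cursor into the output frame: the packed 4-byte cursor
 * pixels in cursor_buf are converted to YUVA 4:4:4, chroma and alpha are
 * box-averaged down to 16x16, and every plane is then alpha-blended onto
 * final_frame at the cursor position.
 */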
static void fic_draw_cursor(AVCodecContext *avctx, int cur_x, int cur_y)
{
    FICContext *ctx = avctx->priv_data;
    uint8_t *ptr    = ctx->cursor_buf;
    uint8_t *dstptr[3];
    uint8_t planes[4][1024];
    uint8_t chroma[3][256];
    int i, j, p;

    /* Convert to YUVA444. */
    for (i = 0; i < 1024; i++) {
        planes[0][i] = (( 25 * ptr[0] + 129 * ptr[1] +  66 * ptr[2]) / 255) + 16;
        planes[1][i] = ((-38 * ptr[0] + 112 * ptr[1] + -74 * ptr[2]) / 255) + 128;
        planes[2][i] = ((-18 * ptr[0] + 112 * ptr[1] + -94 * ptr[2]) / 255) + 128;
        planes[3][i] = ptr[3];

        ptr += 4;
    }

    /* Subsample chroma. */
    for (i = 0; i < 32; i += 2)
        for (j = 0; j < 32; j += 2)
            for (p = 0; p < 3; p++)
                chroma[p][16 * (i / 2) + j / 2] = (planes[p + 1][32 *  i      + j    ] +
                                                   planes[p + 1][32 *  i      + j + 1] +
                                                   planes[p + 1][32 * (i + 1) + j    ] +
                                                   planes[p + 1][32 * (i + 1) + j + 1]) / 4;

    /* Seek to x/y pos of cursor. */
    for (i = 0; i < 3; i++)
        dstptr[i] = ctx->final_frame->data[i]                        +
                    (ctx->final_frame->linesize[i] * (cur_y >> !!i)) +
                    (cur_x >> !!i) + !!i;

    /* Copy. */
    for (i = 0; i < FFMIN(32, avctx->height - cur_y) - 1; i += 2) {
        int lsize = FFMIN(32, avctx->width - cur_x);
        int csize = lsize / 2;

        fic_alpha_blend(dstptr[0],
                        planes[0] + i * 32, lsize, planes[3] + i * 32);
        fic_alpha_blend(dstptr[0] + ctx->final_frame->linesize[0],
                        planes[0] + (i + 1) * 32, lsize, planes[3] + (i + 1) * 32);
        fic_alpha_blend(dstptr[1],
                        chroma[0] + (i / 2) * 16, csize, chroma[2] + (i / 2) * 16);
        fic_alpha_blend(dstptr[2],
                        chroma[1] + (i / 2) * 16, csize, chroma[2] + (i / 2) * 16);

        dstptr[0] += ctx->final_frame->linesize[0] * 2;
        dstptr[1] += ctx->final_frame->linesize[1];
        dstptr[2] += ctx->final_frame->linesize[2];
    }
}

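/*
 * Decode one packet.  Judging by the parsing below, the fixed 27-byte
 * header carries the slice count (byte 13), a skip-frame flag (byte 17),
 * the quantization-matrix selector (byte 23) and a 24-bit extra/cursor
 * data size (bytes 24-26); cursor position and size follow as 16-bit LE
 * values at offsets 33, 35, 37 and 39, with the raw 32x32 cursor image
 * itself at CURSOR_OFFSET.  Big-endian 32-bit slice offsets then precede
 * the per-slice bitstreams.
 */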
static int fic_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    FICContext *ctx = avctx->priv_data;
    uint8_t *src = avpkt->data;
    int ret;
    int slice, nslices;
    int msize;
    int tsize;
    int cur_x, cur_y;
    int skip_cursor = ctx->skip_cursor;
    uint8_t *sdata;

    if ((ret = ff_reget_buffer(avctx, ctx->frame, 0)) < 0)
        return ret;

    /* Header + at least one slice (4) */
    if (avpkt->size < FIC_HEADER_SIZE + 4) {
        av_log(avctx, AV_LOG_ERROR, "Frame data is too small.\n");
        return AVERROR_INVALIDDATA;
    }

    /* Check for header. */
    if (memcmp(src, fic_header, 7))
        av_log(avctx, AV_LOG_WARNING, "Invalid FIC Header.\n");

    /* Is it a skip frame? */
    if (src[17]) {
        if (!ctx->final_frame) {
            av_log(avctx, AV_LOG_WARNING, "Initial frame is skipped\n");
            return AVERROR_INVALIDDATA;
        }
        goto skip;
    }

    nslices = src[13];
    if (!nslices) {
        av_log(avctx, AV_LOG_ERROR, "Zero slices found.\n");
        return AVERROR_INVALIDDATA;
    }

    /* High or Low Quality Matrix? */
    ctx->qmat = src[23] ? fic_qmat_hq : fic_qmat_lq;

    /* Skip cursor data. */
    tsize = AV_RB24(src + 24);
    if (tsize > avpkt->size - FIC_HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR,
               "Packet is too small to contain cursor (%d vs %d bytes).\n",
               tsize, avpkt->size - FIC_HEADER_SIZE);
        return AVERROR_INVALIDDATA;
    }

    if (!tsize || !AV_RL16(src + 37) || !AV_RL16(src + 39))
        skip_cursor = 1;

    if (!skip_cursor && tsize < 32) {
        av_log(avctx, AV_LOG_WARNING,
               "Cursor data too small. Skipping cursor.\n");
        skip_cursor = 1;
    }

    /* Cursor position. */
    cur_x = AV_RL16(src + 33);
    cur_y = AV_RL16(src + 35);
    if (!skip_cursor && (cur_x > avctx->width || cur_y > avctx->height)) {
        av_log(avctx, AV_LOG_DEBUG,
               "Invalid cursor position: (%d,%d). Skipping cursor.\n",
               cur_x, cur_y);
        skip_cursor = 1;
    }

    if (!skip_cursor && (AV_RL16(src + 37) != 32 || AV_RL16(src + 39) != 32)) {
        av_log(avctx, AV_LOG_WARNING,
               "Invalid cursor size. Skipping cursor.\n");
        skip_cursor = 1;
    }

    if (!skip_cursor && avpkt->size < CURSOR_OFFSET + sizeof(ctx->cursor_buf)) {
        skip_cursor = 1;
    }

    /* Slice height for all but the last slice. */
    ctx->slice_h = 16 * (ctx->aligned_height >> 4) / nslices;
    if (ctx->slice_h % 16)
        ctx->slice_h = FFALIGN(ctx->slice_h - 16, 16);

    /* First slice offset and remaining data. */
    sdata = src + tsize + FIC_HEADER_SIZE + 4 * nslices;
    msize = avpkt->size - nslices * 4 - tsize - FIC_HEADER_SIZE;

    if (msize <= ctx->aligned_width / 8 * (ctx->aligned_height / 8) / 8) {
        av_log(avctx, AV_LOG_ERROR, "Not enough frame data to decode.\n");
        return AVERROR_INVALIDDATA;
    }

    /* Allocate slice data. */
    av_fast_malloc(&ctx->slice_data, &ctx->slice_data_size,
                   nslices * sizeof(ctx->slice_data[0]));
    if (!ctx->slice_data_size) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate slice data.\n");
        return AVERROR(ENOMEM);
    }
    memset(ctx->slice_data, 0, nslices * sizeof(ctx->slice_data[0]));

    for (slice = 0; slice < nslices; slice++) {
        unsigned slice_off = AV_RB32(src + tsize + FIC_HEADER_SIZE + slice * 4);
        unsigned slice_size;
        int y_off   = ctx->slice_h * slice;
        int slice_h = ctx->slice_h;

        /*
         * Either read the slice size, or consume all data left.
         * Also, special case the last slice height.
         */
        if (slice == nslices - 1) {
            slice_size = msize;
            slice_h    = FFALIGN(avctx->height - ctx->slice_h * (nslices - 1), 16);
        } else {
            slice_size = AV_RB32(src + tsize + FIC_HEADER_SIZE + slice * 4 + 4);
            if (slice_size < slice_off)
                return AVERROR_INVALIDDATA;
        }

        if (slice_size < slice_off || slice_size > msize)
            continue;

        slice_size -= slice_off;

        ctx->slice_data[slice].src      = sdata + slice_off;
        ctx->slice_data[slice].src_size = slice_size;
        ctx->slice_data[slice].slice_h  = slice_h;
        ctx->slice_data[slice].y_off    = y_off;
    }

    if ((ret = avctx->execute(avctx, fic_decode_slice, ctx->slice_data,
                              NULL, nslices, sizeof(ctx->slice_data[0]))) < 0)
        return ret;

    ctx->frame->key_frame = 1;
    ctx->frame->pict_type = AV_PICTURE_TYPE_I;
    for (slice = 0; slice < nslices; slice++) {
        if (ctx->slice_data[slice].p_frame) {
            ctx->frame->key_frame = 0;
            ctx->frame->pict_type = AV_PICTURE_TYPE_P;
            break;
        }
    }
    av_frame_free(&ctx->final_frame);
    ctx->final_frame = av_frame_clone(ctx->frame);
    if (!ctx->final_frame) {
        av_log(avctx, AV_LOG_ERROR, "Could not clone frame buffer.\n");
        return AVERROR(ENOMEM);
    }

    /* Make sure we use a user-supplied buffer. */
    if ((ret = ff_reget_buffer(avctx, ctx->final_frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not make frame writable.\n");
        return ret;
    }

    /* Draw cursor. */
    if (!skip_cursor) {
        memcpy(ctx->cursor_buf, src + CURSOR_OFFSET, sizeof(ctx->cursor_buf));
        fic_draw_cursor(avctx, cur_x, cur_y);
    }

skip:
    *got_frame = 1;
    if ((ret = av_frame_ref(data, ctx->final_frame)) < 0)
        return ret;

    return avpkt->size;
}

static av_cold int fic_decode_close(AVCodecContext *avctx)
{
    FICContext *ctx = avctx->priv_data;

    av_freep(&ctx->slice_data);
    av_frame_free(&ctx->final_frame);
    av_frame_free(&ctx->frame);

    return 0;
}

static av_cold int fic_decode_init(AVCodecContext *avctx)
{
    FICContext *ctx = avctx->priv_data;

    /* Initialize various context values */
    ctx->avctx          = avctx;
    ctx->aligned_width  = FFALIGN(avctx->width,  16);
    ctx->aligned_height = FFALIGN(avctx->height, 16);

    avctx->pix_fmt             = AV_PIX_FMT_YUV420P;
    avctx->bits_per_raw_sample = 8;

    ctx->frame = av_frame_alloc();
    if (!ctx->frame)
        return AVERROR(ENOMEM);

    return 0;
}

static const AVOption options[] = {
    { "skip_cursor", "skip the cursor", offsetof(FICContext, skip_cursor), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
    { NULL },
};

static const AVClass fic_decoder_class = {
    .class_name = "FIC decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_fic_decoder = {
    .name           = "fic",
    .long_name      = NULL_IF_CONFIG_SMALL("Mirillis FIC"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FIC,
    .priv_data_size = sizeof(FICContext),
    .init           = fic_decode_init,
    .decode         = fic_decode_frame,
    .close          = fic_decode_close,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
    .priv_class     = &fic_decoder_class,
};