FFmpeg: fic.c
/*
 * Mirillis FIC decoder
 *
 * Copyright (c) 2014 Konstantin Shishkov
 * Copyright (c) 2014 Derek Buitenhuis
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/common.h"
#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
#include "golomb.h"

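/* Per-slice scratch state passed to each slice-decoding worker. */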
typedef struct FICThreadContext {
    DECLARE_ALIGNED(16, int16_t, block)[64];
    uint8_t *src;
    int slice_h;
    int src_size;
    int y_off;
    int p_frame;
} FICThreadContext;

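/* Decoder context: persistent frames, the per-slice work array and stream parameters. */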
typedef struct FICContext {
    AVClass *class;
    AVCodecContext *avctx;
    AVFrame *frame;
    AVFrame *final_frame;

    FICThreadContext *slice_data;
    int slice_data_size;

    const uint8_t *qmat;

    enum AVPictureType cur_frame_type;

    int aligned_width, aligned_height;
    int num_slices, slice_h;

    uint8_t cursor_buf[4096];
    int skip_cursor;
} FICContext;

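/* Dequantization matrices (in raster order) for high- and low-quality streams. */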
static const uint8_t fic_qmat_hq[64] = {
    1, 2, 2, 2, 3, 3, 3, 4,
    2, 2, 2, 3, 3, 3, 4, 4,
    2, 2, 3, 3, 3, 4, 4, 4,
    2, 2, 3, 3, 3, 4, 4, 5,
    2, 3, 3, 3, 4, 4, 5, 6,
    3, 3, 3, 4, 4, 5, 6, 7,
    3, 3, 3, 4, 4, 5, 7, 7,
    3, 3, 4, 4, 5, 7, 7, 7,
};

static const uint8_t fic_qmat_lq[64] = {
    1, 5, 6,  7,  8,  9,  9, 11,
    5, 5, 7,  8,  9,  9, 11, 12,
    6, 7, 8,  9,  9, 11, 11, 12,
    7, 7, 8,  9,  9, 11, 12, 13,
    7, 8, 9,  9, 10, 11, 13, 16,
    8, 9, 9, 10, 11, 13, 16, 19,
    8, 9, 9, 11, 12, 15, 18, 23,
    9, 9, 11, 12, 15, 18, 23, 27
};

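/* Magic bytes every FIC frame is expected to start with ("\0\0\1FICV"). */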
static const uint8_t fic_header[7] = { 0, 0, 1, 'F', 'I', 'C', 'V' };

#define FIC_HEADER_SIZE 27
#define CURSOR_OFFSET 59

static av_always_inline void fic_idct(int16_t *blk, int step, int shift, int rnd)
{
    const unsigned t0 = 27246 * blk[3 * step] + 18405 * blk[5 * step];
    const unsigned t1 = 27246 * blk[5 * step] - 18405 * blk[3 * step];
    const unsigned t2 =  6393 * blk[7 * step] + 32139 * blk[1 * step];
    const unsigned t3 =  6393 * blk[1 * step] - 32139 * blk[7 * step];
    const unsigned t4 = 5793U * ((int)(t2 + t0 + 0x800) >> 12);
    const unsigned t5 = 5793U * ((int)(t3 + t1 + 0x800) >> 12);
    const unsigned t6 = t2 - t0;
    const unsigned t7 = t3 - t1;
    const unsigned t8 = 17734 * blk[2 * step] - 42813 * blk[6 * step];
    const unsigned t9 = 17734 * blk[6 * step] + 42814 * blk[2 * step];
    const unsigned tA = (blk[0 * step] - blk[4 * step]) * 32768 + rnd;
    const unsigned tB = (blk[0 * step] + blk[4 * step]) * 32768 + rnd;
    blk[0 * step] = (int)(  t4       + t9 + tB) >> shift;
    blk[1 * step] = (int)(  t6 + t7  + t8 + tA) >> shift;
    blk[2 * step] = (int)(  t6 - t7  - t8 + tA) >> shift;
    blk[3 * step] = (int)(  t5       - t9 + tB) >> shift;
    blk[4 * step] = (int)( -t5       - t9 + tB) >> shift;
    blk[5 * step] = (int)(-(t6 - t7) - t8 + tA) >> shift;
    blk[6 * step] = (int)(-(t6 + t7) + t8 + tA) >> shift;
    blk[7 * step] = (int)( -t4       + t9 + tB) >> shift;
}

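/*
 * Full 2-D inverse transform of one 8x8 block: columns first, then rows,
 * then clip to 8-bit samples and store the result at dst.
 */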
static void fic_idct_put(uint8_t *dst, int stride, int16_t *block)
{
    int i, j;
    int16_t *ptr;

    ptr = block;
    fic_idct(ptr++, 8, 13, (1 << 12) + (1 << 17));
    for (i = 1; i < 8; i++) {
        fic_idct(ptr, 8, 13, 1 << 12);
        ptr++;
    }

    ptr = block;
    for (i = 0; i < 8; i++) {
        fic_idct(ptr, 1, 20, 0);
        ptr += 8;
    }

    ptr = block;
    for (j = 0; j < 8; j++) {
        for (i = 0; i < 8; i++)
            dst[i] = av_clip_uint8(ptr[i]);
        dst += stride;
        ptr += 8;
    }
}
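/*
 * Decode a single 8x8 block. A set bit marks a skipped block (and flags the
 * frame as inter-coded via *is_p); otherwise up to 64 signed Exp-Golomb
 * coefficients are read in zigzag order, dequantized and inverse-transformed.
 */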
static int fic_decode_block(FICContext *ctx, GetBitContext *gb,
                            uint8_t *dst, int stride, int16_t *block, int *is_p)
{
    int i, num_coeff;

    if (get_bits_left(gb) < 8)
        return AVERROR_INVALIDDATA;

    /* Is it a skip block? */
    if (get_bits1(gb)) {
        *is_p = 1;
        return 0;
    }

    memset(block, 0, sizeof(*block) * 64);

    num_coeff = get_bits(gb, 7);
    if (num_coeff > 64)
        return AVERROR_INVALIDDATA;

    for (i = 0; i < num_coeff; i++) {
        int v = get_se_golomb(gb);
        if (v < -2048 || v > 2048)
            return AVERROR_INVALIDDATA;
        block[ff_zigzag_direct[i]] = v * ctx->qmat[ff_zigzag_direct[i]];
    }

    fic_idct_put(dst, stride, block);

    return 0;
}

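/*
 * Slice worker: walks the luma plane and the two 4:2:0-subsampled chroma
 * planes of one slice in 8x8 blocks, decoding them from the slice's own
 * bitstream.
 */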
static int fic_decode_slice(AVCodecContext *avctx, void *tdata)
{
    FICContext *ctx        = avctx->priv_data;
    FICThreadContext *tctx = tdata;
    GetBitContext gb;
    uint8_t *src = tctx->src;
    int slice_h  = tctx->slice_h;
    int src_size = tctx->src_size;
    int y_off    = tctx->y_off;
    int x, y, p, ret;

    ret = init_get_bits8(&gb, src, src_size);
    if (ret < 0)
        return ret;

    for (p = 0; p < 3; p++) {
        int stride   = ctx->frame->linesize[p];
        uint8_t *dst = ctx->frame->data[p] + (y_off >> !!p) * stride;

        for (y = 0; y < (slice_h >> !!p); y += 8) {
            for (x = 0; x < (ctx->aligned_width >> !!p); x += 8) {
                int ret;

                if ((ret = fic_decode_block(ctx, &gb, dst + x, stride,
                                            tctx->block, &tctx->p_frame)) != 0)
                    return ret;
            }

            dst += 8 * stride;
        }
    }

    return 0;
}

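/* Blend src over dst using per-pixel 8-bit alpha values. */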
static av_always_inline void fic_alpha_blend(uint8_t *dst, uint8_t *src,
                                             int size, uint8_t *alpha)
{
    int i;

    for (i = 0; i < size; i++)
        dst[i] += ((src[i] - dst[i]) * alpha[i]) >> 8;
}

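/*
 * Convert the 32x32, 4-bytes-per-pixel cursor image to YUVA, subsample the
 * chroma planes to 4:2:0 and alpha-blend the cursor onto the final frame
 * at (cur_x, cur_y).
 */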
static void fic_draw_cursor(AVCodecContext *avctx, int cur_x, int cur_y)
{
    FICContext *ctx = avctx->priv_data;
    uint8_t *ptr    = ctx->cursor_buf;
    uint8_t *dstptr[3];
    uint8_t planes[4][1024];
    uint8_t chroma[3][256];
    int i, j, p;

    /* Convert to YUVA444. */
    for (i = 0; i < 1024; i++) {
        planes[0][i] = (( 25 * ptr[0] + 129 * ptr[1] +  66 * ptr[2]) / 255) + 16;
        planes[1][i] = ((-38 * ptr[0] + 112 * ptr[1] + -74 * ptr[2]) / 255) + 128;
        planes[2][i] = ((-18 * ptr[0] + 112 * ptr[1] + -94 * ptr[2]) / 255) + 128;
        planes[3][i] = ptr[3];

        ptr += 4;
    }

    /* Subsample chroma. */
    for (i = 0; i < 32; i += 2)
        for (j = 0; j < 32; j += 2)
            for (p = 0; p < 3; p++)
                chroma[p][16 * (i / 2) + j / 2] = (planes[p + 1][32 *  i      + j    ] +
                                                   planes[p + 1][32 *  i      + j + 1] +
                                                   planes[p + 1][32 * (i + 1) + j    ] +
                                                   planes[p + 1][32 * (i + 1) + j + 1]) / 4;

    /* Seek to x/y pos of cursor. */
    for (i = 0; i < 3; i++)
        dstptr[i] = ctx->final_frame->data[i]                        +
                    (ctx->final_frame->linesize[i] * (cur_y >> !!i)) +
                    (cur_x >> !!i) + !!i;

    /* Copy. */
    for (i = 0; i < FFMIN(32, avctx->height - cur_y) - 1; i += 2) {
        int lsize = FFMIN(32, avctx->width - cur_x);
        int csize = lsize / 2;

        fic_alpha_blend(dstptr[0],
                        planes[0] + i * 32, lsize, planes[3] + i * 32);
        fic_alpha_blend(dstptr[0] + ctx->final_frame->linesize[0],
                        planes[0] + (i + 1) * 32, lsize, planes[3] + (i + 1) * 32);
        fic_alpha_blend(dstptr[1],
                        chroma[0] + (i / 2) * 16, csize, chroma[2] + (i / 2) * 16);
        fic_alpha_blend(dstptr[2],
                        chroma[1] + (i / 2) * 16, csize, chroma[2] + (i / 2) * 16);

        dstptr[0] += ctx->final_frame->linesize[0] * 2;
        dstptr[1] += ctx->final_frame->linesize[1];
        dstptr[2] += ctx->final_frame->linesize[2];
    }
}

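/*
 * Decode one packet: validate the header, handle skip frames, select the
 * quantization matrix, parse the cursor metadata, hand the slices to
 * avctx->execute(), and finally blend the cursor onto a clone of the
 * decoded frame.
 */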
static int fic_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    FICContext *ctx = avctx->priv_data;
    uint8_t *src = avpkt->data;
    int ret;
    int slice, nslices;
    int msize;
    int tsize;
    int cur_x, cur_y;
    int skip_cursor = ctx->skip_cursor;
    uint8_t *sdata;

    if ((ret = ff_reget_buffer(avctx, ctx->frame, 0)) < 0)
        return ret;

    /* Header + at least one slice (4) */
    if (avpkt->size < FIC_HEADER_SIZE + 4) {
        av_log(avctx, AV_LOG_ERROR, "Frame data is too small.\n");
        return AVERROR_INVALIDDATA;
    }

    /* Check for header. */
    if (memcmp(src, fic_header, 7))
        av_log(avctx, AV_LOG_WARNING, "Invalid FIC Header.\n");

    /* Is it a skip frame? */
    if (src[17]) {
        if (!ctx->final_frame) {
            av_log(avctx, AV_LOG_WARNING, "Initial frame is skipped\n");
            return AVERROR_INVALIDDATA;
        }
        goto skip;
    }

    nslices = src[13];
    if (!nslices) {
        av_log(avctx, AV_LOG_ERROR, "Zero slices found.\n");
        return AVERROR_INVALIDDATA;
    }

    /* High or Low Quality Matrix? */
    ctx->qmat = src[23] ? fic_qmat_hq : fic_qmat_lq;

    /* Skip cursor data. */
    tsize = AV_RB24(src + 24);
    if (tsize > avpkt->size - FIC_HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR,
               "Packet is too small to contain cursor (%d vs %d bytes).\n",
               tsize, avpkt->size - FIC_HEADER_SIZE);
        return AVERROR_INVALIDDATA;
    }

    if (!tsize || !AV_RL16(src + 37) || !AV_RL16(src + 39))
        skip_cursor = 1;

    if (!skip_cursor && tsize < 32) {
        av_log(avctx, AV_LOG_WARNING,
               "Cursor data too small. Skipping cursor.\n");
        skip_cursor = 1;
    }

    /* Cursor position. */
    cur_x = AV_RL16(src + 33);
    cur_y = AV_RL16(src + 35);
    if (!skip_cursor && (cur_x > avctx->width || cur_y > avctx->height)) {
        av_log(avctx, AV_LOG_DEBUG,
               "Invalid cursor position: (%d,%d). Skipping cursor.\n",
               cur_x, cur_y);
        skip_cursor = 1;
    }

    if (!skip_cursor && (AV_RL16(src + 37) != 32 || AV_RL16(src + 39) != 32)) {
        av_log(avctx, AV_LOG_WARNING,
               "Invalid cursor size. Skipping cursor.\n");
        skip_cursor = 1;
    }

    if (!skip_cursor && avpkt->size < CURSOR_OFFSET + sizeof(ctx->cursor_buf)) {
        skip_cursor = 1;
    }

    /* Slice height for all but the last slice. */
    ctx->slice_h = 16 * (ctx->aligned_height >> 4) / nslices;
    if (ctx->slice_h % 16)
        ctx->slice_h = FFALIGN(ctx->slice_h - 16, 16);

    /* First slice offset and remaining data. */
    sdata = src + tsize + FIC_HEADER_SIZE + 4 * nslices;
    msize = avpkt->size - nslices * 4 - tsize - FIC_HEADER_SIZE;

    if (msize <= ctx->aligned_width / 8 * (ctx->aligned_height / 8) / 8) {
        av_log(avctx, AV_LOG_ERROR, "Not enough frame data to decode.\n");
        return AVERROR_INVALIDDATA;
    }

    /* Allocate slice data. */
    av_fast_malloc(&ctx->slice_data, &ctx->slice_data_size,
                   nslices * sizeof(ctx->slice_data[0]));
    if (!ctx->slice_data_size) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate slice data.\n");
        return AVERROR(ENOMEM);
    }
    memset(ctx->slice_data, 0, nslices * sizeof(ctx->slice_data[0]));

    for (slice = 0; slice < nslices; slice++) {
        unsigned slice_off = AV_RB32(src + tsize + FIC_HEADER_SIZE + slice * 4);
        unsigned slice_size;
        int y_off   = ctx->slice_h * slice;
        int slice_h = ctx->slice_h;

        /*
         * Either read the slice size, or consume all data left.
         * Also, special case the last slice height.
         */
        if (slice == nslices - 1) {
            slice_size = msize;
            slice_h    = FFALIGN(avctx->height - ctx->slice_h * (nslices - 1), 16);
        } else {
            slice_size = AV_RB32(src + tsize + FIC_HEADER_SIZE + slice * 4 + 4);
            if (slice_size < slice_off)
                return AVERROR_INVALIDDATA;
        }

        if (slice_size < slice_off || slice_size > msize)
            continue;

        slice_size -= slice_off;

        ctx->slice_data[slice].src      = sdata + slice_off;
        ctx->slice_data[slice].src_size = slice_size;
        ctx->slice_data[slice].slice_h  = slice_h;
        ctx->slice_data[slice].y_off    = y_off;
    }

    if ((ret = avctx->execute(avctx, fic_decode_slice, ctx->slice_data,
                              NULL, nslices, sizeof(ctx->slice_data[0]))) < 0)
        return ret;

    ctx->frame->key_frame = 1;
    ctx->frame->pict_type = AV_PICTURE_TYPE_I;
    for (slice = 0; slice < nslices; slice++) {
        if (ctx->slice_data[slice].p_frame) {
            ctx->frame->key_frame = 0;
            ctx->frame->pict_type = AV_PICTURE_TYPE_P;
            break;
        }
    }
    av_frame_free(&ctx->final_frame);
    ctx->final_frame = av_frame_clone(ctx->frame);
    if (!ctx->final_frame) {
        av_log(avctx, AV_LOG_ERROR, "Could not clone frame buffer.\n");
        return AVERROR(ENOMEM);
    }

    /* Make sure we use a user-supplied buffer. */
    if ((ret = ff_reget_buffer(avctx, ctx->final_frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not make frame writable.\n");
        return ret;
    }

    /* Draw cursor. */
    if (!skip_cursor) {
        memcpy(ctx->cursor_buf, src + CURSOR_OFFSET, sizeof(ctx->cursor_buf));
        fic_draw_cursor(avctx, cur_x, cur_y);
    }

skip:
    *got_frame = 1;
    if ((ret = av_frame_ref(data, ctx->final_frame)) < 0)
        return ret;

    return avpkt->size;
}

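/* Free the per-slice work array and both internal frames. */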
static av_cold int fic_decode_close(AVCodecContext *avctx)
{
    FICContext *ctx = avctx->priv_data;

    av_freep(&ctx->slice_data);
    av_frame_free(&ctx->final_frame);
    av_frame_free(&ctx->frame);

    return 0;
}

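/* Pad the coded dimensions to multiples of 16 and allocate the reusable frame. */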
static av_cold int fic_decode_init(AVCodecContext *avctx)
{
    FICContext *ctx = avctx->priv_data;

    /* Initialize various context values */
    ctx->avctx          = avctx;
    ctx->aligned_width  = FFALIGN(avctx->width,  16);
    ctx->aligned_height = FFALIGN(avctx->height, 16);

    avctx->pix_fmt             = AV_PIX_FMT_YUV420P;
    avctx->bits_per_raw_sample = 8;

    ctx->frame = av_frame_alloc();
    if (!ctx->frame)
        return AVERROR(ENOMEM);

    return 0;
}

static const AVOption options[] = {
{ "skip_cursor", "skip the cursor", offsetof(FICContext, skip_cursor), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
{ NULL },
};

static const AVClass fic_decoder_class = {
    .class_name = "FIC decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_fic_decoder = {
    .name           = "fic",
    .long_name      = NULL_IF_CONFIG_SMALL("Mirillis FIC"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FIC,
    .priv_data_size = sizeof(FICContext),
    .init           = fic_decode_init,
    .decode         = fic_decode_frame,
    .close          = fic_decode_close,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
    .priv_class     = &fic_decoder_class,
};