FFmpeg
mv30.c
1 /*
2  * MidiVid MV30 decoder
3  *
4  * Copyright (c) 2020 Paul B Mahol
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 
27 #include "libavutil/thread.h"
28 
29 #include "avcodec.h"
30 #include "bytestream.h"
31 #include "copy_block.h"
32 #include "mathops.h"
33 #include "blockdsp.h"
34 #include "get_bits.h"
35 #include "internal.h"
36 #include "aandcttab.h"
37 
38 typedef struct MV30Context {
39  GetBitContext gb;
40 
41  int intra_quant;
42  int inter_quant;
43  int is_inter;
44  int mode_size;
45  int nb_mvectors;
46 
47  int block[6][64];
48  int16_t *mvectors;
49  unsigned int mvectors_size;
50  int16_t *coeffs;
51  unsigned int coeffs_size;
52 
53  int16_t intraq_tab[2][64];
54  int16_t interq_tab[2][64];
55 
56  BlockDSPContext bdsp;
57  AVFrame *prev_frame;
58 } MV30Context;
59 
60 static VLC cbp_tab;
61 
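/* Base quantisation matrices for luma and chroma; get_qtable() below scales
 * them by the per-frame quality value. */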
62 static const uint8_t luma_tab[] = {
63  12, 12, 15, 19, 25, 34, 40, 48,
64  12, 12, 18, 22, 27, 44, 47, 46,
65  17, 18, 21, 26, 35, 46, 52, 47,
66  18, 20, 24, 28, 40, 61, 59, 51,
67  20, 24, 32, 43, 50, 72, 72, 63,
68  25, 31, 42, 48, 58, 72, 81, 75,
69  38, 46, 54, 61, 71, 84, 88, 85,
70  50, 61, 65, 68, 79, 78, 86, 91,
71 };
72 
73 static const uint8_t chroma_tab[] = {
74  12, 16, 24, 47, 99, 99, 99, 99,
75  16, 21, 26, 66, 99, 99, 99, 99,
76  24, 26, 56, 99, 99, 99, 99, 99,
77  47, 66, 99, 99, 99, 99, 99, 99,
78  99, 99, 99, 99, 99, 99, 99, 99,
79  99, 99, 99, 99, 99, 99, 99, 99,
80  99, 99, 99, 99, 99, 99, 99, 99,
81  99, 99, 99, 99, 99, 99, 99, 99,
82 };
83 
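/* Scan order used when reading all 64 coefficients of a mode-3 block. */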
84 static const uint8_t zigzag[] = {
85  0, 1, 8, 9, 16, 2, 3, 10,
86  17, 24, 32, 25, 18, 11, 4, 5,
87  12, 19, 26, 33, 40, 48, 41, 34,
88  27, 20, 13, 6, 7, 14, 21, 28,
89  35, 42, 49, 56, 57, 50, 43, 36,
90  29, 22, 15, 23, 30, 37, 44, 51,
91  58, 59, 52, 45, 38, 31, 39, 46,
92  53, 60, 61, 54, 47, 55, 62, 63,
93 };
94 
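/* Builds an 8x8 quantisation table from an IJG/JPEG-style quality value
 * (factor = 5000/quant for quant < 50, otherwise 200 - 2*quant), then folds
 * in the AAN scale factors from ff_aanscales[] (rounded, >> 12) so the
 * simplified IDCT below works on pre-scaled coefficients. */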
95 static void get_qtable(int16_t *table, int quant, const uint8_t *quant_tab)
96 {
97  int factor = quant < 50 ? 5000 / FFMAX(quant, 1) : 200 - FFMIN(quant, 100) * 2;
98 
99  for (int i = 0; i < 64; i++) {
100  table[i] = av_clip((quant_tab[i] * factor + 0x32) / 100, 1, 0x7fff);
101  table[i] = ((int)ff_aanscales[i] * (int)table[i] + 0x800) >> 12;
102  }
103 }
104 
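/* One 8-point IDCT pass over a row or column (step = 1 or 8).  The 8.8
 * fixed-point constants 362, 473, 277 and 669 match the usual AAN butterfly
 * factors (about 1.414, 1.848, 1.082 and 2.613). */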
105 static inline void idct_1d(unsigned *blk, int step)
106 {
107  const unsigned t0 = blk[0 * step] + blk[4 * step];
108  const unsigned t1 = blk[0 * step] - blk[4 * step];
109  const unsigned t2 = blk[2 * step] + blk[6 * step];
110  const unsigned t3 = ((int)((blk[2 * step] - blk[6 * step]) * 362U) >> 8) - t2;
111  const unsigned t4 = t0 + t2;
112  const unsigned t5 = t0 - t2;
113  const unsigned t6 = t1 + t3;
114  const unsigned t7 = t1 - t3;
115  const unsigned t8 = blk[5 * step] + blk[3 * step];
116  const unsigned t9 = blk[5 * step] - blk[3 * step];
117  const unsigned tA = blk[1 * step] + blk[7 * step];
118  const unsigned tB = blk[1 * step] - blk[7 * step];
119  const unsigned tC = t8 + tA;
120  const unsigned tD = (int)((tB + t9) * 473U) >> 8;
121  const unsigned tE = (((int)(t9 * -669U) >> 8) - tC) + tD;
122  const unsigned tF = ((int)((tA - t8) * 362U) >> 8) - tE;
123  const unsigned t10 = (((int)(tB * 277U) >> 8) - tD) + tF;
124 
125  blk[0 * step] = t4 + tC;
126  blk[1 * step] = t6 + tE;
127  blk[2 * step] = t7 + tF;
128  blk[3 * step] = t5 - t10;
129  blk[4 * step] = t5 + t10;
130  blk[5 * step] = t7 - tF;
131  blk[6 * step] = t6 - tE;
132  blk[7 * step] = t4 - tC;
133 }
134 
135 static void idct_put(uint8_t *dst, int stride, int *block)
136 {
137  for (int i = 0; i < 8; i++) {
138  if ((block[0x08 + i] |
139  block[0x10 + i] |
140  block[0x18 + i] |
141  block[0x20 + i] |
142  block[0x28 + i] |
143  block[0x30 + i] |
144  block[0x38 + i]) == 0) {
145  block[0x08 + i] = block[i];
146  block[0x10 + i] = block[i];
147  block[0x18 + i] = block[i];
148  block[0x20 + i] = block[i];
149  block[0x28 + i] = block[i];
150  block[0x30 + i] = block[i];
151  block[0x38 + i] = block[i];
152  } else {
153  idct_1d(block + i, 8);
154  }
155  }
156 
157  for (int i = 0; i < 8; i++) {
158  idct_1d(block, 1);
159  for (int j = 0; j < 8; j++)
160  dst[j] = av_clip_uint8((block[j] >> 5) + 128);
161  block += 8;
162  dst += stride;
163  }
164 }
165 
166 static void idct_add(uint8_t *dst, int stride,
167  const uint8_t *src, int in_linesize, int *block)
168 {
169  for (int i = 0; i < 8; i++) {
170  if ((block[0x08 + i] |
171  block[0x10 + i] |
172  block[0x18 + i] |
173  block[0x20 + i] |
174  block[0x28 + i] |
175  block[0x30 + i] |
176  block[0x38 + i]) == 0) {
177  block[0x08 + i] = block[i];
178  block[0x10 + i] = block[i];
179  block[0x18 + i] = block[i];
180  block[0x20 + i] = block[i];
181  block[0x28 + i] = block[i];
182  block[0x30 + i] = block[i];
183  block[0x38 + i] = block[i];
184  } else {
185  idct_1d(block + i, 8);
186  }
187  }
188 
189  for (int i = 0; i < 8; i++) {
190  idct_1d(block, 1);
191  for (int j = 0; j < 8; j++)
192  dst[j] = av_clip_uint8((block[j] >> 5) + src[j]);
193  block += 8;
194  dst += stride;
195  src += in_linesize;
196  }
197 }
198 
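/* Reduced IDCT for blocks where only the top-left 2x2 coefficients (DC plus
 * the first horizontal/vertical AC terms) are present; used by block mode 2
 * in decode_intra_block() and decode_inter_block(). */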
199 static inline void idct2_1d(int *blk, int step)
200 {
201  const unsigned int t0 = blk[0 * step];
202  const unsigned int t1 = blk[1 * step];
203  const unsigned int t2 = (int)(t1 * 473U) >> 8;
204  const unsigned int t3 = t2 - t1;
205  const unsigned int t4 = ((int)(t1 * 362U) >> 8) - t3;
206  const unsigned int t5 = (((int)(t1 * 277U) >> 8) - t2) + t4;
207 
208  blk[0 * step] = t1 + t0;
209  blk[1 * step] = t0 + t3;
210  blk[2 * step] = t4 + t0;
211  blk[3 * step] = t0 - t5;
212  blk[4 * step] = t5 + t0;
213  blk[5 * step] = t0 - t4;
214  blk[6 * step] = t0 - t3;
215  blk[7 * step] = t0 - t1;
216 }
217 
218 static void idct2_put(uint8_t *dst, int stride, int *block)
219 {
220  for (int i = 0; i < 2; i++) {
221  if ((block[0x08 + i]) == 0) {
222  block[0x08 + i] = block[i];
223  block[0x10 + i] = block[i];
224  block[0x18 + i] = block[i];
225  block[0x20 + i] = block[i];
226  block[0x28 + i] = block[i];
227  block[0x30 + i] = block[i];
228  block[0x38 + i] = block[i];
229  } else {
230  idct2_1d(block + i, 8);
231  }
232  }
233 
234  for (int i = 0; i < 8; i++) {
235  if (block[1] == 0) {
236  for (int j = 0; j < 8; j++)
237  dst[j] = av_clip_uint8((block[0] >> 5) + 128);
238  } else {
239  idct2_1d(block, 1);
240  for (int j = 0; j < 8; j++)
241  dst[j] = av_clip_uint8((block[j] >> 5) + 128);
242  }
243  block += 8;
244  dst += stride;
245  }
246 }
247 
248 static void idct2_add(uint8_t *dst, int stride,
249  const uint8_t *src, int in_linesize,
250  int *block)
251 {
252  for (int i = 0; i < 2; i++) {
253  if ((block[0x08 + i]) == 0) {
254  block[0x08 + i] = block[i];
255  block[0x10 + i] = block[i];
256  block[0x18 + i] = block[i];
257  block[0x20 + i] = block[i];
258  block[0x28 + i] = block[i];
259  block[0x30 + i] = block[i];
260  block[0x38 + i] = block[i];
261  } else {
262  idct2_1d(block + i, 8);
263  }
264  }
265 
266  for (int i = 0; i < 8; i++) {
267  if (block[1] == 0) {
268  for (int j = 0; j < 8; j++)
269  dst[j] = av_clip_uint8((block[0] >> 5) + src[j]);
270  } else {
271  idct2_1d(block, 1);
272  for (int j = 0; j < 8; j++)
273  dst[j] = av_clip_uint8((block[j] >> 5) + src[j]);
274  }
275  block += 8;
276  dst += stride;
277  src += in_linesize;
278  }
279 }
280 
281 static void update_inter_block(uint8_t *dst, int stride,
282  const uint8_t *src, int in_linesize,
283  int block)
284 {
285  for (int i = 0; i < 8; i++) {
286  for (int j = 0; j < 8; j++)
287  dst[j] = av_clip_uint8(block + src[j]);
288  dst += stride;
289  src += in_linesize;
290  }
291 }
292 
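/* Decodes one 8x8 intra block.  Mode 0: constant 128 fill; mode 1: DC-only
 * fill (DC predicted through pfill); mode 2: 2x2 low-frequency coefficients
 * with the reduced IDCT; mode 3: all 64 coefficients in zigzag order with
 * the full IDCT. */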
293 static int decode_intra_block(AVCodecContext *avctx, int mode,
294  GetByteContext *gbyte, int16_t *qtab,
295  int *block, int *pfill,
296  uint8_t *dst, int linesize)
297 {
298  MV30Context *s = avctx->priv_data;
299  int fill;
300 
301  switch (mode) {
302  case 0:
303  s->bdsp.fill_block_tab[1](dst, 128, linesize, 8);
304  break;
305  case 1:
306  fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
307  pfill[0] += fill;
308  block[0] = ((int)((unsigned)pfill[0] * qtab[0]) >> 5) + 128;
309  s->bdsp.fill_block_tab[1](dst, block[0], linesize, 8);
310  break;
311  case 2:
312  memset(block, 0, sizeof(*block) * 64);
313  fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
314  pfill[0] += fill;
315  block[0] = (unsigned)pfill[0] * qtab[0];
316  block[1] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[1];
317  block[8] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[8];
318  block[9] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[9];
319  idct2_put(dst, linesize, block);
320  break;
321  case 3:
322  fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
323  pfill[0] += fill;
324  block[0] = (unsigned)pfill[0] * qtab[0];
325  for (int i = 1; i < 64; i++)
326  block[zigzag[i]] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[zigzag[i]];
327  idct_put(dst, linesize, block);
328  break;
329  }
330 
331  return 0;
332 }
333 
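/* Same four modes as decode_intra_block(), but mode 0 copies the reference
 * block unchanged and modes 1-3 add the decoded residual to the
 * motion-compensated prediction in src. */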
334 static int decode_inter_block(AVCodecContext *avctx, int mode,
335  GetByteContext *gbyte, int16_t *qtab,
336  int *block, int *pfill,
337  uint8_t *dst, int linesize,
338  const uint8_t *src, int in_linesize)
339 {
340  int fill;
341 
342  switch (mode) {
343  case 0:
344  copy_block8(dst, src, linesize, in_linesize, 8);
345  break;
346  case 1:
347  fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
348  pfill[0] += fill;
349  block[0] = (int)((unsigned)pfill[0] * qtab[0]) >> 5;
350  update_inter_block(dst, linesize, src, in_linesize, block[0]);
351  break;
352  case 2:
353  memset(block, 0, sizeof(*block) * 64);
354  fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
355  pfill[0] += fill;
356  block[0] = (unsigned)pfill[0] * qtab[0];
357  block[1] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[1];
358  block[8] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[8];
359  block[9] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[9];
360  idct2_add(dst, linesize, src, in_linesize, block);
361  break;
362  case 3:
363  fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
364  pfill[0] += fill;
365  block[0] = (unsigned)pfill[0] * qtab[0];
366  for (int i = 1; i < 64; i++)
367  block[zigzag[i]] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[zigzag[i]];
368  idct_add(dst, linesize, src, in_linesize, block);
369  break;
370  }
371 
372  return 0;
373 }
374 
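/* Expands the coefficient stream for one slice: a non-zero VLC symbol v is
 * followed by v raw bits giving a signed level, while symbol 0 encodes a run
 * of zero coefficients whose length comes from 3 or 6 extra bits (plus a
 * bias), selected by one flag bit. */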
375 static int decode_coeffs(GetBitContext *gb, int16_t *coeffs, int nb_codes)
376 {
377  memset(coeffs, 0, nb_codes * sizeof(*coeffs));
378 
379  for (int i = 0; i < nb_codes;) {
380  int value = get_vlc2(gb, cbp_tab.table, cbp_tab.bits, 1);
381 
382  if (value > 0) {
383  int x = get_bits(gb, value);
384 
385  if (x < (1 << value) / 2) {
386  x = (1 << (value - 1)) + (x & ((1 << value) - 1 >> 1));
387  } else {
388  x = -(1 << (value - 1)) - (x & ((1 << value) - 1 >> 1));
389  }
390  coeffs[i++] = x;
391  } else {
392  int flag = get_bits1(gb);
393 
394  i += get_bits(gb, 3 + flag * 3) + 1 + flag * 8;
395  }
396  }
397 
398  return 0;
399 }
400 
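/* Keyframe decoding: the frame is scanned in 16x16 macroblocks of six 8x8
 * blocks (4 luma, 2 chroma).  The 2-bit per-block modes sit at the start of
 * the payload (mode_size bytes, read little-endian through mgb); for each
 * 16-pixel row of macroblocks a 16-bit count is followed by VLC-coded
 * coefficients that are re-read as a byte stream. */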
401 static int decode_intra(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame)
402 {
403  MV30Context *s = avctx->priv_data;
404  GetBitContext mgb;
405  uint8_t *dst[6];
406  int linesize[6];
407  int ret;
408 
409  mgb = *gb;
410  if (get_bits_left(gb) < s->mode_size * 8)
411  return AVERROR_INVALIDDATA;
412 
413  skip_bits_long(gb, s->mode_size * 8);
414 
415  linesize[0] = frame->linesize[0];
416  linesize[1] = frame->linesize[0];
417  linesize[2] = frame->linesize[0];
418  linesize[3] = frame->linesize[0];
419  linesize[4] = frame->linesize[1];
420  linesize[5] = frame->linesize[2];
421 
422  for (int y = 0; y < avctx->height; y += 16) {
423  GetByteContext gbyte;
424  int pfill[3][1] = { {0} };
425  int nb_codes = get_bits(gb, 16);
426 
427  av_fast_padded_malloc(&s->coeffs, &s->coeffs_size, nb_codes * sizeof(*s->coeffs));
428  if (!s->coeffs)
429  return AVERROR(ENOMEM);
430  ret = decode_coeffs(gb, s->coeffs, nb_codes);
431  if (ret < 0)
432  return ret;
433 
434  bytestream2_init(&gbyte, (uint8_t *)s->coeffs, nb_codes * sizeof(*s->coeffs));
435 
436  for (int x = 0; x < avctx->width; x += 16) {
437  dst[0] = frame->data[0] + linesize[0] * y + x;
438  dst[1] = frame->data[0] + linesize[0] * y + x + 8;
439  dst[2] = frame->data[0] + linesize[0] * (y + 8) + x;
440  dst[3] = frame->data[0] + linesize[0] * (y + 8) + x + 8;
441  dst[4] = frame->data[1] + linesize[4] * (y >> 1) + (x >> 1);
442  dst[5] = frame->data[2] + linesize[5] * (y >> 1) + (x >> 1);
443 
444  for (int b = 0; b < 6; b++) {
445  int mode = get_bits_le(&mgb, 2);
446 
447  ret = decode_intra_block(avctx, mode, &gbyte, s->intraq_tab[b >= 4],
448  s->block[b],
449  pfill[(b >= 4) + (b >= 5)],
450  dst[b], linesize[b]);
451  if (ret < 0)
452  return ret;
453  }
454  }
455  }
456 
457  return 0;
458 }
459 
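/* Inter-frame decoding: a mask bitstream supplies one flags byte per four
 * macroblocks.  Bit cnt selects motion compensation from the previous frame
 * (vector taken from s->mvectors), in which case bit cnt+4 chooses a plain
 * block copy versus copy plus coded residual; otherwise the macroblock is
 * coded intra. */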
460 static int decode_inter(AVCodecContext *avctx, GetBitContext *gb,
461  AVFrame *frame, AVFrame *prev)
462 {
463  MV30Context *s = avctx->priv_data;
464  GetBitContext mask;
465  GetBitContext mgb;
466  GetByteContext mv;
467  const int mask_size = ((avctx->height >> 4) * (avctx->width >> 4) * 2 + 7) / 8;
468  uint8_t *dst[6], *src[6];
469  int in_linesize[6];
470  int linesize[6];
471  int ret, cnt = 0;
472  int flags = 0;
473 
474  in_linesize[0] = prev->linesize[0];
475  in_linesize[1] = prev->linesize[0];
476  in_linesize[2] = prev->linesize[0];
477  in_linesize[3] = prev->linesize[0];
478  in_linesize[4] = prev->linesize[1];
479  in_linesize[5] = prev->linesize[2];
480 
481  linesize[0] = frame->linesize[0];
482  linesize[1] = frame->linesize[0];
483  linesize[2] = frame->linesize[0];
484  linesize[3] = frame->linesize[0];
485  linesize[4] = frame->linesize[1];
486  linesize[5] = frame->linesize[2];
487 
488  av_fast_padded_malloc(&s->mvectors, &s->mvectors_size, 2 * s->nb_mvectors * sizeof(*s->mvectors));
489  if (!s->mvectors) {
490  ret = AVERROR(ENOMEM);
491  goto fail;
492  }
493 
494  mask = *gb;
495  skip_bits_long(gb, mask_size * 8);
496  mgb = *gb;
497  skip_bits_long(gb, s->mode_size * 8);
498 
499  ret = decode_coeffs(gb, s->mvectors, 2 * s->nb_mvectors);
500  if (ret < 0)
501  goto fail;
502 
503  bytestream2_init(&mv, (uint8_t *)s->mvectors, 2 * s->nb_mvectors * sizeof(*s->mvectors));
504 
505  for (int y = 0; y < avctx->height; y += 16) {
506  GetByteContext gbyte;
507  int pfill[3][1] = { {0} };
508  int nb_codes = get_bits(gb, 16);
509 
510  skip_bits(gb, 8);
511  if (get_bits_left(gb) < 0) {
512  ret = AVERROR_INVALIDDATA;
513  goto fail;
514  }
515 
516  av_fast_padded_malloc(&s->coeffs, &s->coeffs_size, nb_codes * sizeof(*s->coeffs));
517  if (!s->coeffs) {
518  ret = AVERROR(ENOMEM);
519  goto fail;
520  }
521 
522  ret = decode_coeffs(gb, s->coeffs, nb_codes);
523  if (ret < 0)
524  goto fail;
525 
526  bytestream2_init(&gbyte, (uint8_t *)s->coeffs, nb_codes * sizeof(*s->coeffs));
527 
528  for (int x = 0; x < avctx->width; x += 16) {
529  if (cnt >= 4)
530  cnt = 0;
531  if (cnt == 0) {
532  if (get_bits_left(&mask) < 8) {
533  ret = AVERROR_INVALIDDATA;
534  goto fail;
535  }
536  flags = get_bits(&mask, 8);
537  }
538 
539  dst[0] = frame->data[0] + linesize[0] * y + x;
540  dst[1] = frame->data[0] + linesize[0] * y + x + 8;
541  dst[2] = frame->data[0] + linesize[0] * (y + 8) + x;
542  dst[3] = frame->data[0] + linesize[0] * (y + 8) + x + 8;
543  dst[4] = frame->data[1] + linesize[4] * (y >> 1) + (x >> 1);
544  dst[5] = frame->data[2] + linesize[5] * (y >> 1) + (x >> 1);
545 
546  if ((flags >> (cnt)) & 1) {
547  int mv_x = sign_extend(bytestream2_get_ne16(&mv), 16);
548  int mv_y = sign_extend(bytestream2_get_ne16(&mv), 16);
549 
550  int px = x + mv_x;
551  int py = y + mv_y;
552 
553  if (px < 0 || px > FFALIGN(avctx->width , 16) - 16 ||
554  py < 0 || py > FFALIGN(avctx->height, 16) - 16)
555  return AVERROR_INVALIDDATA;
556 
557  src[0] = prev->data[0] + in_linesize[0] * py + px;
558  src[1] = prev->data[0] + in_linesize[0] * py + px + 8;
559  src[2] = prev->data[0] + in_linesize[0] * (py + 8) + px;
560  src[3] = prev->data[0] + in_linesize[0] * (py + 8) + px + 8;
561  src[4] = prev->data[1] + in_linesize[4] * (py >> 1) + (px >> 1);
562  src[5] = prev->data[2] + in_linesize[5] * (py >> 1) + (px >> 1);
563 
564  if ((flags >> (cnt + 4)) & 1) {
565  for (int b = 0; b < 6; b++)
566  copy_block8(dst[b], src[b], linesize[b], in_linesize[b], 8);
567  } else {
568  for (int b = 0; b < 6; b++) {
569  int mode = get_bits_le(&mgb, 2);
570 
571  ret = decode_inter_block(avctx, mode, &gbyte, s->interq_tab[b >= 4],
572  s->block[b],
573  pfill[(b >= 4) + (b >= 5)],
574  dst[b], linesize[b],
575  src[b], in_linesize[b]);
576  if (ret < 0)
577  goto fail;
578  }
579  }
580  } else {
581  for (int b = 0; b < 6; b++) {
582  int mode = get_bits_le(&mgb, 2);
583 
584  ret = decode_intra_block(avctx, mode, &gbyte, s->intraq_tab[b >= 4],
585  s->block[b],
586  pfill[(b >= 4) + (b >= 5)],
587  dst[b], linesize[b]);
588  if (ret < 0)
589  goto fail;
590  }
591  }
592 
593  cnt++;
594  }
595  }
596 
597 fail:
598  return ret;
599 }
600 
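/* Frame header: 8-bit intra quantiser, signed 8-bit inter-quantiser delta,
 * then little-endian 16-bit fields for the inter flag, the size in bytes of
 * the block-mode bitstream and, for inter frames, the motion-vector count. */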
601 static int decode_frame(AVCodecContext *avctx, void *data,
602  int *got_frame, AVPacket *avpkt)
603 {
604  MV30Context *s = avctx->priv_data;
605  GetBitContext *gb = &s->gb;
606  AVFrame *frame = data;
607  int ret;
608 
609  if ((ret = init_get_bits8(gb, avpkt->data, avpkt->size)) < 0)
610  return ret;
611 
612  if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
613  return ret;
614 
615  s->intra_quant = get_bits(gb, 8);
616  s->inter_quant = s->intra_quant + get_sbits(gb, 8);
617  s->is_inter = get_bits_le(gb, 16);
618  s->mode_size = get_bits_le(gb, 16);
619  if (s->is_inter)
620  s->nb_mvectors = get_bits_le(gb, 16);
621 
622  get_qtable(s->intraq_tab[0], s->intra_quant, luma_tab);
623  get_qtable(s->intraq_tab[1], s->intra_quant, chroma_tab);
624 
625  frame->key_frame = s->is_inter == 0;
626 
627  if (frame->key_frame) {
628  ret = decode_intra(avctx, gb, frame);
629  if (ret < 0)
630  return ret;
631  } else {
632  get_qtable(s->interq_tab[0], s->inter_quant, luma_tab);
633  get_qtable(s->interq_tab[1], s->inter_quant, chroma_tab);
634 
635  if (!s->prev_frame->data[0]) {
636  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
637  return AVERROR_INVALIDDATA;
638  }
639 
640  ret = decode_inter(avctx, gb, frame, s->prev_frame);
641  if (ret < 0)
642  return ret;
643  }
644 
645  av_frame_unref(s->prev_frame);
646  if ((ret = av_frame_ref(s->prev_frame, frame)) < 0)
647  return ret;
648 
649  *got_frame = 1;
650 
651  return avpkt->size;
652 }
653 
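/* Prefix code used by decode_coeffs(): symbol 0 starts a zero-run escape,
 * while a non-zero symbol n means "read n raw bits for the next level". */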
654 static const uint16_t cbp_codes[] = {
655  0, 1, 4, 5, 6, 0xE, 0x1E, 0x3E, 0x7E, 0xFE, 0x1FE, 0x1FF,
656 };
657 
658 static const uint8_t cbp_bits[] = {
659  2, 2, 3, 3, 3, 4, 5, 6, 7, 8, 9, 9,
660 };
661 
662 static av_cold void init_static_data(void)
663 {
664  INIT_VLC_SPARSE_STATIC(&cbp_tab, 9, FF_ARRAY_ELEMS(cbp_bits),
665  cbp_bits, 1, 1, cbp_codes, 2, 2, NULL, 0, 0, 512);
666 }
667 
668 static av_cold int decode_init(AVCodecContext *avctx)
669 {
670  MV30Context *s = avctx->priv_data;
671  static AVOnce init_static_once = AV_ONCE_INIT;
672 
673  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
674  avctx->color_range = AVCOL_RANGE_JPEG;
675 
676  ff_blockdsp_init(&s->bdsp, avctx);
677 
678  s->prev_frame = av_frame_alloc();
679  if (!s->prev_frame)
680  return AVERROR(ENOMEM);
681 
682  ff_thread_once(&init_static_once, init_static_data);
683 
684  return 0;
685 }
686 
687 static void decode_flush(AVCodecContext *avctx)
688 {
689  MV30Context *s = avctx->priv_data;
690 
691  av_frame_unref(s->prev_frame);
692 }
693 
694 static av_cold int decode_close(AVCodecContext *avctx)
695 {
696  MV30Context *s = avctx->priv_data;
697 
698  av_frame_free(&s->prev_frame);
699  av_freep(&s->coeffs);
700  s->coeffs_size = 0;
701  av_freep(&s->mvectors);
702  s->mvectors_size = 0;
703 
704  return 0;
705 }
706 
707 AVCodec ff_mv30_decoder = {
708  .name = "mv30",
709  .long_name = NULL_IF_CONFIG_SMALL("MidiVid 3.0"),
710  .type = AVMEDIA_TYPE_VIDEO,
711  .id = AV_CODEC_ID_MV30,
712  .priv_data_size = sizeof(MV30Context),
713  .init = decode_init,
714  .close = decode_close,
715  .decode = decode_frame,
716  .flush = decode_flush,
717  .capabilities = AV_CODEC_CAP_DR1,
718  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
719  FF_CODEC_CAP_INIT_CLEANUP,
720 };