FFmpeg
agm.c
Go to the documentation of this file.
1 /*
2  * Amuse Graphics Movie decoder
3  *
4  * Copyright (c) 2018 Paul B Mahol
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <string.h>
24 
25 #define BITSTREAM_READER_LE
26 
27 #include "libavutil/mem_internal.h"
28 
29 #include "avcodec.h"
30 #include "bytestream.h"
31 #include "codec_internal.h"
32 #include "copy_block.h"
33 #include "decode.h"
34 #include "get_bits.h"
35 #include "idctdsp.h"
36 #include "jpegquanttables.h"
37 
/* Motion vector for one 16x16 macroblock (full-pel; see decode_motion_vectors,
 * which allocates one vector per 16x16 cell of the coded frame). */
typedef struct MotionVector {
    int16_t x, y;
} MotionVector;
41 
42 typedef struct AGMContext {
43  const AVClass *class;
47 
48  int key_frame;
51  int blocks_w;
52  int blocks_h;
53  int size[3];
54  int plus;
55  int dct;
56  int rgb;
57  unsigned flags;
58  unsigned fflags;
59 
60  uint8_t *output;
62  unsigned output_size;
63 
65  unsigned mvectors_size;
66 
68 
70 
73 
74  uint8_t permutated_scantable[64];
75  DECLARE_ALIGNED(32, int16_t, block)[64];
76 
77  int16_t *wblocks;
78  unsigned wblocks_size;
79 
80  int *map;
81  unsigned map_size;
82 
84 } AGMContext;
85 
/**
 * Read one (level, run-of-zeros) element from the bitstream.
 *
 * A non-zero 2-bit lookahead selects a 4- or 5-bit prefix that determines
 * how many magnitude bits follow (len) and how many prefix bits to consume
 * (skip).  Magnitudes below 1 << (len - 1) are remapped to negative levels.
 * A prefix of two zero bits instead encodes a run of coefficients/blocks to
 * skip, returned via *oskip; the width of the run field depends on the
 * coding mode (callers pass s->flags & 1).
 *
 * @param oskip receives the zero-run length (0 when a level was read)
 * @param level receives the decoded level (0 for run codes)
 * @param map   set to 1 when a level was coded for this position
 * @param mode  run-length coding variant (0 or 1)
 * @return 0 on success, AVERROR_INVALIDDATA on bad prefix or truncation
 */
static int read_code(GetBitContext *gb, int *oskip, int *level, int *map, int mode)
{
    int len = 0, skip = 0, max;

    if (get_bits_left(gb) < 2)
        return AVERROR_INVALIDDATA;

    if (show_bits(gb, 2)) {
        /* Level code: prefix chooses (len, skip). */
        switch (show_bits(gb, 4)) {
        case 1:
        case 9:
            len  = 1;
            skip = 3;
            break;
        case 2:
            len  = 3;
            skip = 4;
            break;
        case 3:
            len  = 7;
            skip = 4;
            break;
        case 5:
        case 13:
            len  = 2;
            skip = 3;
            break;
        case 6:
            len  = 4;
            skip = 4;
            break;
        case 7:
            len  = 8;
            skip = 4;
            break;
        case 10:
            len  = 5;
            skip = 4;
            break;
        case 11:
            len  = 9;
            skip = 4;
            break;
        case 14:
            len  = 6;
            skip = 4;
            break;
        case 15:
            /* 5th prefix bit selects len 10 or 11 */
            len  = ((show_bits(gb, 5) & 0x10) | 0xA0) >> 4;
            skip = 5;
            break;
        default:
            return AVERROR_INVALIDDATA;
        }

        skip_bits(gb, skip);
        *level = get_bits(gb, len);
        *map   = 1;
        *oskip = 0;
        max    = 1 << (len - 1);
        /* lower half of the magnitude range codes negative levels */
        if (*level < max)
            *level = -(max + *level);
    } else if (show_bits(gb, 3) & 4) {
        /* Long zero run. */
        skip_bits(gb, 3);
        if (mode == 1) {
            if (show_bits(gb, 4)) {
                if (show_bits(gb, 4) == 1) {
                    /* escape: 16-bit run */
                    skip_bits(gb, 4);
                    *oskip = get_bits(gb, 16);
                } else {
                    *oskip = get_bits(gb, 4);
                }
            } else {
                skip_bits(gb, 4);
                *oskip = get_bits(gb, 10);
            }
        } else if (mode == 0) {
            *oskip = get_bits(gb, 10);
        }
        *level = 0;
    } else {
        /* Short zero run (mode 1 uses it as an explicit "no skip"). */
        skip_bits(gb, 3);
        if (mode == 0)
            *oskip = get_bits(gb, 4);
        else if (mode == 1)
            *oskip = 0;
        *level = 0;
    }

    return 0;
}
177 
                               const int *quant_matrix, int *skip, int *dc_level)
{
    const uint8_t *scantable = s->permutated_scantable;
    int level, ret, map = 0;

    /* Decode one row of 8x8 intra blocks coefficient-plane first: for each
     * scan position i, the i-th coefficient of all blocks_w blocks is
     * decoded, so zero runs can span horizontally adjacent blocks.
     * s->wblocks holds the whole row (64 coeffs per block). */
    memset(s->wblocks, 0, s->wblocks_size);

    for (int i = 0; i < 64; i++) {
        int16_t *block = s->wblocks + scantable[i];

        for (int j = 0; j < s->blocks_w;) {
            if (*skip > 0) {
                int rskip;

                rskip = FFMIN(*skip, s->blocks_w - j);
                j += rskip;
                if (i == 0) {
                    /* skipped blocks still inherit the running DC predictor */
                    for (int k = 0; k < rskip; k++)
                        block[64 * k] = *dc_level * quant_matrix[0];
                }
                block += rskip * 64;
                *skip -= rskip;
            } else {
                ret = read_code(gb, skip, &level, &map, s->flags & 1);
                if (ret < 0)
                    return ret;

                /* DC is coded differentially along the row */
                if (i == 0)
                    *dc_level += level;

                block[0] = (i == 0 ? *dc_level : level) * quant_matrix[i];
                block += 64;
                j++;
            }
        }
    }

    return 0;
}
218 
                               const int *quant_matrix, int *skip,
                               int *map)
{
    const uint8_t *scantable = s->permutated_scantable;
    int level, ret;

    /* Inter variant of decode_intra_blocks: same coefficient-plane-first
     * layout in s->wblocks, but no DC prediction.  map[j] is set (by
     * read_code) for every block of the row that received at least one
     * coded coefficient. */
    memset(s->wblocks, 0, s->wblocks_size);
    memset(s->map, 0, s->map_size);

    for (int i = 0; i < 64; i++) {
        int16_t *block = s->wblocks + scantable[i];

        for (int j = 0; j < s->blocks_w;) {
            if (*skip > 0) {
                int rskip;

                rskip = FFMIN(*skip, s->blocks_w - j);
                j += rskip;
                block += rskip * 64;
                *skip -= rskip;
            } else {
                ret = read_code(gb, skip, &level, &map[j], s->flags & 1);
                if (ret < 0)
                    return ret;

                block[0] = level * quant_matrix[i];
                block += 64;
                j++;
            }
        }
    }

    return 0;
}
254 
                              const int *quant_matrix, int *skip, int *dc_level)
{
    const uint8_t *scantable = s->permutated_scantable;
    /* non-"plus" variants bias DC by 1024 (undone via the IDCT range) */
    const int offset = s->plus ? 0 : 1024;
    int16_t *block = s->block;
    int level, ret, map = 0;

    /* Decode a single 8x8 intra block into s->block.  Zero runs carried in
     * *skip may continue from the previous block. */
    memset(block, 0, sizeof(s->block));

    /* DC coefficient: differential against *dc_level */
    if (*skip > 0) {
        (*skip)--;
    } else {
        ret = read_code(gb, skip, &level, &map, s->flags & 1);
        if (ret < 0)
            return ret;
        *dc_level += level;
    }
    block[scantable[0]] = offset + *dc_level * quant_matrix[0];

    /* remaining 63 AC coefficients in scan order */
    for (int i = 1; i < 64;) {
        if (*skip > 0) {
            int rskip;

            rskip = FFMIN(*skip, 64 - i);
            i += rskip;
            *skip -= rskip;
        } else {
            ret = read_code(gb, skip, &level, &map, s->flags & 1);
            if (ret < 0)
                return ret;

            block[scantable[i]] = level * quant_matrix[i];
            i++;
        }
    }

    return 0;
}
294 
                              const int *quant_matrix, AVFrame *frame,
                              int plane)
{
    int ret, skip = 0, dc_level = 0;
    const int offset = s->plus ? 0 : 1024;

    /* Decode one whole intra plane.  Rows of blocks are written bottom-up
     * ((s->blocks_h - 1 - y) below): the image is stored upside down. */
    if ((ret = init_get_bits8(gb, s->gbyte.buffer, size)) < 0)
        return ret;

    if (s->flags & 1) {
        /* row-interleaved coefficient layout: decode a full block row
         * into s->wblocks, then IDCT each block */
        av_fast_padded_malloc(&s->wblocks, &s->wblocks_size,
                              64 * s->blocks_w * sizeof(*s->wblocks));
        if (!s->wblocks)
            return AVERROR(ENOMEM);

        for (int y = 0; y < s->blocks_h; y++) {
            ret = decode_intra_blocks(s, gb, quant_matrix, &skip, &dc_level);
            if (ret < 0)
                return ret;

            for (int x = 0; x < s->blocks_w; x++) {
                /* DC bias applied here instead of inside decode_intra_blocks */
                s->wblocks[64 * x] += offset;
                s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
                                 frame->linesize[plane], s->wblocks + 64 * x);
            }
        }
    } else {
        /* plain block-by-block layout */
        for (int y = 0; y < s->blocks_h; y++) {
            for (int x = 0; x < s->blocks_w; x++) {
                ret = decode_intra_block(s, gb, quant_matrix, &skip, &dc_level);
                if (ret < 0)
                    return ret;

                s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
                                 frame->linesize[plane], s->block);
            }
        }
    }

    align_get_bits(gb);
    if (get_bits_left(gb) < 0)
        av_log(s->avctx, AV_LOG_WARNING, "overread\n");
    if (get_bits_left(gb) > 0)
        av_log(s->avctx, AV_LOG_WARNING, "underread: %d\n", get_bits_left(gb));

    return 0;
}
343 
                              const int *quant_matrix, int *skip,
                              int *map)
{
    const uint8_t *scantable = s->permutated_scantable;
    int16_t *block = s->block;
    int level, ret;

    /* Decode a single 8x8 inter (residual) block into s->block; no DC
     * prediction.  *map is set by read_code when any coefficient is coded,
     * so the caller can skip the IDCT for all-zero blocks. */
    memset(block, 0, sizeof(s->block));

    for (int i = 0; i < 64;) {
        if (*skip > 0) {
            int rskip;

            rskip = FFMIN(*skip, 64 - i);
            i += rskip;
            *skip -= rskip;
        } else {
            ret = read_code(gb, skip, &level, map, s->flags & 1);
            if (ret < 0)
                return ret;

            block[scantable[i]] = level * quant_matrix[i];
            i++;
        }
    }

    return 0;
}
373 
                              const int *quant_matrix, AVFrame *frame,
                              AVFrame *prev, int plane)
{
    int ret, skip = 0;

    /* Decode one inter plane.  s->flags bit 0 selects the row-interleaved
     * coefficient layout (wblocks), bit 1 selects motion compensation;
     * the four branches below cover every combination.  As in the intra
     * path, rows are written bottom-up. */
    if ((ret = init_get_bits8(gb, s->gbyte.buffer, size)) < 0)
        return ret;

    if (s->flags == 3) {
        /* motion compensation + row-interleaved residuals */
        av_fast_padded_malloc(&s->wblocks, &s->wblocks_size,
                              64 * s->blocks_w * sizeof(*s->wblocks));
        if (!s->wblocks)
            return AVERROR(ENOMEM);

        av_fast_padded_malloc(&s->map, &s->map_size,
                              s->blocks_w * sizeof(*s->map));
        if (!s->map)
            return AVERROR(ENOMEM);

        for (int y = 0; y < s->blocks_h; y++) {
            ret = decode_inter_blocks(s, gb, quant_matrix, &skip, s->map);
            if (ret < 0)
                return ret;

            for (int x = 0; x < s->blocks_w; x++) {
                /* one MV per 16x16 luma macroblock; chroma (shift==0)
                 * halves the vector and the clip rectangle */
                int shift = plane == 0;
                int mvpos = (y >> shift) * (s->blocks_w >> shift) + (x >> shift);
                int orig_mv_x = s->mvectors[mvpos].x;
                int mv_x = s->mvectors[mvpos].x / (1 + !shift);
                int mv_y = s->mvectors[mvpos].y / (1 + !shift);
                int h = s->avctx->coded_height >> !shift;
                int w = s->avctx->coded_width  >> !shift;
                int map = s->map[x];

                /* mv_x < -32 marks an intra-coded block (no MC) */
                if (orig_mv_x >= -32) {
                    if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 > h ||
                        x * 8 + mv_x < 0 || x * 8 + mv_x + 8 > w)
                        return AVERROR_INVALIDDATA;

                    copy_block8(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
                                prev->data[plane] + ((s->blocks_h - 1 - y) * 8 - mv_y) * prev->linesize[plane] + (x * 8 + mv_x),
                                frame->linesize[plane], prev->linesize[plane], 8);
                    if (map) {
                        /* add residual; (v + 1) & 0xFFFC rounds to the
                         * multiple-of-4 grid the encoder uses */
                        s->idsp.idct(s->wblocks + x * 64);
                        for (int i = 0; i < 64; i++)
                            s->wblocks[i + x * 64] = (s->wblocks[i + x * 64] + 1) & 0xFFFC;
                        s->idsp.add_pixels_clamped(&s->wblocks[x*64], frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
                                                   frame->linesize[plane]);
                    }
                } else if (map) {
                    s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
                                     frame->linesize[plane], s->wblocks + x * 64);
                }
            }
        }
    } else if (s->flags & 2) {
        /* motion compensation + per-block residuals */
        for (int y = 0; y < s->blocks_h; y++) {
            for (int x = 0; x < s->blocks_w; x++) {
                int shift = plane == 0;
                int mvpos = (y >> shift) * (s->blocks_w >> shift) + (x >> shift);
                int orig_mv_x = s->mvectors[mvpos].x;
                int mv_x = s->mvectors[mvpos].x / (1 + !shift);
                int mv_y = s->mvectors[mvpos].y / (1 + !shift);
                int h = s->avctx->coded_height >> !shift;
                int w = s->avctx->coded_width  >> !shift;
                int map = 0;

                ret = decode_inter_block(s, gb, quant_matrix, &skip, &map);
                if (ret < 0)
                    return ret;

                if (orig_mv_x >= -32) {
                    if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 > h ||
                        x * 8 + mv_x < 0 || x * 8 + mv_x + 8 > w)
                        return AVERROR_INVALIDDATA;

                    copy_block8(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
                                prev->data[plane] + ((s->blocks_h - 1 - y) * 8 - mv_y) * prev->linesize[plane] + (x * 8 + mv_x),
                                frame->linesize[plane], prev->linesize[plane], 8);
                    if (map) {
                        s->idsp.idct(s->block);
                        for (int i = 0; i < 64; i++)
                            s->block[i] = (s->block[i] + 1) & 0xFFFC;
                        s->idsp.add_pixels_clamped(s->block, frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
                                                   frame->linesize[plane]);
                    }
                } else if (map) {
                    s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
                                     frame->linesize[plane], s->block);
                }
            }
        }
    } else if (s->flags & 1) {
        /* no MC: residuals added onto the frame copied from prev
         * (row-interleaved layout) */
        av_fast_padded_malloc(&s->wblocks, &s->wblocks_size,
                              64 * s->blocks_w * sizeof(*s->wblocks));
        if (!s->wblocks)
            return AVERROR(ENOMEM);

        av_fast_padded_malloc(&s->map, &s->map_size,
                              s->blocks_w * sizeof(*s->map));
        if (!s->map)
            return AVERROR(ENOMEM);

        for (int y = 0; y < s->blocks_h; y++) {
            ret = decode_inter_blocks(s, gb, quant_matrix, &skip, s->map);
            if (ret < 0)
                return ret;

            for (int x = 0; x < s->blocks_w; x++) {
                if (!s->map[x])
                    continue;
                s->idsp.idct_add(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
                                 frame->linesize[plane], s->wblocks + 64 * x);
            }
        }
    } else {
        /* no MC, per-block residuals */
        for (int y = 0; y < s->blocks_h; y++) {
            for (int x = 0; x < s->blocks_w; x++) {
                int map = 0;

                ret = decode_inter_block(s, gb, quant_matrix, &skip, &map);
                if (ret < 0)
                    return ret;

                if (!map)
                    continue;
                s->idsp.idct_add(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
                                 frame->linesize[plane], s->block);
            }
        }
    }

    align_get_bits(gb);
    if (get_bits_left(gb) < 0)
        av_log(s->avctx, AV_LOG_WARNING, "overread\n");
    if (get_bits_left(gb) > 0)
        av_log(s->avctx, AV_LOG_WARNING, "underread: %d\n", get_bits_left(gb));

    return 0;
}
515 
516 static void compute_quant_matrix(AGMContext *s, double qscale)
517 {
518  int luma[64], chroma[64];
519  double f = 1.0 - fabs(qscale);
520 
521  if (!s->key_frame && (s->flags & 2)) {
522  if (qscale >= 0.0) {
523  for (int i = 0; i < 64; i++) {
524  luma[i] = FFMAX(1, 16 * f);
525  chroma[i] = FFMAX(1, 16 * f);
526  }
527  } else {
528  for (int i = 0; i < 64; i++) {
529  luma[i] = FFMAX(1, 16 - qscale * 32);
530  chroma[i] = FFMAX(1, 16 - qscale * 32);
531  }
532  }
533  } else {
534  if (qscale >= 0.0) {
535  for (int i = 0; i < 64; i++) {
536  luma[i] = FFMAX(1, ff_mjpeg_std_luminance_quant_tbl [(i & 7) * 8 + (i >> 3)] * f);
537  chroma[i] = FFMAX(1, ff_mjpeg_std_chrominance_quant_tbl[(i & 7) * 8 + (i >> 3)] * f);
538  }
539  } else {
540  for (int i = 0; i < 64; i++) {
541  luma[i] = FFMAX(1, 255.0 - (255 - ff_mjpeg_std_luminance_quant_tbl [(i & 7) * 8 + (i >> 3)]) * f);
542  chroma[i] = FFMAX(1, 255.0 - (255 - ff_mjpeg_std_chrominance_quant_tbl[(i & 7) * 8 + (i >> 3)]) * f);
543  }
544  }
545  }
546 
547  for (int i = 0; i < 64; i++) {
548  int pos = ff_zigzag_direct[i];
549 
550  s->luma_quant_matrix[i] = luma[pos] * ((pos / 8) & 1 ? -1 : 1);
551  s->chroma_quant_matrix[i] = chroma[pos] * ((pos / 8) & 1 ? -1 : 1);
552  }
553 }
554 
{
    /* Raw intra RGB frame: bottom-up BGR24 raster where every component
     * byte is delta-coded against the previous pixel's same component
     * (running predictors r/g/b, modulo-256 wrap via uint8_t). */
    uint8_t *dst = frame->data[0] + (avctx->height - 1) * frame->linesize[0];
    uint8_t r = 0, g = 0, b = 0;

    if (bytestream2_get_bytes_left(gbyte) < 3 * avctx->width * avctx->height)
        return AVERROR_INVALIDDATA;

    for (int y = 0; y < avctx->height; y++) {
        for (int x = 0; x < avctx->width; x++) {
            dst[x*3+0] = bytestream2_get_byteu(gbyte) + r;
            r = dst[x*3+0];
            dst[x*3+1] = bytestream2_get_byteu(gbyte) + g;
            g = dst[x*3+1];
            dst[x*3+2] = bytestream2_get_byteu(gbyte) + b;
            b = dst[x*3+2];
        }
        dst -= frame->linesize[0];
    }

    return 0;
}
577 
/**
 * Add four delta bytes from @p fill to the planar 4:2:0 image, advancing a
 * three-state cursor over 2x2 luma cells.  Each cell consumes six bytes
 * (4 luma + U + V), so three calls of four bytes cover two cells; *np
 * tracks the phase, *nx/*ny the cell position in a w x h cell grid.
 * Rows advance upwards (pointers decrease), matching the bottom-up layout.
 *
 * Returns 1 once the cursor runs past the last cell row, 0 otherwise.
 * On return 1 the pointers are NOT written back.
 */
static int fill_pixels(uint8_t **y0, uint8_t **y1,
                       uint8_t **u, uint8_t **v,
                       int ylinesize, int ulinesize, int vlinesize,
                       uint8_t *fill,
                       int *nx, int *ny, int *np, int w, int h)
{
    uint8_t *row0 = *y0, *row1 = *y1, *uc = *u, *vc = *v;
    int col = *nx, row = *ny, state = *np;

    switch (state) {
    case 0:
        /* the four luma samples of the current 2x2 cell */
        row0[2 * col + 0] += fill[0];
        row0[2 * col + 1] += fill[1];
        row1[2 * col + 0] += fill[2];
        row1[2 * col + 1] += fill[3];
        state = 1;
        break;
    case 1:
        /* chroma of this cell, then the top lumas of the next cell */
        uc[col] += fill[0];
        vc[col] += fill[1];
        col++;
        if (col >= w) {
            col = 0;
            row++;
            if (row >= h)
                return 1;
            row0 -= 2 * ylinesize;
            row1 -= 2 * ylinesize;
            uc   -= ulinesize;
            vc   -= vlinesize;
        }
        row0[2 * col + 0] += fill[2];
        row0[2 * col + 1] += fill[3];
        state = 2;
        break;
    case 2:
        /* bottom lumas and chroma of the current cell */
        row1[2 * col + 0] += fill[0];
        row1[2 * col + 1] += fill[1];
        uc[col] += fill[2];
        vc[col] += fill[3];
        col++;
        if (col >= w) {
            col = 0;
            row++;
            if (row >= h)
                return 1;
            row0 -= 2 * ylinesize;
            row1 -= 2 * ylinesize;
            uc   -= ulinesize;
            vc   -= vlinesize;
        }
        state = 0;
        break;
    }

    *y0 = row0;
    *y1 = row1;
    *u  = uc;
    *v  = vc;
    *np = state;
    *nx = col;
    *ny = row;

    return 0;
}
642 
{
    /* Inter RGB frame as a run-length stream of 4-byte delta groups applied
     * to the bottom-up BGR24 raster.  A dword with top byte 0x77 starts an
     * RLE run: the low 24 bits give the repeat count of the following four
     * delta bytes.  Otherwise the peeked dword itself is consumed as four
     * literal delta bytes applied once. */
    uint8_t *dst = frame->data[0] + (avctx->height - 1) * frame->linesize[0];
    int runlen, y = 0, x = 0;
    uint8_t fill[4];
    unsigned code;

    while (bytestream2_get_bytes_left(gbyte) > 0) {
        code = bytestream2_peek_le32(gbyte);
        runlen = code & 0xFFFFFF;

        if (code >> 24 == 0x77) {
            bytestream2_skip(gbyte, 4);

            for (int i = 0; i < 4; i++)
                fill[i] = bytestream2_get_byte(gbyte);

            while (runlen > 0) {
                runlen--;

                for (int i = 0; i < 4; i++) {
                    dst[x] += fill[i];
                    x++;
                    if (x >= frame->width * 3) {
                        /* wrap to the row above (image stored bottom-up) */
                        x = 0;
                        y++;
                        dst -= frame->linesize[0];
                        if (y >= frame->height)
                            return 0;
                    }
                }
            }
        } else {
            for (int i = 0; i < 4; i++)
                fill[i] = bytestream2_get_byte(gbyte);

            for (int i = 0; i < 4; i++) {
                dst[x] += fill[i];
                x++;
                if (x >= frame->width * 3) {
                    x = 0;
                    y++;
                    dst -= frame->linesize[0];
                    if (y >= frame->height)
                        return 0;
                }
            }
        }
    }

    return 0;
}
695 
{
    /* Inter YUV420 frame: same 0x77-tagged RLE stream as the RGB variant,
     * but each 4-byte delta group is applied via fill_pixels, which walks
     * the three planes bottom-up in 2x2 luma cells. */
    uint8_t *y0dst = frame->data[0] + (avctx->height - 1) * frame->linesize[0];
    uint8_t *y1dst = y0dst - frame->linesize[0];
    uint8_t *udst = frame->data[1] + ((avctx->height >> 1) - 1) * frame->linesize[1];
    uint8_t *vdst = frame->data[2] + ((avctx->height >> 1) - 1) * frame->linesize[2];
    int runlen, y = 0, x = 0, pos = 0;
    uint8_t fill[4];
    unsigned code;

    while (bytestream2_get_bytes_left(gbyte) > 0) {
        code = bytestream2_peek_le32(gbyte);
        runlen = code & 0xFFFFFF;

        if (code >> 24 == 0x77) {
            bytestream2_skip(gbyte, 4);

            for (int i = 0; i < 4; i++)
                fill[i] = bytestream2_get_byte(gbyte);

            while (runlen > 0) {
                runlen--;

                /* returns 1 when the whole image has been covered */
                if (fill_pixels(&y0dst, &y1dst, &udst, &vdst,
                                frame->linesize[0],
                                frame->linesize[1],
                                frame->linesize[2],
                                fill, &x, &y, &pos,
                                avctx->width / 2,
                                avctx->height / 2))
                    return 0;
            }
        } else {
            for (int i = 0; i < 4; i++)
                fill[i] = bytestream2_get_byte(gbyte);

            if (fill_pixels(&y0dst, &y1dst, &udst, &vdst,
                            frame->linesize[0],
                            frame->linesize[1],
                            frame->linesize[2],
                            fill, &x, &y, &pos,
                            avctx->width / 2,
                            avctx->height / 2))
                return 0;
        }
    }

    return 0;
}
745 
{
    /* Raw intra YUV420 frame, bottom-up, in 2x2 luma cells: six bytes per
     * cell (4 Y, 1 U, 1 V), each delta-coded against the corresponding
     * sample of the previous cell (running predictors ly0..lv). */
    uint8_t *y0dst = frame->data[0] + (avctx->height - 1) * frame->linesize[0];
    uint8_t *y1dst = y0dst - frame->linesize[0];
    uint8_t *udst = frame->data[1] + ((avctx->height >> 1) - 1) * frame->linesize[1];
    uint8_t *vdst = frame->data[2] + ((avctx->height >> 1) - 1) * frame->linesize[2];
    uint8_t ly0 = 0, ly1 = 0, ly2 = 0, ly3 = 0, lu = 0, lv = 0;

    for (int y = 0; y < avctx->height / 2; y++) {
        for (int x = 0; x < avctx->width / 2; x++) {
            y0dst[x*2+0] = bytestream2_get_byte(gbyte) + ly0;
            ly0 = y0dst[x*2+0];
            y0dst[x*2+1] = bytestream2_get_byte(gbyte) + ly1;
            ly1 = y0dst[x*2+1];
            y1dst[x*2+0] = bytestream2_get_byte(gbyte) + ly2;
            ly2 = y1dst[x*2+0];
            y1dst[x*2+1] = bytestream2_get_byte(gbyte) + ly3;
            ly3 = y1dst[x*2+1];
            udst[x] = bytestream2_get_byte(gbyte) + lu;
            lu = udst[x];
            vdst[x] = bytestream2_get_byte(gbyte) + lv;
            lv = vdst[x];
        }

        y0dst -= 2*frame->linesize[0];
        y1dst -= 2*frame->linesize[0];
        udst -= frame->linesize[1];
        vdst -= frame->linesize[2];
    }

    return 0;
}
778 
{
    /* DCT intra frame: luma plane at full block resolution, then the two
     * chroma planes at half resolution (planes 2 and 1, i.e. V before U).
     * Plane bitstreams are laid out back to back; s->gbyte is advanced by
     * each plane's byte size between calls. */
    AGMContext *s = avctx->priv_data;
    int ret;

    /* map compression 0..100 to qscale -1.0 .. 1.0 */
    compute_quant_matrix(s, (2 * s->compression - 100) / 100.0);

    s->blocks_w = avctx->coded_width  >> 3;
    s->blocks_h = avctx->coded_height >> 3;

    ret = decode_intra_plane(s, gb, s->size[0], s->luma_quant_matrix, frame, 0);
    if (ret < 0)
        return ret;

    bytestream2_skip(&s->gbyte, s->size[0]);

    s->blocks_w = avctx->coded_width  >> 4;
    s->blocks_h = avctx->coded_height >> 4;

    ret = decode_intra_plane(s, gb, s->size[1], s->chroma_quant_matrix, frame, 2);
    if (ret < 0)
        return ret;

    bytestream2_skip(&s->gbyte, s->size[1]);

    s->blocks_w = avctx->coded_width  >> 4;
    s->blocks_h = avctx->coded_height >> 4;

    ret = decode_intra_plane(s, gb, s->size[2], s->chroma_quant_matrix, frame, 1);
    if (ret < 0)
        return ret;

    return 0;
}
813 
{
    /* Read one motion vector per 16x16 macroblock from the section of the
     * packet preceding the three plane bitstreams.  X components of all
     * vectors come first, then all Y components; read_code's run value
     * doubles as "leave the next 'skip' vectors zero". */
    AGMContext *s = avctx->priv_data;
    int nb_mvs = ((avctx->coded_height + 15) >> 4) * ((avctx->coded_width + 15) >> 4);
    int ret, skip = 0, value, map;

    av_fast_padded_malloc(&s->mvectors, &s->mvectors_size,
                          nb_mvs * sizeof(*s->mvectors));
    if (!s->mvectors)
        return AVERROR(ENOMEM);

    if ((ret = init_get_bits8(gb, s->gbyte.buffer, bytestream2_get_bytes_left(&s->gbyte) -
                              (s->size[0] + s->size[1] + s->size[2]))) < 0)
        return ret;

    memset(s->mvectors, 0, sizeof(*s->mvectors) * nb_mvs);

    for (int i = 0; i < nb_mvs; i++) {
        ret = read_code(gb, &skip, &value, &map, 1);
        if (ret < 0)
            return ret;
        s->mvectors[i].x = value;
        i += skip;
    }

    for (int i = 0; i < nb_mvs; i++) {
        ret = read_code(gb, &skip, &value, &map, 1);
        if (ret < 0)
            return ret;
        s->mvectors[i].y = value;
        i += skip;
    }

    if (get_bits_left(gb) <= 0)
        return AVERROR_INVALIDDATA;
    /* advance the byte reader past the MV section (round up to whole bytes) */
    skip = (get_bits_count(gb) >> 3) + 1;
    bytestream2_skip(&s->gbyte, skip);

    return 0;
}
854 
                        AVFrame *frame, AVFrame *prev)
{
    /* DCT inter frame: optional motion-vector section, then the three
     * plane bitstreams in the same Y, V, U order as decode_intra. */
    AGMContext *s = avctx->priv_data;
    int ret;

    compute_quant_matrix(s, (2 * s->compression - 100) / 100.0);

    /* flags bit 1: packet carries a motion-vector section */
    if (s->flags & 2) {
        ret = decode_motion_vectors(avctx, gb);
        if (ret < 0)
            return ret;
    }

    s->blocks_w = avctx->coded_width  >> 3;
    s->blocks_h = avctx->coded_height >> 3;

    ret = decode_inter_plane(s, gb, s->size[0], s->luma_quant_matrix, frame, prev, 0);
    if (ret < 0)
        return ret;

    bytestream2_skip(&s->gbyte, s->size[0]);

    s->blocks_w = avctx->coded_width  >> 4;
    s->blocks_h = avctx->coded_height >> 4;

    ret = decode_inter_plane(s, gb, s->size[1], s->chroma_quant_matrix, frame, prev, 2);
    if (ret < 0)
        return ret;

    bytestream2_skip(&s->gbyte, s->size[1]);

    s->blocks_w = avctx->coded_width  >> 4;
    s->blocks_h = avctx->coded_height >> 4;

    ret = decode_inter_plane(s, gb, s->size[2], s->chroma_quant_matrix, frame, prev, 1);
    if (ret < 0)
        return ret;

    return 0;
}
896 
/* Huffman tree node used by make_new_tree/get_tree_codes.
 * child values < 256 are leaf symbols, >= 256 index other nodes, -1 = unset. */
typedef struct Node {
    int parent;
    int child[2];
} Node;
901 
902 static void get_tree_codes(uint32_t *codes, Node *nodes, int idx, uint32_t pfx, int bitpos)
903 {
904  if (idx < 256 && idx >= 0) {
905  codes[idx] = pfx;
906  } else if (idx >= 0) {
907  get_tree_codes(codes, nodes, nodes[idx].child[0], pfx + (0 << bitpos), bitpos + 1);
908  get_tree_codes(codes, nodes, nodes[idx].child[1], pfx + (1U << bitpos), bitpos + 1);
909  }
910 }
911 
/* Rebuild the codec's (non-canonical) Huffman tree from per-symbol bit
 * lengths and emit each symbol's LSB-first code into codes[].
 * Symbols are bucketed by length (syms), then attached to the tree level by
 * level; node_idx[512..] acts as a stack of nodes still missing children.
 * Returns 0 on success or AVERROR_INVALIDDATA on an inconsistent length set.
 * NOTE(review): the exact attachment order mirrors the encoder and is
 * load-bearing — do not "simplify" this into a canonical-code builder. */
static int make_new_tree(const uint8_t *bitlens, uint32_t *codes)
{
    int zlcount = 0, curlen, idx, nindex, last, llast;
    int blcounts[32] = { 0 };
    int syms[8192];
    Node nodes[512];
    int node_idx[1024];
    int old_idx[512];

    /* bucket the 256 symbols by code length; zlcount counts zero-length
     * (unused) symbols */
    for (int i = 0; i < 256; i++) {
        int bitlen = bitlens[i];
        int blcount = blcounts[bitlen];

        zlcount += bitlen < 1;
        syms[(bitlen << 8) + blcount] = i;
        blcounts[bitlen]++;
    }

    for (int i = 0; i < 512; i++) {
        nodes[i].child[0] = -1;
        nodes[i].child[1] = -1;
    }

    for (int i = 0; i < 256; i++) {
        node_idx[i] = 257 + i;
    }

    curlen = 1;
    node_idx[512] = 256; /* index 256 is the root */
    last = 255;
    nindex = 1;

    for (curlen = 1; curlen < 32; curlen++) {
        if (blcounts[curlen] > 0) {
            int max_zlcount = zlcount + blcounts[curlen];

            /* attach all leaves of this code length to open nodes */
            for (int i = 0; zlcount < 256 && zlcount < max_zlcount; zlcount++, i++) {
                int p = node_idx[nindex - 1 + 512];
                int ch = syms[256 * curlen + i];

                if (nindex <= 0)
                    return AVERROR_INVALIDDATA;

                if (nodes[p].child[0] == -1) {
                    nodes[p].child[0] = ch;
                } else {
                    nodes[p].child[1] = ch;
                    nindex--; /* node full, pop it */
                }
                nodes[ch].parent = p;
            }
        }
        /* fill remaining open slots with fresh internal nodes for the
         * next level */
        llast = last - 1;
        idx = 0;
        while (nindex > 0) {
            int p, ch;

            last = llast - idx;
            p = node_idx[nindex - 1 + 512];
            ch = node_idx[last];
            if (nodes[p].child[0] == -1) {
                nodes[p].child[0] = ch;
            } else {
                nodes[p].child[1] = ch;
                nindex--;
            }
            old_idx[idx] = ch;
            nodes[ch].parent = p;
            if (idx == llast)
                goto next; /* ran out of spare nodes: tree complete */
            idx++;
            if (nindex <= 0) {
                /* all parents full: the new nodes become the open set */
                for (int i = 0; i < idx; i++)
                    node_idx[512 + i] = old_idx[i];
            }
        }
        nindex = idx;
    }

next:

    get_tree_codes(codes, nodes, 256, 0, 0);
    return 0;
}
996 
997 static int build_huff(const uint8_t *bitlen, VLC *vlc)
998 {
999  uint32_t new_codes[256];
1000  uint8_t bits[256];
1001  uint8_t symbols[256];
1002  uint32_t codes[256];
1003  int nb_codes = 0;
1004 
1005  int ret = make_new_tree(bitlen, new_codes);
1006  if (ret < 0)
1007  return ret;
1008 
1009  for (int i = 0; i < 256; i++) {
1010  if (bitlen[i]) {
1011  bits[nb_codes] = bitlen[i];
1012  codes[nb_codes] = new_codes[i];
1013  symbols[nb_codes] = i;
1014  nb_codes++;
1015  }
1016  }
1017 
1018  ff_free_vlc(vlc);
1019  return ff_init_vlc_sparse(vlc, 13, nb_codes,
1020  bits, 1, 1,
1021  codes, 4, 4,
1022  symbols, 1, 1,
1023  INIT_VLC_LE);
1024 }
1025 
/* Huffman-decompress the packet payload into s->output.
 * The stream starts with a 32-bit uncompressed size, then the 256 code
 * lengths — either directly (len bits each) or via a small codebook of up
 * to 8 entries indexed by 3-bit values — followed by the coded bytes. */
static int decode_huffman2(AVCodecContext *avctx, int header, int size)
{
    AGMContext *s = avctx->priv_data;
    GetBitContext *gb = &s->gb;
    uint8_t lens[256];
    int ret, x, len;

    if ((ret = init_get_bits8(gb, s->gbyte.buffer,
                              bytestream2_get_bytes_left(&s->gbyte))) < 0)
        return ret;

    s->output_size = get_bits_long(gb, 32);

    /* sanity bound on the claimed decompressed size */
    if (s->output_size > avctx->width * avctx->height * 9LL + 10000)
        return AVERROR_INVALIDDATA;

    av_fast_padded_malloc(&s->output, &s->padded_output_size, s->output_size);
    if (!s->output)
        return AVERROR(ENOMEM);

    x = get_bits(gb, 1);       /* 1: lengths via codebook, 0: direct */
    len = 4 + get_bits(gb, 1); /* field width for lengths/codebook entries */
    if (x) {
        int cb[8] = { 0 };
        int count = get_bits(gb, 3) + 1;

        for (int i = 0; i < count; i++)
            cb[i] = get_bits(gb, len);

        for (int i = 0; i < 256; i++) {
            int idx = get_bits(gb, 3);
            lens[i] = cb[idx];
        }
    } else {
        for (int i = 0; i < 256; i++)
            lens[i] = get_bits(gb, len);
    }

    if ((ret = build_huff(lens, &s->vlc)) < 0)
        return ret;

    x = 0;
    while (get_bits_left(gb) > 0 && x < s->output_size) {
        int val = get_vlc2(gb, s->vlc.table, s->vlc.bits, 3);
        if (val < 0)
            return AVERROR_INVALIDDATA;
        s->output[x++] = val;
    }

    return 0;
}
1077 
                        int *got_frame, AVPacket *avpkt)
{
    AGMContext *s = avctx->priv_data;
    GetBitContext *gb = &s->gb;
    GetByteContext *gbyte = &s->gbyte;
    int w, h, width, height, header;
    unsigned compressed_size;
    long skip;
    int ret;

    if (!avpkt->size)
        return 0;

    bytestream2_init(gbyte, avpkt->data, avpkt->size);

    /* packet layout: 32-bit header word (non-zero = Huffman-compressed
     * payload), then 29-bit bitstream size + 3 flag bits */
    header = bytestream2_get_le32(gbyte);
    s->fflags = bytestream2_get_le32(gbyte);
    s->bitstream_size = s->fflags & 0x1FFFFFFF;
    s->fflags >>= 29;
    av_log(avctx, AV_LOG_DEBUG, "fflags: %X\n", s->fflags);
    if (avpkt->size < s->bitstream_size + 8)
        return AVERROR_INVALIDDATA;

    s->key_frame = (avpkt->flags & AV_PKT_FLAG_KEY);
    frame->key_frame = s->key_frame;
    frame->pict_type = s->key_frame ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    if (!s->key_frame) {
        if (!s->prev_frame->data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (header) {
        /* AGM0/AGM1 use a different compression scheme, not implemented */
        if (avctx->codec_tag == MKTAG('A', 'G', 'M', '0') ||
            avctx->codec_tag == MKTAG('A', 'G', 'M', '1'))
            return AVERROR_PATCHWELCOME;
        else
            ret = decode_huffman2(avctx, header, (avpkt->size - s->bitstream_size) - 8);
        if (ret < 0)
            return ret;
        /* decode from the decompressed payload from here on */
        bytestream2_init(gbyte, s->output, s->output_size);
    } else if (!s->dct) {
        bytestream2_skip(gbyte, 4);
    }

    if (s->dct) {
        /* DCT payload header: signed dimensions (sign bits double as the
         * layout/MC flags), compression level, three plane sizes */
        s->flags = 0;
        w = bytestream2_get_le32(gbyte);
        h = bytestream2_get_le32(gbyte);
        if (w == INT32_MIN || h == INT32_MIN)
            return AVERROR_INVALIDDATA;
        if (w < 0) {
            w = -w;
            s->flags |= 2; /* motion compensation present */
        }
        if (h < 0) {
            h = -h;
            s->flags |= 1; /* row-interleaved coefficient layout */
        }

        width = avctx->width;
        height = avctx->height;
        if (w < width || h < height || w & 7 || h & 7)
            return AVERROR_INVALIDDATA;

        /* coded dimensions may exceed display dimensions; keep the
         * display size and crop later */
        ret = ff_set_dimensions(avctx, w, h);
        if (ret < 0)
            return ret;
        avctx->width = width;
        avctx->height = height;

        s->compression = bytestream2_get_le32(gbyte);
        if (s->compression < 0 || s->compression > 100)
            return AVERROR_INVALIDDATA;

        for (int i = 0; i < 3; i++)
            s->size[i] = bytestream2_get_le32(gbyte);
        if (header) {
            compressed_size = s->output_size;
            skip = 8LL;
        } else {
            compressed_size = avpkt->size;
            skip = 32LL;
        }
        /* plane sizes must fit inside the (decompressed) payload */
        if (s->size[0] < 0 || s->size[1] < 0 || s->size[2] < 0 ||
            skip + s->size[0] + s->size[1] + s->size[2] > compressed_size) {
            return AVERROR_INVALIDDATA;
        }
    }

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    if (frame->key_frame) {
        if (!s->dct && !s->rgb)
            ret = decode_raw_intra(avctx, gbyte, frame);
        else if (!s->dct && s->rgb)
            ret = decode_raw_intra_rgb(avctx, gbyte, frame);
        else
            ret = decode_intra(avctx, gb, frame);
    } else {
        if (s->prev_frame->width != frame->width ||
            s->prev_frame->height != frame->height)
            return AVERROR_INVALIDDATA;

        /* without MC the new frame starts as a copy of the reference */
        if (!(s->flags & 2)) {
            ret = av_frame_copy(frame, s->prev_frame);
            if (ret < 0)
                return ret;
        }

        if (s->dct) {
            ret = decode_inter(avctx, gb, frame, s->prev_frame);
        } else if (!s->dct && !s->rgb) {
            ret = decode_runlen(avctx, gbyte, frame);
        } else {
            ret = decode_runlen_rgb(avctx, gbyte, frame);
        }
    }
    if (ret < 0)
        return ret;

    av_frame_unref(s->prev_frame);
    if ((ret = av_frame_ref(s->prev_frame, frame)) < 0)
        return ret;

    /* planes are decoded bottom-up into the coded area, so the padding
     * ends up at the top/left: crop there, not bottom/right */
    frame->crop_top  = avctx->coded_height - avctx->height;
    frame->crop_left = avctx->coded_width - avctx->width;

    *got_frame = 1;

    return avpkt->size;
}
1214 
{
    /* Derive the sub-variant from the codec tag:
     *   AGM4          -> raw/RLE BGR24
     *   AGM5          -> raw/RLE YUV420
     *   AGM3 / AGM7   -> DCT "plus" variant (no 1024 DC bias)
     *   other tags    -> plain DCT variant */
    AGMContext *s = avctx->priv_data;

    s->rgb = avctx->codec_tag == MKTAG('A', 'G', 'M', '4');
    avctx->pix_fmt = s->rgb ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_YUV420P;
    s->avctx = avctx;
    s->plus = avctx->codec_tag == MKTAG('A', 'G', 'M', '3') ||
              avctx->codec_tag == MKTAG('A', 'G', 'M', '7');

    s->dct = avctx->codec_tag != MKTAG('A', 'G', 'M', '4') &&
             avctx->codec_tag != MKTAG('A', 'G', 'M', '5');

    /* the raw YUV420 paths work on 2x2 cells and need even dimensions */
    if (!s->rgb && !s->dct) {
        if ((avctx->width & 1) || (avctx->height & 1))
            return AVERROR_INVALIDDATA;
    }

    avctx->idct_algo = FF_IDCT_SIMPLE;
    ff_idctdsp_init(&s->idsp, avctx);
    ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
                         s->idsp.idct_permutation);

    s->prev_frame = av_frame_alloc();
    if (!s->prev_frame)
        return AVERROR(ENOMEM);

    return 0;
}
1244 
/* Drop the reference frame on seek/flush so the next packet must be a
 * key frame (decode_frame rejects inter frames without a reference). */
static void decode_flush(AVCodecContext *avctx)
{
    AGMContext *s = avctx->priv_data;

    av_frame_unref(s->prev_frame);
}
1251 
{
    /* Free all lazily grown buffers and reset their recorded sizes so the
     * av_fast_padded_malloc bookkeeping stays consistent. */
    AGMContext *s = avctx->priv_data;

    ff_free_vlc(&s->vlc);
    av_frame_free(&s->prev_frame);
    av_freep(&s->mvectors);
    s->mvectors_size = 0;
    av_freep(&s->wblocks);
    s->wblocks_size = 0;
    av_freep(&s->output);
    s->padded_output_size = 0;
    av_freep(&s->map);
    s->map_size = 0;

    return 0;
}
1269 
1271  .p.name = "agm",
1272  CODEC_LONG_NAME("Amuse Graphics Movie"),
1273  .p.type = AVMEDIA_TYPE_VIDEO,
1274  .p.id = AV_CODEC_ID_AGM,
1275  .p.capabilities = AV_CODEC_CAP_DR1,
1276  .priv_data_size = sizeof(AGMContext),
1277  .init = decode_init,
1278  .close = decode_close,
1280  .flush = decode_flush,
1281  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1283 };
fill_pixels
static av_always_inline int fill_pixels(uint8_t **y0, uint8_t **y1, uint8_t **u, uint8_t **v, int ylinesize, int ulinesize, int vlinesize, uint8_t *fill, int *nx, int *ny, int *np, int w, int h)
Definition: agm.c:578
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
level
uint8_t level
Definition: svq3.c:204
AGMContext::map_size
unsigned map_size
Definition: agm.c:81
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:839
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AGMContext::wblocks_size
unsigned wblocks_size
Definition: agm.c:78
mem_internal.h
MotionVector::y
int16_t y
Definition: agm.c:39
Node
Definition: agm.c:897
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:239
GetByteContext
Definition: bytestream.h:33
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
AGMContext::blocks_h
int blocks_h
Definition: agm.c:52
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: agm.c:1215
AGMContext::compression
int compression
Definition: agm.c:50
decode_inter
static int decode_inter(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame, AVFrame *prev)
Definition: agm.c:855
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:116
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
w
uint8_t w
Definition: llviddspenc.c:38
AVPacket::data
uint8_t * data
Definition: packet.h:374
b
#define b
Definition: input.c:41
chroma
static av_always_inline void chroma(WaveformContext *s, AVFrame *in, AVFrame *out, int component, int intensity, int offset_y, int offset_x, int column, int mirror, int jobnr, int nb_jobs)
Definition: vf_waveform.c:1635
AGMContext::padded_output_size
unsigned padded_output_size
Definition: agm.c:61
FFCodec
Definition: codec_internal.h:119
AGMContext::chroma_quant_matrix
int chroma_quant_matrix[64]
Definition: agm.c:72
AGMContext::block
int16_t block[64]
Definition: agm.c:75
copy_block8
static void copy_block8(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:47
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
max
#define max(a, b)
Definition: cuda_runtime.h:33
AGMContext::luma_quant_matrix
int luma_quant_matrix[64]
Definition: agm.c:71
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
decode_intra_blocks
static int decode_intra_blocks(AGMContext *s, GetBitContext *gb, const int *quant_matrix, int *skip, int *dc_level)
Definition: agm.c:178
INIT_VLC_LE
#define INIT_VLC_LE
Definition: vlc.h:99
AGMContext::vlc
VLC vlc
Definition: agm.c:67
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
decode_inter_block
static int decode_inter_block(AGMContext *s, GetBitContext *gb, const int *quant_matrix, int *skip, int *map)
Definition: agm.c:344
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:429
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:346
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: agm.c:1245
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
AGMContext
Definition: agm.c:42
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:123
FF_IDCT_SIMPLE
#define FF_IDCT_SIMPLE
Definition: avcodec.h:1423
MotionVector::x
int16_t x
Definition: agm.c:39
AGMContext::prev_frame
AVFrame * prev_frame
Definition: agm.c:69
decode_huffman2
static int decode_huffman2(AVCodecContext *avctx, int header, int size)
Definition: agm.c:1026
GetBitContext
Definition: get_bits.h:61
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:586
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:104
read_code
static int read_code(GetBitContext *gb, int *oskip, int *level, int *map, int mode)
Definition: agm.c:86
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
AGMContext::bitstream_size
int bitstream_size
Definition: agm.c:49
width
#define width
AGMContext::idsp
IDCTDSPContext idsp
Definition: agm.c:83
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:298
s
#define s(width, name)
Definition: cbs_vp9.c:256
AGMContext::output
uint8_t * output
Definition: agm.c:60
g
const char * g
Definition: vf_curves.c:127
AGMContext::avctx
AVCodecContext * avctx
Definition: agm.c:44
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:376
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:363
bits
uint8_t bits
Definition: vp3data.h:128
Node::parent
int parent
Definition: agm.c:898
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
decode.h
get_bits.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AGMContext::flags
unsigned flags
Definition: agm.c:57
decode_close
static av_cold int decode_close(AVCodecContext *avctx)
Definition: agm.c:1252
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:264
decode_inter_plane
static int decode_inter_plane(AGMContext *s, GetBitContext *gb, int size, const int *quant_matrix, AVFrame *frame, AVFrame *prev, int plane)
Definition: agm.c:374
if
if(ret)
Definition: filter_design.txt:179
ff_agm_decoder
const FFCodec ff_agm_decoder
Definition: agm.c:1270
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: agm.c:1078
decode_runlen_rgb
static int decode_runlen_rgb(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
Definition: agm.c:643
AGMContext::gb
GetBitContext gb
Definition: agm.c:45
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
AGMContext::mvectors_size
unsigned mvectors_size
Definition: agm.c:65
decode_motion_vectors
static int decode_motion_vectors(AVCodecContext *avctx, GetBitContext *gb)
Definition: agm.c:814
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:787
jpegquanttables.h
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
f
f
Definition: af_crystalizer.c:122
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1450
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
decode_intra_block
static int decode_intra_block(AGMContext *s, GetBitContext *gb, const int *quant_matrix, int *skip, int *dc_level)
Definition: agm.c:255
AVPacket::size
int size
Definition: packet.h:375
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:353
codec_internal.h
shift
static int shift(int a, int b)
Definition: bonk.c:253
av_frame_copy
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:771
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
Node::child
int child[2]
Definition: agm.c:899
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
decode_runlen
static int decode_runlen(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
Definition: agm.c:696
AGMContext::plus
int plus
Definition: agm.c:54
header
static const uint8_t header[24]
Definition: sdr2.c:67
height
#define height
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:380
MotionVector
Definition: agm.c:38
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:116
AGMContext::fflags
unsigned fflags
Definition: agm.c:58
AGMContext::output_size
unsigned output_size
Definition: agm.c:62
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
copy_block.h
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
ff_init_vlc_sparse
int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: vlc.c:272
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:49
ff_mjpeg_std_chrominance_quant_tbl
const uint8_t ff_mjpeg_std_chrominance_quant_tbl[64]
Definition: jpegquanttables.c:45
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:487
AGMContext::rgb
int rgb
Definition: agm.c:56
AVCodecContext::idct_algo
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
Definition: avcodec.h:1420
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:211
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:571
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:608
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: vlc.c:375
idctdsp.h
avcodec.h
AGMContext::gbyte
GetByteContext gbyte
Definition: agm.c:46
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AGMContext::size
int size[3]
Definition: agm.c:53
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AGMContext::dct
int dct
Definition: agm.c:55
AGMContext::map
int * map
Definition: agm.c:80
decode_intra_plane
static int decode_intra_plane(AGMContext *s, GetBitContext *gb, int size, const int *quant_matrix, AVFrame *frame, int plane)
Definition: agm.c:295
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:683
pos
unsigned int pos
Definition: spdifenc.c:412
IDCTDSPContext
Definition: idctdsp.h:44
AV_CODEC_ID_AGM
@ AV_CODEC_ID_AGM
Definition: codec_id.h:297
U
#define U(x)
Definition: vpx_arith.h:37
ff_mjpeg_std_luminance_quant_tbl
const uint8_t ff_mjpeg_std_luminance_quant_tbl[64]
Definition: jpegquanttables.c:35
AVCodecContext
main external API structure.
Definition: avcodec.h:398
decode_inter_blocks
static int decode_inter_blocks(AGMContext *s, GetBitContext *gb, const int *quant_matrix, int *skip, int *map)
Definition: agm.c:219
mode
mode
Definition: ebur128.h:83
VLC
Definition: vlc.h:31
decode_raw_intra_rgb
static int decode_raw_intra_rgb(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
Definition: agm.c:555
AGMContext::blocks_w
int blocks_w
Definition: agm.c:51
build_huff
static int build_huff(const uint8_t *bitlen, VLC *vlc)
Definition: agm.c:997
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:586
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AGMContext::key_frame
int key_frame
Definition: agm.c:48
map
const VDPAUPixFmtMap * map
Definition: hwcontext_vdpau.c:71
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:423
AGMContext::wblocks
int16_t * wblocks
Definition: agm.c:77
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:425
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:571
AGMContext::permutated_scantable
uint8_t permutated_scantable[64]
Definition: agm.c:74
decode_raw_intra
static int decode_raw_intra(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
Definition: agm.c:746
bytestream.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:370
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
AGMContext::mvectors
MotionVector * mvectors
Definition: agm.c:64
make_new_tree
static int make_new_tree(const uint8_t *bitlens, uint32_t *codes)
Definition: agm.c:912
get_tree_codes
static void get_tree_codes(uint32_t *codes, Node *nodes, int idx, uint32_t pfx, int bitpos)
Definition: agm.c:902
decode_intra
static int decode_intra(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame)
Definition: agm.c:779
compute_quant_matrix
static void compute_quant_matrix(AGMContext *s, double qscale)
Definition: agm.c:516