FFmpeg
agm.c
1 /*
2  * Amuse Graphics Movie decoder
3  *
4  * Copyright (c) 2018 Paul B Mahol
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 
27 #define BITSTREAM_READER_LE
28 
29 #include "avcodec.h"
30 #include "bytestream.h"
31 #include "copy_block.h"
32 #include "get_bits.h"
33 #include "idctdsp.h"
34 #include "internal.h"
35 
36 static const uint8_t unscaled_luma[64] = {
37  16, 11, 10, 16, 24, 40, 51, 61, 12, 12, 14, 19,
38  26, 58, 60, 55, 14, 13, 16, 24, 40, 57, 69, 56,
39  14, 17, 22, 29, 51, 87, 80, 62, 18, 22, 37, 56,
40  68,109,103, 77, 24, 35, 55, 64, 81,104,113, 92,
41  49, 64, 78, 87,103,121,120,101, 72, 92, 95, 98,
42  112,100,103,99
43 };
44 
45 static const uint8_t unscaled_chroma[64] = {
46  17, 18, 24, 47, 99, 99, 99, 99, 18, 21, 26, 66,
47  99, 99, 99, 99, 24, 26, 56, 99, 99, 99, 99, 99,
48  47, 66, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
49  99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
50  99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
51  99, 99, 99, 99
52 };
53 
54 typedef struct MotionVector {
55  int16_t x, y;
56 } MotionVector;
57 
58 typedef struct AGMContext {
59  const AVClass *class;
60  AVCodecContext *avctx;
61  GetBitContext gb;
62  GetByteContext gbyte;
63 
64  int key_frame;
65  int bitstream_size;
66  int compression;
67  int blocks_w;
68  int blocks_h;
69  int size[3];
70  int plus;
71  int dct;
72  int rgb;
73  unsigned flags;
74  unsigned fflags;
75 
76  uint8_t *output;
77  unsigned padded_output_size;
78  unsigned output_size;
79 
80  MotionVector *mvectors;
81  unsigned mvectors_size;
82 
83  VLC vlc;
84 
85  AVFrame *prev_frame;
86 
87  int luma_quant_matrix[64];
88  int chroma_quant_matrix[64];
89 
90  ScanTable scantable;
91  DECLARE_ALIGNED(32, int16_t, block)[64];
92 
93  int16_t *wblocks;
94  unsigned wblocks_size;
95 
96  int *map;
97  unsigned map_size;
98 
99  IDCTDSPContext idsp;
100 } AGMContext;
101 
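/*
 * read_code() parses one variable-length code and returns either a non-zero
 * coefficient in *level (with *map set to 1) or a skip count in *oskip telling
 * the caller how many positions to leave untouched.  A short bit prefix selects
 * the magnitude length; magnitudes below half of their range are mapped to
 * negative levels.  mode (bit 0 of the frame flags) changes how the larger
 * skip counts are coded.
 */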
102 static int read_code(GetBitContext *gb, int *oskip, int *level, int *map, int mode)
103 {
104  int len = 0, skip = 0, max;
105 
106  if (get_bits_left(gb) < 2)
107  return AVERROR_INVALIDDATA;
108 
109  if (show_bits(gb, 2)) {
110  switch (show_bits(gb, 4)) {
111  case 1:
112  case 9:
113  len = 1;
114  skip = 3;
115  break;
116  case 2:
117  len = 3;
118  skip = 4;
119  break;
120  case 3:
121  len = 7;
122  skip = 4;
123  break;
124  case 5:
125  case 13:
126  len = 2;
127  skip = 3;
128  break;
129  case 6:
130  len = 4;
131  skip = 4;
132  break;
133  case 7:
134  len = 8;
135  skip = 4;
136  break;
137  case 10:
138  len = 5;
139  skip = 4;
140  break;
141  case 11:
142  len = 9;
143  skip = 4;
144  break;
145  case 14:
146  len = 6;
147  skip = 4;
148  break;
149  case 15:
150  len = ((show_bits(gb, 5) & 0x10) | 0xA0) >> 4;
151  skip = 5;
152  break;
153  default:
154  return AVERROR_INVALIDDATA;
155  }
156 
157  skip_bits(gb, skip);
158  *level = get_bits(gb, len);
159  *map = 1;
160  *oskip = 0;
161  max = 1 << (len - 1);
162  if (*level < max)
163  *level = -(max + *level);
164  } else if (show_bits(gb, 3) & 4) {
165  skip_bits(gb, 3);
166  if (mode == 1) {
167  if (show_bits(gb, 4)) {
168  if (show_bits(gb, 4) == 1) {
169  skip_bits(gb, 4);
170  *oskip = get_bits(gb, 16);
171  } else {
172  *oskip = get_bits(gb, 4);
173  }
174  } else {
175  skip_bits(gb, 4);
176  *oskip = get_bits(gb, 10);
177  }
178  } else if (mode == 0) {
179  *oskip = get_bits(gb, 10);
180  }
181  *level = 0;
182  } else {
183  skip_bits(gb, 3);
184  if (mode == 0)
185  *oskip = get_bits(gb, 4);
186  else if (mode == 1)
187  *oskip = 0;
188  *level = 0;
189  }
190 
191  return 0;
192 }
193 
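/*
 * Row-interleaved intra decoding: for each of the 64 scan positions the inner
 * loop walks across a whole row of 8x8 blocks, so s->wblocks holds the
 * coefficients of an entire block row at once.  DC levels are delta-coded from
 * block to block, and skips replicate the current DC prediction into the
 * skipped blocks.
 */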
194 static int decode_intra_blocks(AGMContext *s, GetBitContext *gb,
195  const int *quant_matrix, int *skip, int *dc_level)
196 {
197  const uint8_t *scantable = s->scantable.permutated;
198  int level, ret, map = 0;
199 
200  memset(s->wblocks, 0, s->wblocks_size);
201 
202  for (int i = 0; i < 64; i++) {
203  int16_t *block = s->wblocks + scantable[i];
204 
205  for (int j = 0; j < s->blocks_w;) {
206  if (*skip > 0) {
207  int rskip;
208 
209  rskip = FFMIN(*skip, s->blocks_w - j);
210  j += rskip;
211  if (i == 0) {
212  for (int k = 0; k < rskip; k++)
213  block[64 * k] = *dc_level * quant_matrix[0];
214  }
215  block += rskip * 64;
216  *skip -= rskip;
217  } else {
218  ret = read_code(gb, skip, &level, &map, s->flags & 1);
219  if (ret < 0)
220  return ret;
221 
222  if (i == 0)
223  *dc_level += level;
224 
225  block[0] = (i == 0 ? *dc_level : level) * quant_matrix[i];
226  block += 64;
227  j++;
228  }
229  }
230  }
231 
232  return 0;
233 }
234 
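/*
 * Inter variant of the row-interleaved decoder: no DC prediction, but map[]
 * records which blocks in the row actually received coefficients so the caller
 * can restrict the IDCT to coded blocks.
 */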
235 static int decode_inter_blocks(AGMContext *s, GetBitContext *gb,
236  const int *quant_matrix, int *skip,
237  int *map)
238 {
239  const uint8_t *scantable = s->scantable.permutated;
240  int level, ret;
241 
242  memset(s->wblocks, 0, s->wblocks_size);
243  memset(s->map, 0, s->map_size);
244 
245  for (int i = 0; i < 64; i++) {
246  int16_t *block = s->wblocks + scantable[i];
247 
248  for (int j = 0; j < s->blocks_w;) {
249  if (*skip > 0) {
250  int rskip;
251 
252  rskip = FFMIN(*skip, s->blocks_w - j);
253  j += rskip;
254  block += rskip * 64;
255  *skip -= rskip;
256  } else {
257  ret = read_code(gb, skip, &level, &map[j], s->flags & 1);
258  if (ret < 0)
259  return ret;
260 
261  block[0] = level * quant_matrix[i];
262  block += 64;
263  j++;
264  }
265  }
266  }
267 
268  return 0;
269 }
270 
271 static int decode_intra_block(AGMContext *s, GetBitContext *gb,
272  const int *quant_matrix, int *skip, int *dc_level)
273 {
274  const uint8_t *scantable = s->scantable.permutated;
275  const int offset = s->plus ? 0 : 1024;
276  int16_t *block = s->block;
277  int level, ret, map = 0;
278 
279  memset(block, 0, sizeof(s->block));
280 
281  if (*skip > 0) {
282  (*skip)--;
283  } else {
284  ret = read_code(gb, skip, &level, &map, s->flags & 1);
285  if (ret < 0)
286  return ret;
287  *dc_level += level;
288  }
289  block[scantable[0]] = offset + *dc_level * quant_matrix[0];
290 
291  for (int i = 1; i < 64;) {
292  if (*skip > 0) {
293  int rskip;
294 
295  rskip = FFMIN(*skip, 64 - i);
296  i += rskip;
297  *skip -= rskip;
298  } else {
299  ret = read_code(gb, skip, &level, &map, s->flags & 1);
300  if (ret < 0)
301  return ret;
302 
303  block[scantable[i]] = level * quant_matrix[i];
304  i++;
305  }
306  }
307 
308  return 0;
309 }
310 
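/*
 * Decodes one plane of an intra frame.  Bit 0 of the frame flags selects the
 * row-interleaved path (decode_intra_blocks into s->wblocks) or the
 * block-by-block path; in both cases block row y is written bottom-up to
 * picture row (blocks_h - 1 - y).  The DC offset of 1024 is dropped for the
 * "plus" variants (AGM3/AGM7), which presumably carry it in the coefficients
 * themselves.
 */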
311 static int decode_intra_plane(AGMContext *s, GetBitContext *gb, int size,
312  const int *quant_matrix, AVFrame *frame,
313  int plane)
314 {
315  int ret, skip = 0, dc_level = 0;
316  const int offset = s->plus ? 0 : 1024;
317 
318  if ((ret = init_get_bits8(gb, s->gbyte.buffer, size)) < 0)
319  return ret;
320 
321  if (s->flags & 1) {
322  av_fast_padded_malloc(&s->wblocks, &s->wblocks_size,
323  64 * s->blocks_w * sizeof(*s->wblocks));
324  if (!s->wblocks)
325  return AVERROR(ENOMEM);
326 
327  for (int y = 0; y < s->blocks_h; y++) {
328  ret = decode_intra_blocks(s, gb, quant_matrix, &skip, &dc_level);
329  if (ret < 0)
330  return ret;
331 
332  for (int x = 0; x < s->blocks_w; x++) {
333  s->wblocks[64 * x] += offset;
334  s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
335  frame->linesize[plane], s->wblocks + 64 * x);
336  }
337  }
338  } else {
339  for (int y = 0; y < s->blocks_h; y++) {
340  for (int x = 0; x < s->blocks_w; x++) {
341  ret = decode_intra_block(s, gb, quant_matrix, &skip, &dc_level);
342  if (ret < 0)
343  return ret;
344 
345  s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
346  frame->linesize[plane], s->block);
347  }
348  }
349  }
350 
351  align_get_bits(gb);
352  if (get_bits_left(gb) < 0)
353  av_log(s->avctx, AV_LOG_WARNING, "overread\n");
354  if (get_bits_left(gb) > 0)
355  av_log(s->avctx, AV_LOG_WARNING, "underread: %d\n", get_bits_left(gb));
356 
357  return 0;
358 }
359 
360 static int decode_inter_block(AGMContext *s, GetBitContext *gb,
361  const int *quant_matrix, int *skip,
362  int *map)
363 {
364  const uint8_t *scantable = s->scantable.permutated;
365  int16_t *block = s->block;
366  int level, ret;
367 
368  memset(block, 0, sizeof(s->block));
369 
370  for (int i = 0; i < 64;) {
371  if (*skip > 0) {
372  int rskip;
373 
374  rskip = FFMIN(*skip, 64 - i);
375  i += rskip;
376  *skip -= rskip;
377  } else {
378  ret = read_code(gb, skip, &level, map, s->flags & 1);
379  if (ret < 0)
380  return ret;
381 
382  block[scantable[i]] = level * quant_matrix[i];
383  i++;
384  }
385  }
386 
387  return 0;
388 }
389 
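/*
 * Decodes one plane of an inter frame.  Bit 1 of the frame flags enables
 * motion compensation (copy_block8 from the previous frame plus an optional
 * residual IDCT), bit 0 enables the row-interleaved coefficient layout; with
 * neither set, coded blocks are simply IDCT-added on top of the copied
 * previous frame.  A motion vector whose x component is below -32 appears to
 * mark an intra block, reconstructed with idct_put instead.  Chroma reuses the
 * luma vectors halved, since there is one vector per 16x16 macroblock.
 */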
390 static int decode_inter_plane(AGMContext *s, GetBitContext *gb, int size,
391  const int *quant_matrix, AVFrame *frame,
392  AVFrame *prev, int plane)
393 {
394  int ret, skip = 0;
395 
396  if ((ret = init_get_bits8(gb, s->gbyte.buffer, size)) < 0)
397  return ret;
398 
399  if (s->flags == 3) {
400  av_fast_padded_malloc(&s->wblocks, &s->wblocks_size,
401  64 * s->blocks_w * sizeof(*s->wblocks));
402  if (!s->wblocks)
403  return AVERROR(ENOMEM);
404 
405  av_fast_padded_malloc(&s->map, &s->map_size,
406  s->blocks_w * sizeof(*s->map));
407  if (!s->map)
408  return AVERROR(ENOMEM);
409 
410  for (int y = 0; y < s->blocks_h; y++) {
411  ret = decode_inter_blocks(s, gb, quant_matrix, &skip, s->map);
412  if (ret < 0)
413  return ret;
414 
415  for (int x = 0; x < s->blocks_w; x++) {
416  int shift = plane == 0;
417  int mvpos = (y >> shift) * (s->blocks_w >> shift) + (x >> shift);
418  int orig_mv_x = s->mvectors[mvpos].x;
419  int mv_x = s->mvectors[mvpos].x / (1 + !shift);
420  int mv_y = s->mvectors[mvpos].y / (1 + !shift);
421  int h = s->avctx->coded_height >> !shift;
422  int w = s->avctx->coded_width >> !shift;
423  int map = s->map[x];
424 
425  if (orig_mv_x >= -32) {
426  if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 > h ||
427  x * 8 + mv_x < 0 || x * 8 + mv_x + 8 > w)
428  return AVERROR_INVALIDDATA;
429 
430  copy_block8(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
431  prev->data[plane] + ((s->blocks_h - 1 - y) * 8 - mv_y) * prev->linesize[plane] + (x * 8 + mv_x),
432  frame->linesize[plane], prev->linesize[plane], 8);
433  if (map) {
434  s->idsp.idct(s->wblocks + x * 64);
435  for (int i = 0; i < 64; i++)
436  s->wblocks[i + x * 64] = (s->wblocks[i + x * 64] + 1) & 0xFFFC;
437  s->idsp.add_pixels_clamped(&s->wblocks[x*64], frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
438  frame->linesize[plane]);
439  }
440  } else if (map) {
441  s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
442  frame->linesize[plane], s->wblocks + x * 64);
443  }
444  }
445  }
446  } else if (s->flags & 2) {
447  for (int y = 0; y < s->blocks_h; y++) {
448  for (int x = 0; x < s->blocks_w; x++) {
449  int shift = plane == 0;
450  int mvpos = (y >> shift) * (s->blocks_w >> shift) + (x >> shift);
451  int orig_mv_x = s->mvectors[mvpos].x;
452  int mv_x = s->mvectors[mvpos].x / (1 + !shift);
453  int mv_y = s->mvectors[mvpos].y / (1 + !shift);
454  int h = s->avctx->coded_height >> !shift;
455  int w = s->avctx->coded_width >> !shift;
456  int map = 0;
457 
458  ret = decode_inter_block(s, gb, quant_matrix, &skip, &map);
459  if (ret < 0)
460  return ret;
461 
462  if (orig_mv_x >= -32) {
463  if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 > h ||
464  x * 8 + mv_x < 0 || x * 8 + mv_x + 8 > w)
465  return AVERROR_INVALIDDATA;
466 
467  copy_block8(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
468  prev->data[plane] + ((s->blocks_h - 1 - y) * 8 - mv_y) * prev->linesize[plane] + (x * 8 + mv_x),
469  frame->linesize[plane], prev->linesize[plane], 8);
470  if (map) {
471  s->idsp.idct(s->block);
472  for (int i = 0; i < 64; i++)
473  s->block[i] = (s->block[i] + 1) & 0xFFFC;
474  s->idsp.add_pixels_clamped(s->block, frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
475  frame->linesize[plane]);
476  }
477  } else if (map) {
478  s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
479  frame->linesize[plane], s->block);
480  }
481  }
482  }
483  } else if (s->flags & 1) {
484  av_fast_padded_malloc(&s->wblocks, &s->wblocks_size,
485  64 * s->blocks_w * sizeof(*s->wblocks));
486  if (!s->wblocks)
487  return AVERROR(ENOMEM);
488 
489  av_fast_padded_malloc(&s->map, &s->map_size,
490  s->blocks_w * sizeof(*s->map));
491  if (!s->map)
492  return AVERROR(ENOMEM);
493 
494  for (int y = 0; y < s->blocks_h; y++) {
495  ret = decode_inter_blocks(s, gb, quant_matrix, &skip, s->map);
496  if (ret < 0)
497  return ret;
498 
499  for (int x = 0; x < s->blocks_w; x++) {
500  if (!s->map[x])
501  continue;
502  s->idsp.idct_add(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
503  frame->linesize[plane], s->wblocks + 64 * x);
504  }
505  }
506  } else {
507  for (int y = 0; y < s->blocks_h; y++) {
508  for (int x = 0; x < s->blocks_w; x++) {
509  int map = 0;
510 
511  ret = decode_inter_block(s, gb, quant_matrix, &skip, &map);
512  if (ret < 0)
513  return ret;
514 
515  if (!map)
516  continue;
517  s->idsp.idct_add(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
518  frame->linesize[plane], s->block);
519  }
520  }
521  }
522 
523  align_get_bits(gb);
524  if (get_bits_left(gb) < 0)
525  av_log(s->avctx, AV_LOG_WARNING, "overread\n");
526  if (get_bits_left(gb) > 0)
527  av_log(s->avctx, AV_LOG_WARNING, "underread: %d\n", get_bits_left(gb));
528 
529  return 0;
530 }
531 
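/*
 * Builds the quantization matrices from the compression level, which
 * decode_frame() maps to a qscale in [-1, 1].  Normally a positive qscale
 * scales the JPEG-style base tables down and a negative one pushes them
 * towards 255; motion-compensated inter frames instead use a flat table
 * derived from 16.  The matrices are stored in scan order, and entries from
 * odd coefficient rows are negated, presumably to compensate for the vertical
 * flip introduced by writing block rows bottom-up.
 */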
532 static void compute_quant_matrix(AGMContext *s, double qscale)
533 {
534  int luma[64], chroma[64];
535  double f = 1.0 - fabs(qscale);
536 
537  if (!s->key_frame && (s->flags & 2)) {
538  if (qscale >= 0.0) {
539  for (int i = 0; i < 64; i++) {
540  luma[i] = FFMAX(1, 16 * f);
541  chroma[i] = FFMAX(1, 16 * f);
542  }
543  } else {
544  for (int i = 0; i < 64; i++) {
545  luma[i] = FFMAX(1, 16 - qscale * 32);
546  chroma[i] = FFMAX(1, 16 - qscale * 32);
547  }
548  }
549  } else {
550  if (qscale >= 0.0) {
551  for (int i = 0; i < 64; i++) {
552  luma[i] = FFMAX(1, unscaled_luma [(i & 7) * 8 + (i >> 3)] * f);
553  chroma[i] = FFMAX(1, unscaled_chroma[(i & 7) * 8 + (i >> 3)] * f);
554  }
555  } else {
556  for (int i = 0; i < 64; i++) {
557  luma[i] = FFMAX(1, 255.0 - (255 - unscaled_luma [(i & 7) * 8 + (i >> 3)]) * f);
558  chroma[i] = FFMAX(1, 255.0 - (255 - unscaled_chroma[(i & 7) * 8 + (i >> 3)]) * f);
559  }
560  }
561  }
562 
563  for (int i = 0; i < 64; i++) {
564  int pos = ff_zigzag_direct[i];
565 
566  s->luma_quant_matrix[i] = luma[pos] * ((pos / 8) & 1 ? -1 : 1);
567  s->chroma_quant_matrix[i] = chroma[pos] * ((pos / 8) & 1 ? -1 : 1);
568  }
569 }
570 
571 static int decode_raw_intra_rgb(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
572 {
573  uint8_t *dst = frame->data[0] + (avctx->height - 1) * frame->linesize[0];
574  uint8_t r = 0, g = 0, b = 0;
575 
576  if (bytestream2_get_bytes_left(gbyte) < 3 * avctx->width * avctx->height)
577  return AVERROR_INVALIDDATA;
578 
579  for (int y = 0; y < avctx->height; y++) {
580  for (int x = 0; x < avctx->width; x++) {
581  dst[x*3+0] = bytestream2_get_byteu(gbyte) + r;
582  r = dst[x*3+0];
583  dst[x*3+1] = bytestream2_get_byteu(gbyte) + g;
584  g = dst[x*3+1];
585  dst[x*3+2] = bytestream2_get_byteu(gbyte) + b;
586  b = dst[x*3+2];
587  }
588  dst -= frame->linesize[0];
589  }
590 
591  return 0;
592 }
593 
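/*
 * Helper for the YUV 4:2:0 run-length path: each group of four delta bytes is
 * spread in a three-phase rotation over a 2x2 luma patch and the corresponding
 * U/V samples, advancing bottom-up through the image two luma rows at a time.
 * Returns 1 once the last row pair has been consumed.
 */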
594 static int fill_pixels(uint8_t **y0, uint8_t **y1,
595  uint8_t **u, uint8_t **v,
596  int ylinesize, int ulinesize, int vlinesize,
597  uint8_t *fill,
598  int *nx, int *ny, int *np, int w, int h)
599 {
600  uint8_t *y0dst = *y0;
601  uint8_t *y1dst = *y1;
602  uint8_t *udst = *u;
603  uint8_t *vdst = *v;
604  int x = *nx, y = *ny, pos = *np;
605 
606  if (pos == 0) {
607  y0dst[2*x+0] += fill[0];
608  y0dst[2*x+1] += fill[1];
609  y1dst[2*x+0] += fill[2];
610  y1dst[2*x+1] += fill[3];
611  pos++;
612  } else if (pos == 1) {
613  udst[x] += fill[0];
614  vdst[x] += fill[1];
615  x++;
616  if (x >= w) {
617  x = 0;
618  y++;
619  if (y >= h)
620  return 1;
621  y0dst -= 2*ylinesize;
622  y1dst -= 2*ylinesize;
623  udst -= ulinesize;
624  vdst -= vlinesize;
625  }
626  y0dst[2*x+0] += fill[2];
627  y0dst[2*x+1] += fill[3];
628  pos++;
629  } else if (pos == 2) {
630  y1dst[2*x+0] += fill[0];
631  y1dst[2*x+1] += fill[1];
632  udst[x] += fill[2];
633  vdst[x] += fill[3];
634  x++;
635  if (x >= w) {
636  x = 0;
637  y++;
638  if (y >= h)
639  return 1;
640  y0dst -= 2*ylinesize;
641  y1dst -= 2*ylinesize;
642  udst -= ulinesize;
643  vdst -= vlinesize;
644  }
645  pos = 0;
646  }
647 
648  *y0 = y0dst;
649  *y1 = y1dst;
650  *u = udst;
651  *v = vdst;
652  *np = pos;
653  *nx = x;
654  *ny = y;
655 
656  return 0;
657 }
658 
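/*
 * Inter frames of the non-DCT variants are coded as byte deltas against the
 * previous frame, which decode_frame() has already copied into the output
 * picture.  A 32-bit word whose top byte is 0x77 starts a run: its low 24 bits
 * give a repeat count for the following four delta bytes; otherwise four
 * literal delta bytes are applied once.  This is the packed BGR24 variant,
 * walking the image bottom-up.
 */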
659 static int decode_runlen_rgb(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
660 {
661  uint8_t *dst = frame->data[0] + (avctx->height - 1) * frame->linesize[0];
662  int runlen, y = 0, x = 0;
663  uint8_t fill[4];
664  unsigned code;
665 
666  while (bytestream2_get_bytes_left(gbyte) > 0) {
667  code = bytestream2_peek_le32(gbyte);
668  runlen = code & 0xFFFFFF;
669 
670  if (code >> 24 == 0x77) {
671  bytestream2_skip(gbyte, 4);
672 
673  for (int i = 0; i < 4; i++)
674  fill[i] = bytestream2_get_byte(gbyte);
675 
676  while (runlen > 0) {
677  runlen--;
678 
679  for (int i = 0; i < 4; i++) {
680  dst[x] += fill[i];
681  x++;
682  if (x >= frame->width * 3) {
683  x = 0;
684  y++;
685  dst -= frame->linesize[0];
686  if (y >= frame->height)
687  return 0;
688  }
689  }
690  }
691  } else {
692  for (int i = 0; i < 4; i++)
693  fill[i] = bytestream2_get_byte(gbyte);
694 
695  for (int i = 0; i < 4; i++) {
696  dst[x] += fill[i];
697  x++;
698  if (x >= frame->width * 3) {
699  x = 0;
700  y++;
701  dst -= frame->linesize[0];
702  if (y >= frame->height)
703  return 0;
704  }
705  }
706  }
707  }
708 
709  return 0;
710 }
711 
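/*
 * Same run-length delta scheme as decode_runlen_rgb() above, but for planar
 * YUV 4:2:0: the four delta bytes of each group are distributed over luma and
 * chroma by fill_pixels().
 */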
712 static int decode_runlen(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
713 {
714  uint8_t *y0dst = frame->data[0] + (avctx->height - 1) * frame->linesize[0];
715  uint8_t *y1dst = y0dst - frame->linesize[0];
716  uint8_t *udst = frame->data[1] + ((avctx->height >> 1) - 1) * frame->linesize[1];
717  uint8_t *vdst = frame->data[2] + ((avctx->height >> 1) - 1) * frame->linesize[2];
718  int runlen, y = 0, x = 0, pos = 0;
719  uint8_t fill[4];
720  unsigned code;
721 
722  while (bytestream2_get_bytes_left(gbyte) > 0) {
723  code = bytestream2_peek_le32(gbyte);
724  runlen = code & 0xFFFFFF;
725 
726  if (code >> 24 == 0x77) {
727  bytestream2_skip(gbyte, 4);
728 
729  for (int i = 0; i < 4; i++)
730  fill[i] = bytestream2_get_byte(gbyte);
731 
732  while (runlen > 0) {
733  runlen--;
734 
735  if (fill_pixels(&y0dst, &y1dst, &udst, &vdst,
736  frame->linesize[0],
737  frame->linesize[1],
738  frame->linesize[2],
739  fill, &x, &y, &pos,
740  avctx->width / 2,
741  avctx->height / 2))
742  return 0;
743  }
744  } else {
745  for (int i = 0; i < 4; i++)
746  fill[i] = bytestream2_get_byte(gbyte);
747 
748  if (fill_pixels(&y0dst, &y1dst, &udst, &vdst,
749  frame->linesize[0],
750  frame->linesize[1],
751  frame->linesize[2],
752  fill, &x, &y, &pos,
753  avctx->width / 2,
754  avctx->height / 2))
755  return 0;
756  }
757  }
758 
759  return 0;
760 }
761 
762 static int decode_raw_intra(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
763 {
764  uint8_t *y0dst = frame->data[0] + (avctx->height - 1) * frame->linesize[0];
765  uint8_t *y1dst = y0dst - frame->linesize[0];
766  uint8_t *udst = frame->data[1] + ((avctx->height >> 1) - 1) * frame->linesize[1];
767  uint8_t *vdst = frame->data[2] + ((avctx->height >> 1) - 1) * frame->linesize[2];
768  uint8_t ly0 = 0, ly1 = 0, ly2 = 0, ly3 = 0, lu = 0, lv = 0;
769 
770  for (int y = 0; y < avctx->height / 2; y++) {
771  for (int x = 0; x < avctx->width / 2; x++) {
772  y0dst[x*2+0] = bytestream2_get_byte(gbyte) + ly0;
773  ly0 = y0dst[x*2+0];
774  y0dst[x*2+1] = bytestream2_get_byte(gbyte) + ly1;
775  ly1 = y0dst[x*2+1];
776  y1dst[x*2+0] = bytestream2_get_byte(gbyte) + ly2;
777  ly2 = y1dst[x*2+0];
778  y1dst[x*2+1] = bytestream2_get_byte(gbyte) + ly3;
779  ly3 = y1dst[x*2+1];
780  udst[x] = bytestream2_get_byte(gbyte) + lu;
781  lu = udst[x];
782  vdst[x] = bytestream2_get_byte(gbyte) + lv;
783  lv = vdst[x];
784  }
785 
786  y0dst -= 2*frame->linesize[0];
787  y1dst -= 2*frame->linesize[0];
788  udst -= frame->linesize[1];
789  vdst -= frame->linesize[2];
790  }
791 
792  return 0;
793 }
794 
795 static int decode_intra(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame)
796 {
797  AGMContext *s = avctx->priv_data;
798  int ret;
799 
800  compute_quant_matrix(s, (2 * s->compression - 100) / 100.0);
801 
802  s->blocks_w = avctx->coded_width >> 3;
803  s->blocks_h = avctx->coded_height >> 3;
804 
805  ret = decode_intra_plane(s, gb, s->size[0], s->luma_quant_matrix, frame, 0);
806  if (ret < 0)
807  return ret;
808 
809  bytestream2_skip(&s->gbyte, s->size[0]);
810 
811  s->blocks_w = avctx->coded_width >> 4;
812  s->blocks_h = avctx->coded_height >> 4;
813 
814  ret = decode_intra_plane(s, gb, s->size[1], s->chroma_quant_matrix, frame, 2);
815  if (ret < 0)
816  return ret;
817 
818  bytestream2_skip(&s->gbyte, s->size[1]);
819 
820  s->blocks_w = avctx->coded_width >> 4;
821  s->blocks_h = avctx->coded_height >> 4;
822 
823  ret = decode_intra_plane(s, gb, s->size[2], s->chroma_quant_matrix, frame, 1);
824  if (ret < 0)
825  return ret;
826 
827  return 0;
828 }
829 
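/*
 * Reads one motion vector per 16x16 macroblock.  All x components are coded
 * first, then all y components, each through read_code() in mode 1, where the
 * skip count leaves the intervening vectors at zero.
 */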
830 static int decode_motion_vectors(AVCodecContext *avctx, GetBitContext *gb)
831 {
832  AGMContext *s = avctx->priv_data;
833  int nb_mvs = ((avctx->coded_height + 15) >> 4) * ((avctx->coded_width + 15) >> 4);
834  int ret, skip = 0, value, map;
835 
836  av_fast_padded_malloc(&s->mvectors, &s->mvectors_size,
837  nb_mvs * sizeof(*s->mvectors));
838  if (!s->mvectors)
839  return AVERROR(ENOMEM);
840 
841  if ((ret = init_get_bits8(gb, s->gbyte.buffer, bytestream2_get_bytes_left(&s->gbyte) -
842  (s->size[0] + s->size[1] + s->size[2]))) < 0)
843  return ret;
844 
845  memset(s->mvectors, 0, sizeof(*s->mvectors) * nb_mvs);
846 
847  for (int i = 0; i < nb_mvs; i++) {
848  ret = read_code(gb, &skip, &value, &map, 1);
849  if (ret < 0)
850  return ret;
851  s->mvectors[i].x = value;
852  i += skip;
853  }
854 
855  for (int i = 0; i < nb_mvs; i++) {
856  ret = read_code(gb, &skip, &value, &map, 1);
857  if (ret < 0)
858  return ret;
859  s->mvectors[i].y = value;
860  i += skip;
861  }
862 
863  if (get_bits_left(gb) <= 0)
864  return AVERROR_INVALIDDATA;
865  skip = (get_bits_count(gb) >> 3) + 1;
866  bytestream2_skip(&s->gbyte, skip);
867 
868  return 0;
869 }
870 
871 static int decode_inter(AVCodecContext *avctx, GetBitContext *gb,
872  AVFrame *frame, AVFrame *prev)
873 {
874  AGMContext *s = avctx->priv_data;
875  int ret;
876 
877  compute_quant_matrix(s, (2 * s->compression - 100) / 100.0);
878 
879  if (s->flags & 2) {
880  ret = decode_motion_vectors(avctx, gb);
881  if (ret < 0)
882  return ret;
883  }
884 
885  s->blocks_w = avctx->coded_width >> 3;
886  s->blocks_h = avctx->coded_height >> 3;
887 
888  ret = decode_inter_plane(s, gb, s->size[0], s->luma_quant_matrix, frame, prev, 0);
889  if (ret < 0)
890  return ret;
891 
892  bytestream2_skip(&s->gbyte, s->size[0]);
893 
894  s->blocks_w = avctx->coded_width >> 4;
895  s->blocks_h = avctx->coded_height >> 4;
896 
897  ret = decode_inter_plane(s, gb, s->size[1], s->chroma_quant_matrix, frame, prev, 2);
898  if (ret < 0)
899  return ret;
900 
901  bytestream2_skip(&s->gbyte, s->size[1]);
902 
903  s->blocks_w = avctx->coded_width >> 4;
904  s->blocks_h = avctx->coded_height >> 4;
905 
906  ret = decode_inter_plane(s, gb, s->size[2], s->chroma_quant_matrix, frame, prev, 1);
907  if (ret < 0)
908  return ret;
909 
910  return 0;
911 }
912 
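/*
 * decode_huffman2() uses a Huffman table transmitted as 256 code lengths.
 * make_new_tree() appears to mirror the encoder's tree construction to recover
 * the codes (assembled LSB-first in get_tree_codes()), and build_huff() feeds
 * the resulting (length, code, symbol) triples to ff_init_vlc_sparse() with
 * INIT_VLC_LE.
 */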
913 typedef struct Node {
914  int parent;
915  int child[2];
916 } Node;
917 
918 static void get_tree_codes(uint32_t *codes, Node *nodes, int idx, uint32_t pfx, int bitpos)
919 {
920  if (idx < 256 && idx >= 0) {
921  codes[idx] = pfx;
922  } else if (idx >= 0) {
923  get_tree_codes(codes, nodes, nodes[idx].child[0], pfx + (0 << bitpos), bitpos + 1);
924  get_tree_codes(codes, nodes, nodes[idx].child[1], pfx + (1U << bitpos), bitpos + 1);
925  }
926 }
927 
928 static int make_new_tree(const uint8_t *bitlens, uint32_t *codes)
929 {
930  int zlcount = 0, curlen, idx, nindex, last, llast;
931  int blcounts[32] = { 0 };
932  int syms[8192];
933  Node nodes[512];
934  int node_idx[1024];
935  int old_idx[512];
936 
937  for (int i = 0; i < 256; i++) {
938  int bitlen = bitlens[i];
939  int blcount = blcounts[bitlen];
940 
941  zlcount += bitlen < 1;
942  syms[(bitlen << 8) + blcount] = i;
943  blcounts[bitlen]++;
944  }
945 
946  for (int i = 0; i < 512; i++) {
947  nodes[i].child[0] = -1;
948  nodes[i].child[1] = -1;
949  }
950 
951  for (int i = 0; i < 256; i++) {
952  node_idx[i] = 257 + i;
953  }
954 
955  curlen = 1;
956  node_idx[512] = 256;
957  last = 255;
958  nindex = 1;
959 
960  for (curlen = 1; curlen < 32; curlen++) {
961  if (blcounts[curlen] > 0) {
962  int max_zlcount = zlcount + blcounts[curlen];
963 
964  for (int i = 0; zlcount < 256 && zlcount < max_zlcount; zlcount++, i++) {
965  int p = node_idx[nindex - 1 + 512];
966  int ch = syms[256 * curlen + i];
967 
968  if (nindex <= 0)
969  return AVERROR_INVALIDDATA;
970 
971  if (nodes[p].child[0] == -1) {
972  nodes[p].child[0] = ch;
973  } else {
974  nodes[p].child[1] = ch;
975  nindex--;
976  }
977  nodes[ch].parent = p;
978  }
979  }
980  llast = last - 1;
981  idx = 0;
982  while (nindex > 0) {
983  int p, ch;
984 
985  last = llast - idx;
986  p = node_idx[nindex - 1 + 512];
987  ch = node_idx[last];
988  if (nodes[p].child[0] == -1) {
989  nodes[p].child[0] = ch;
990  } else {
991  nodes[p].child[1] = ch;
992  nindex--;
993  }
994  old_idx[idx] = ch;
995  nodes[ch].parent = p;
996  if (idx == llast)
997  goto next;
998  idx++;
999  if (nindex <= 0) {
1000  for (int i = 0; i < idx; i++)
1001  node_idx[512 + i] = old_idx[i];
1002  }
1003  }
1004  nindex = idx;
1005  }
1006 
1007 next:
1008 
1009  get_tree_codes(codes, nodes, 256, 0, 0);
1010  return 0;
1011 }
1012 
1013 static int build_huff(const uint8_t *bitlen, VLC *vlc)
1014 {
1015  uint32_t new_codes[256];
1016  uint8_t bits[256];
1017  uint8_t symbols[256];
1018  uint32_t codes[256];
1019  int nb_codes = 0;
1020 
1021  int ret = make_new_tree(bitlen, new_codes);
1022  if (ret < 0)
1023  return ret;
1024 
1025  for (int i = 0; i < 256; i++) {
1026  if (bitlen[i]) {
1027  bits[nb_codes] = bitlen[i];
1028  codes[nb_codes] = new_codes[i];
1029  symbols[nb_codes] = i;
1030  nb_codes++;
1031  }
1032  }
1033 
1034  ff_free_vlc(vlc);
1035  return ff_init_vlc_sparse(vlc, 13, nb_codes,
1036  bits, 1, 1,
1037  codes, 4, 4,
1038  symbols, 1, 1,
1039  INIT_VLC_LE);
1040 }
1041 
1042 static int decode_huffman2(AVCodecContext *avctx, int header, int size)
1043 {
1044  AGMContext *s = avctx->priv_data;
1045  GetBitContext *gb = &s->gb;
1046  uint8_t lens[256];
1047  int ret, x, len;
1048 
1049  if ((ret = init_get_bits8(gb, s->gbyte.buffer,
1050  bytestream2_get_bytes_left(&s->gbyte))) < 0)
1051  return ret;
1052 
1053  s->output_size = get_bits_long(gb, 32);
1054 
1055  if (s->output_size > avctx->width * avctx->height * 9LL + 10000)
1056  return AVERROR_INVALIDDATA;
1057 
1058  av_fast_padded_malloc(&s->output, &s->padded_output_size, s->output_size);
1059  if (!s->output)
1060  return AVERROR(ENOMEM);
1061 
1062  x = get_bits(gb, 1);
1063  len = 4 + get_bits(gb, 1);
1064  if (x) {
1065  int cb[8] = { 0 };
1066  int count = get_bits(gb, 3) + 1;
1067 
1068  for (int i = 0; i < count; i++)
1069  cb[i] = get_bits(gb, len);
1070 
1071  for (int i = 0; i < 256; i++) {
1072  int idx = get_bits(gb, 3);
1073  lens[i] = cb[idx];
1074  }
1075  } else {
1076  for (int i = 0; i < 256; i++)
1077  lens[i] = get_bits(gb, len);
1078  }
1079 
1080  if ((ret = build_huff(lens, &s->vlc)) < 0)
1081  return ret;
1082 
1083  x = 0;
1084  while (get_bits_left(gb) > 0 && x < s->output_size) {
1085  int val = get_vlc2(gb, s->vlc.table, s->vlc.bits, 3);
1086  if (val < 0)
1087  return AVERROR_INVALIDDATA;
1088  s->output[x++] = val;
1089  }
1090 
1091  return 0;
1092 }
1093 
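/*
 * Packet layout as handled below: a 32-bit header word (non-zero means the
 * payload is Huffman-compressed and goes through decode_huffman2()), then
 * fflags, whose low 29 bits give the size of a bitstream segment that must fit
 * in the packet.  For the DCT-based variants the payload continues with width
 * and height (negative values set the frame flags bits), the compression
 * level, and the three per-plane sizes.
 */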
1094 static int decode_frame(AVCodecContext *avctx, void *data,
1095  int *got_frame, AVPacket *avpkt)
1096 {
1097  AGMContext *s = avctx->priv_data;
1098  GetBitContext *gb = &s->gb;
1099  GetByteContext *gbyte = &s->gbyte;
1100  AVFrame *frame = data;
1101  int w, h, width, height, header;
1102  unsigned compressed_size;
1103  long skip;
1104  int ret;
1105 
1106  if (!avpkt->size)
1107  return 0;
1108 
1109  bytestream2_init(gbyte, avpkt->data, avpkt->size);
1110 
1111  header = bytestream2_get_le32(gbyte);
1112  s->fflags = bytestream2_get_le32(gbyte);
1113  s->bitstream_size = s->fflags & 0x1FFFFFFF;
1114  s->fflags >>= 29;
1115  av_log(avctx, AV_LOG_DEBUG, "fflags: %X\n", s->fflags);
1116  if (avpkt->size < s->bitstream_size + 8)
1117  return AVERROR_INVALIDDATA;
1118 
1119  s->key_frame = (avpkt->flags & AV_PKT_FLAG_KEY);
1120  frame->key_frame = s->key_frame;
1121  frame->pict_type = s->key_frame ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1122 
1123  if (!s->key_frame) {
1124  if (!s->prev_frame->data[0]) {
1125  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1126  return AVERROR_INVALIDDATA;
1127  }
1128  }
1129 
1130  if (header) {
1131  if (avctx->codec_tag == MKTAG('A', 'G', 'M', '0') ||
1132  avctx->codec_tag == MKTAG('A', 'G', 'M', '1'))
1133  return AVERROR_PATCHWELCOME;
1134  else
1135  ret = decode_huffman2(avctx, header, (avpkt->size - s->bitstream_size) - 8);
1136  if (ret < 0)
1137  return ret;
1138  bytestream2_init(gbyte, s->output, s->output_size);
1139  } else if (!s->dct) {
1140  bytestream2_skip(gbyte, 4);
1141  }
1142 
1143  if (s->dct) {
1144  s->flags = 0;
1145  w = bytestream2_get_le32(gbyte);
1146  h = bytestream2_get_le32(gbyte);
1147  if (w == INT32_MIN || h == INT32_MIN)
1148  return AVERROR_INVALIDDATA;
1149  if (w < 0) {
1150  w = -w;
1151  s->flags |= 2;
1152  }
1153  if (h < 0) {
1154  h = -h;
1155  s->flags |= 1;
1156  }
1157 
1158  width = avctx->width;
1159  height = avctx->height;
1160  if (w < width || h < height || w & 7 || h & 7)
1161  return AVERROR_INVALIDDATA;
1162 
1163  ret = ff_set_dimensions(avctx, w, h);
1164  if (ret < 0)
1165  return ret;
1166  avctx->width = width;
1167  avctx->height = height;
1168 
1169  s->compression = bytestream2_get_le32(gbyte);
1170  if (s->compression < 0 || s->compression > 100)
1171  return AVERROR_INVALIDDATA;
1172 
1173  for (int i = 0; i < 3; i++)
1174  s->size[i] = bytestream2_get_le32(gbyte);
1175  if (header) {
1176  compressed_size = s->output_size;
1177  skip = 8LL;
1178  } else {
1179  compressed_size = avpkt->size;
1180  skip = 32LL;
1181  }
1182  if (s->size[0] < 0 || s->size[1] < 0 || s->size[2] < 0 ||
1183  skip + s->size[0] + s->size[1] + s->size[2] > compressed_size) {
1184  return AVERROR_INVALIDDATA;
1185  }
1186  }
1187 
1188  if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
1189  return ret;
1190 
1191  if (frame->key_frame) {
1192  if (!s->dct && !s->rgb)
1193  ret = decode_raw_intra(avctx, gbyte, frame);
1194  else if (!s->dct && s->rgb)
1195  ret = decode_raw_intra_rgb(avctx, gbyte, frame);
1196  else
1197  ret = decode_intra(avctx, gb, frame);
1198  } else {
1199  if (s->prev_frame->width != frame->width ||
1200  s->prev_frame->height != frame->height)
1201  return AVERROR_INVALIDDATA;
1202 
1203  if (!(s->flags & 2)) {
1204  ret = av_frame_copy(frame, s->prev_frame);
1205  if (ret < 0)
1206  return ret;
1207  }
1208 
1209  if (s->dct) {
1210  ret = decode_inter(avctx, gb, frame, s->prev_frame);
1211  } else if (!s->dct && !s->rgb) {
1212  ret = decode_runlen(avctx, gbyte, frame);
1213  } else {
1214  ret = decode_runlen_rgb(avctx, gbyte, frame);
1215  }
1216  }
1217  if (ret < 0)
1218  return ret;
1219 
1220  av_frame_unref(s->prev_frame);
1221  if ((ret = av_frame_ref(s->prev_frame, frame)) < 0)
1222  return ret;
1223 
1224  frame->crop_top = avctx->coded_height - avctx->height;
1225  frame->crop_left = avctx->coded_width - avctx->width;
1226 
1227  *got_frame = 1;
1228 
1229  return avpkt->size;
1230 }
1231 
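/*
 * The codec tag selects the variant: AGM4 is raw/RLE BGR24, AGM5 raw/RLE
 * YUV 4:2:0, everything else takes the DCT path, with AGM3/AGM7 using the
 * "plus" DC handling.  Huffman-compressed AGM0/AGM1 packets are rejected as
 * unimplemented in decode_frame().
 */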
1232 static av_cold int decode_init(AVCodecContext *avctx)
1233 {
1234  AGMContext *s = avctx->priv_data;
1235 
1236  s->rgb = avctx->codec_tag == MKTAG('A', 'G', 'M', '4');
1237  avctx->pix_fmt = s->rgb ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_YUV420P;
1238  s->avctx = avctx;
1239  s->plus = avctx->codec_tag == MKTAG('A', 'G', 'M', '3') ||
1240  avctx->codec_tag == MKTAG('A', 'G', 'M', '7');
1241 
1242  s->dct = avctx->codec_tag != MKTAG('A', 'G', 'M', '4') &&
1243  avctx->codec_tag != MKTAG('A', 'G', 'M', '5');
1244 
1245  if (!s->rgb && !s->dct) {
1246  if ((avctx->width & 1) || (avctx->height & 1))
1247  return AVERROR_INVALIDDATA;
1248  }
1249 
1250  avctx->idct_algo = FF_IDCT_SIMPLE;
1251  ff_idctdsp_init(&s->idsp, avctx);
1252  ff_init_scantable(s->idsp.idct_permutation, &s->scantable, ff_zigzag_direct);
1253 
1254  s->prev_frame = av_frame_alloc();
1255  if (!s->prev_frame)
1256  return AVERROR(ENOMEM);
1257 
1258  return 0;
1259 }
1260 
1261 static void decode_flush(AVCodecContext *avctx)
1262 {
1263  AGMContext *s = avctx->priv_data;
1264 
1265  av_frame_unref(s->prev_frame);
1266 }
1267 
1268 static av_cold int decode_close(AVCodecContext *avctx)
1269 {
1270  AGMContext *s = avctx->priv_data;
1271 
1272  ff_free_vlc(&s->vlc);
1273  av_frame_free(&s->prev_frame);
1274  av_freep(&s->mvectors);
1275  s->mvectors_size = 0;
1276  av_freep(&s->wblocks);
1277  s->wblocks_size = 0;
1278  av_freep(&s->output);
1279  s->padded_output_size = 0;
1280  av_freep(&s->map);
1281  s->map_size = 0;
1282 
1283  return 0;
1284 }
1285 
1286 AVCodec ff_agm_decoder = {
1287  .name = "agm",
1288  .long_name = NULL_IF_CONFIG_SMALL("Amuse Graphics Movie"),
1289  .type = AVMEDIA_TYPE_VIDEO,
1290  .id = AV_CODEC_ID_AGM,
1291  .priv_data_size = sizeof(AGMContext),
1292  .init = decode_init,
1293  .close = decode_close,
1294  .decode = decode_frame,
1295  .flush = decode_flush,
1296  .capabilities = AV_CODEC_CAP_DR1,
1297  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
1298  FF_CODEC_CAP_INIT_CLEANUP |
1299  FF_CODEC_CAP_EXPORTS_CROPPING,
1300 };