FFmpeg
magicyuv.c
1 /*
2  * MagicYUV decoder
3  * Copyright (c) 2016 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdlib.h>
23 #include <string.h>
24 
25 #define CACHED_BITSTREAM_READER !ARCH_X86_32
26 
27 #include "libavutil/pixdesc.h"
28 
29 #include "avcodec.h"
30 #include "bytestream.h"
31 #include "codec_internal.h"
32 #include "decode.h"
33 #include "get_bits.h"
34 #include "lossless_videodsp.h"
35 #include "thread.h"
36 
37 #define VLC_BITS 12
38 
39 typedef struct Slice {
40  uint32_t start;
41  uint32_t size;
42 } Slice;
43 
44 typedef enum Prediction {
45  LEFT = 1,
46  GRADIENT,
47  MEDIAN,
48 } Prediction;
49 
50 typedef struct HuffEntry {
51  uint8_t len;
52  uint16_t sym;
53 } HuffEntry;
54 
55 typedef struct MagicYUVContext {
56  AVFrame *p;
57  int max;
58  int bps;
59  int slice_height;
60  int nb_slices;
61  int planes; // number of encoded planes in bitstream
62  int decorrelate; // postprocessing work
63  int color_matrix; // video color matrix
64  int flags;
65  int interlaced; // video is interlaced
66  const uint8_t *buf; // pointer to AVPacket->data
67  int hshift[4];
68  int vshift[4];
69  Slice *slices[4]; // slice bitstream positions for each plane
70  unsigned int slices_size[4]; // slice sizes for each plane
71  VLC vlc[4]; // VLC for each plane
72  VLC_MULTI multi[4]; // Buffer for joint VLC data
73  int (*magy_decode_slice)(AVCodecContext *avctx, void *tdata,
74  int j, int threadnr);
75  LLVidDSPContext llviddsp;
76  HuffEntry he[1 << 14];
77  uint8_t len[1 << 14];
78 } MagicYUVContext;
79 
80 static int huff_build(AVCodecContext *avctx,
81  const uint8_t len[], uint16_t codes_pos[33],
82  VLC *vlc, VLC_MULTI *multi, int nb_elems, void *logctx)
83 {
84  MagicYUVContext *s = avctx->priv_data;
85  HuffEntry *he = s->he;
86 
87  for (int i = 31; i > 0; i--)
88  codes_pos[i] += codes_pos[i + 1];
89 
90  for (unsigned i = nb_elems; i-- > 0;)
91  he[--codes_pos[len[i]]] = (HuffEntry){ len[i], i };
92 
93  ff_vlc_free(vlc);
94  ff_vlc_free_multi(multi);
95  return ff_vlc_init_multi_from_lengths(vlc, multi, FFMIN(he[0].len, VLC_BITS), nb_elems, nb_elems,
96  &he[0].len, sizeof(he[0]),
97  &he[0].sym, sizeof(he[0]), sizeof(he[0].sym),
98  0, 0, logctx);
99 }
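/* Note: huff_build() rebuilds the canonical Huffman table from per-symbol code
 * lengths. codes_pos[] arrives as a histogram of lengths; the suffix sum turns
 * it into end positions per length, and the backwards walk places every symbol
 * just below its length's end marker, so he[] ends up ordered by descending
 * code length with symbol values ascending inside each length. he[0].len is
 * therefore the longest code, capped to VLC_BITS when sizing the table. */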
100 
101 static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1,
102  const uint16_t *diff, intptr_t w,
103  int *left, int *left_top, int max)
104 {
105  int i;
106  uint16_t l, lt;
107 
108  l = *left;
109  lt = *left_top;
110 
111  for (i = 0; i < w; i++) {
112  l = mid_pred(l, src1[i], (l + src1[i] - lt)) + diff[i];
113  l &= max;
114  lt = src1[i];
115  dst[i] = l;
116  }
117 
118  *left = l;
119  *left_top = lt;
120 }
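/* Note: magicyuv_median_pred16() is the 16-bit counterpart of llviddsp's
 * add_median_pred(): each output sample is the MED/LOCO-I predictor
 * median(left, top, left + top - topleft) plus the decoded residual, masked
 * to the sample range (max = (1 << bps) - 1). */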
121 
122 #define READ_PLANE(dst, plane, b, c) \
123 { \
124  x = 0; \
125  for (; CACHED_BITSTREAM_READER && x < width-c && get_bits_left(&gb) > 0;) {\
126  ret = get_vlc_multi(&gb, (uint8_t *)dst + x * b, multi, \
127  vlc, vlc_bits, 3, b); \
128  if (ret <= 0) \
129  return AVERROR_INVALIDDATA; \
130  x += ret; \
131  } \
132  for (; x < width && get_bits_left(&gb) > 0; x++) \
133  dst[x] = get_vlc2(&gb, vlc, vlc_bits, 3); \
134  dst += stride; \
135 }
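/* Note: READ_PLANE() decodes one row of `width` samples of `b` bytes each and
 * then advances dst by one stride. With the cached bitstream reader it first
 * emits several symbols per lookup through the joint VLC table
 * (get_vlc_multi), staying `c` samples clear of the row end so a multi-symbol
 * write cannot overrun, then finishes the row one symbol at a time with
 * get_vlc2(). */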
136 
137 static int magy_decode_slice10(AVCodecContext *avctx, void *tdata,
138  int j, int threadnr)
139 {
140  const MagicYUVContext *s = avctx->priv_data;
141  int interlaced = s->interlaced;
142  const int bps = s->bps;
143  const int max = s->max - 1;
144  AVFrame *p = s->p;
145  int i, k, x;
146  GetBitContext gb;
147  uint16_t *dst;
148 
149  for (i = 0; i < s->planes; i++) {
150  int left, lefttop, top;
151  int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
152  int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
153  int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
154  ptrdiff_t fake_stride = (p->linesize[i] / 2) * (1 + interlaced);
155  ptrdiff_t stride = p->linesize[i] / 2;
156  const VLC_MULTI_ELEM *const multi = s->multi[i].table;
157  const VLCElem *const vlc = s->vlc[i].table;
158  const int vlc_bits = s->vlc[i].bits;
159  int flags, pred;
160  int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
161  s->slices[i][j].size);
162 
163  if (ret < 0)
164  return ret;
165 
166  flags = get_bits(&gb, 8);
167  pred = get_bits(&gb, 8);
168 
169  dst = (uint16_t *)p->data[i] + j * sheight * stride;
170  if (flags & 1) {
171  if (get_bits_left(&gb) < bps * width * height)
172  return AVERROR_INVALIDDATA;
173  for (k = 0; k < height; k++) {
174  for (x = 0; x < width; x++)
175  dst[x] = get_bits(&gb, bps);
176 
177  dst += stride;
178  }
179  } else {
180  for (k = 0; k < height; k++)
181  READ_PLANE(dst, i, 2, 3)
182  }
183 
184  switch (pred) {
185  case LEFT:
186  dst = (uint16_t *)p->data[i] + j * sheight * stride;
187  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
188  dst += stride;
189  if (interlaced) {
190  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
191  dst += stride;
192  }
193  for (k = 1 + interlaced; k < height; k++) {
194  s->llviddsp.add_left_pred_int16(dst, dst, max, width, dst[-fake_stride]);
195  dst += stride;
196  }
197  break;
198  case GRADIENT:
199  dst = (uint16_t *)p->data[i] + j * sheight * stride;
200  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
201  dst += stride;
202  if (interlaced) {
203  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
204  dst += stride;
205  }
206  for (k = 1 + interlaced; k < height; k++) {
207  top = dst[-fake_stride];
208  left = top + dst[0];
209  dst[0] = left & max;
210  for (x = 1; x < width; x++) {
211  top = dst[x - fake_stride];
212  lefttop = dst[x - (fake_stride + 1)];
213  left += top - lefttop + dst[x];
214  dst[x] = left & max;
215  }
216  dst += stride;
217  }
218  break;
219  case MEDIAN:
220  dst = (uint16_t *)p->data[i] + j * sheight * stride;
221  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
222  dst += stride;
223  if (interlaced) {
224  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
225  dst += stride;
226  }
227  lefttop = left = dst[0];
228  for (k = 1 + interlaced; k < height; k++) {
229  magicyuv_median_pred16(dst, dst - fake_stride, dst, width, &left, &lefttop, max);
230  lefttop = left = dst[0];
231  dst += stride;
232  }
233  break;
234  default:
235  avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
236  }
237  }
238 
239  if (s->decorrelate) {
240  int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
241  int width = avctx->coded_width;
242  uint16_t *r = (uint16_t *)p->data[0] + j * s->slice_height * p->linesize[0] / 2;
243  uint16_t *g = (uint16_t *)p->data[1] + j * s->slice_height * p->linesize[1] / 2;
244  uint16_t *b = (uint16_t *)p->data[2] + j * s->slice_height * p->linesize[2] / 2;
245 
246  for (i = 0; i < height; i++) {
247  for (k = 0; k < width; k++) {
248  b[k] = (b[k] + g[k]) & max;
249  r[k] = (r[k] + g[k]) & max;
250  }
251  b += p->linesize[0] / 2;
252  g += p->linesize[1] / 2;
253  r += p->linesize[2] / 2;
254  }
255  }
256 
257  return 0;
258 }
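/* Note: both slice decoders share the same layout: each per-plane slice starts
 * with a flags byte and a prediction byte, followed either by raw bps-bit
 * samples (flags & 1) or by Huffman-coded residuals. The selected predictor
 * (LEFT, GRADIENT or MEDIAN) is then applied in place, with the first row of a
 * slice (two rows when interlaced) always using plain left prediction. For the
 * RGB formats, the decorrelate step afterwards adds the stored green plane
 * back into the other two. */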
259 
260 static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
261  int j, int threadnr)
262 {
263  const MagicYUVContext *s = avctx->priv_data;
264  int interlaced = s->interlaced;
265  AVFrame *p = s->p;
266  int i, k, x, min_width;
267  GetBitContext gb;
268  uint8_t *dst;
269 
270  for (i = 0; i < s->planes; i++) {
271  int left, lefttop, top;
272  int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
273  int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
274  int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
275  ptrdiff_t fake_stride = p->linesize[i] * (1 + interlaced);
276  ptrdiff_t stride = p->linesize[i];
277  const uint8_t *slice = s->buf + s->slices[i][j].start;
278  const VLC_MULTI_ELEM *const multi = s->multi[i].table;
279  const VLCElem *const vlc = s->vlc[i].table;
280  const int vlc_bits = s->vlc[i].bits;
281  int flags, pred;
282 
283  flags = bytestream_get_byte(&slice);
284  pred = bytestream_get_byte(&slice);
285 
286  dst = p->data[i] + j * sheight * stride;
287  if (flags & 1) {
288  if (s->slices[i][j].size - 2 < width * height)
289  return AVERROR_INVALIDDATA;
290  for (k = 0; k < height; k++) {
291  bytestream_get_buffer(&slice, dst, width);
292  dst += stride;
293  }
294  } else {
295  int ret = init_get_bits8(&gb, slice, s->slices[i][j].size - 2);
296 
297  if (ret < 0)
298  return ret;
299 
300  for (k = 0; k < height; k++)
301  READ_PLANE(dst, i, 1, 7)
302  }
303 
304  switch (pred) {
305  case LEFT:
306  dst = p->data[i] + j * sheight * stride;
307  s->llviddsp.add_left_pred(dst, dst, width, 0);
308  dst += stride;
309  if (interlaced) {
310  s->llviddsp.add_left_pred(dst, dst, width, 0);
311  dst += stride;
312  }
313  for (k = 1 + interlaced; k < height; k++) {
314  s->llviddsp.add_left_pred(dst, dst, width, dst[-fake_stride]);
315  dst += stride;
316  }
317  break;
318  case GRADIENT:
319  dst = p->data[i] + j * sheight * stride;
320  s->llviddsp.add_left_pred(dst, dst, width, 0);
321  dst += stride;
322  if (interlaced) {
323  s->llviddsp.add_left_pred(dst, dst, width, 0);
324  dst += stride;
325  }
326  min_width = FFMIN(width, 32);
327  for (k = 1 + interlaced; k < height; k++) {
328  top = dst[-fake_stride];
329  left = top + dst[0];
330  dst[0] = left;
331  for (x = 1; x < min_width; x++) { /* dsp need aligned 32 */
332  top = dst[x - fake_stride];
333  lefttop = dst[x - (fake_stride + 1)];
334  left += top - lefttop + dst[x];
335  dst[x] = left;
336  }
337  if (width > 32)
338  s->llviddsp.add_gradient_pred(dst + 32, fake_stride, width - 32);
339  dst += stride;
340  }
341  break;
342  case MEDIAN:
343  dst = p->data[i] + j * sheight * stride;
344  s->llviddsp.add_left_pred(dst, dst, width, 0);
345  dst += stride;
346  if (interlaced) {
347  s->llviddsp.add_left_pred(dst, dst, width, 0);
348  dst += stride;
349  }
350  lefttop = left = dst[0];
351  for (k = 1 + interlaced; k < height; k++) {
352  s->llviddsp.add_median_pred(dst, dst - fake_stride,
353  dst, width, &left, &lefttop);
354  lefttop = left = dst[0];
355  dst += stride;
356  }
357  break;
358  default:
359  avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
360  }
361  }
362 
363  if (s->decorrelate) {
364  int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
365  int width = avctx->coded_width;
366  uint8_t *b = p->data[0] + j * s->slice_height * p->linesize[0];
367  uint8_t *g = p->data[1] + j * s->slice_height * p->linesize[1];
368  uint8_t *r = p->data[2] + j * s->slice_height * p->linesize[2];
369 
370  for (i = 0; i < height; i++) {
371  s->llviddsp.add_bytes(b, g, width);
372  s->llviddsp.add_bytes(r, g, width);
373  b += p->linesize[0];
374  g += p->linesize[1];
375  r += p->linesize[2];
376  }
377  }
378 
379  return 0;
380 }
381 
382 static int build_huffman(AVCodecContext *avctx, const uint8_t *table,
383  int table_size, int max)
384 {
385  MagicYUVContext *s = avctx->priv_data;
386  GetByteContext gb;
387  uint8_t *len = s->len;
388  uint16_t length_count[33] = { 0 };
389  int i = 0, j = 0, k;
390 
391  bytestream2_init(&gb, table, table_size);
392 
393  while (bytestream2_get_bytes_left(&gb) > 0) {
394  int b = bytestream2_peek_byteu(&gb) & 0x80;
395  int x = bytestream2_get_byteu(&gb) & ~0x80;
396  int l = 1;
397 
398  if (b) {
399  if (bytestream2_get_bytes_left(&gb) <= 0)
400  break;
401  l += bytestream2_get_byteu(&gb);
402  }
403  k = j + l;
404  if (k > max || x == 0 || x > 32) {
405  av_log(avctx, AV_LOG_ERROR, "Invalid Huffman codes\n");
406  return AVERROR_INVALIDDATA;
407  }
408 
409  length_count[x] += l;
410  for (; j < k; j++)
411  len[j] = x;
412 
413  if (j == max) {
414  j = 0;
415  if (huff_build(avctx, len, length_count, &s->vlc[i], &s->multi[i], max, avctx)) {
416  av_log(avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
417  return AVERROR_INVALIDDATA;
418  }
419  i++;
420  if (i == s->planes) {
421  break;
422  }
423  memset(length_count, 0, sizeof(length_count));
424  }
425  }
426 
427  if (i != s->planes) {
428  av_log(avctx, AV_LOG_ERROR, "Huffman tables too short\n");
429  return AVERROR_INVALIDDATA;
430  }
431 
432  return 0;
433 }
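/* Note: the table parsed by build_huffman() is a run-length coded list of code
 * lengths: the low 7 bits of each byte give a length (1..32) and, if the high
 * bit is set, the following byte adds to the run count. Once 1 << bps lengths
 * have been collected, a VLC is built for the current plane and the histogram
 * is reset for the next one. */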
434 
435 static int magy_decode_frame(AVCodecContext *avctx, AVFrame *p,
436  int *got_frame, AVPacket *avpkt)
437 {
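 /* Packet layout, as parsed below: 'MAGY' tag, le32 header size, version byte
  * (must be 7), format byte, one reserved byte, color-matrix byte, flags byte
  * (0x2: interlaced, 0x4: full range), 3 reserved bytes, le32 width, height,
  * slice width and slice height, 4 reserved bytes, then per plane the le32
  * slice offsets, a plane-count byte, nb_slices * planes skipped bytes, and
  * finally the run-length coded Huffman length tables. */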
438  MagicYUVContext *s = avctx->priv_data;
439  GetByteContext gb;
440  uint32_t first_offset, offset, next_offset, header_size, slice_width;
441  int width, height, format, version, table_size;
442  int ret, i, j;
443 
444  if (avpkt->size < 36)
445  return AVERROR_INVALIDDATA;
446 
447  bytestream2_init(&gb, avpkt->data, avpkt->size);
448  if (bytestream2_get_le32u(&gb) != MKTAG('M', 'A', 'G', 'Y'))
449  return AVERROR_INVALIDDATA;
450 
451  header_size = bytestream2_get_le32u(&gb);
452  if (header_size < 32 || header_size >= avpkt->size) {
453  av_log(avctx, AV_LOG_ERROR,
454  "header or packet too small %"PRIu32"\n", header_size);
455  return AVERROR_INVALIDDATA;
456  }
457 
458  version = bytestream2_get_byteu(&gb);
459  if (version != 7) {
460  avpriv_request_sample(avctx, "Version %d", version);
461  return AVERROR_PATCHWELCOME;
462  }
463 
464  s->hshift[1] =
465  s->vshift[1] =
466  s->hshift[2] =
467  s->vshift[2] = 0;
468  s->decorrelate = 0;
469  s->bps = 8;
470 
471  format = bytestream2_get_byteu(&gb);
472  switch (format) {
473  case 0x65:
474  avctx->pix_fmt = AV_PIX_FMT_GBRP;
475  s->decorrelate = 1;
476  break;
477  case 0x66:
478  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
479  s->decorrelate = 1;
480  break;
481  case 0x67:
482  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
483  break;
484  case 0x68:
485  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
486  s->hshift[1] =
487  s->hshift[2] = 1;
488  break;
489  case 0x69:
490  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
491  s->hshift[1] =
492  s->vshift[1] =
493  s->hshift[2] =
494  s->vshift[2] = 1;
495  break;
496  case 0x6a:
497  avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
498  break;
499  case 0x6b:
500  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
501  break;
502  case 0x6c:
503  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
504  s->hshift[1] =
505  s->hshift[2] = 1;
506  s->bps = 10;
507  break;
508  case 0x76:
509  avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
510  s->bps = 10;
511  break;
512  case 0x6d:
513  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
514  s->decorrelate = 1;
515  s->bps = 10;
516  break;
517  case 0x6e:
518  avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
519  s->decorrelate = 1;
520  s->bps = 10;
521  break;
522  case 0x6f:
523  avctx->pix_fmt = AV_PIX_FMT_GBRP12;
524  s->decorrelate = 1;
525  s->bps = 12;
526  break;
527  case 0x70:
528  avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
529  s->decorrelate = 1;
530  s->bps = 12;
531  break;
532  case 0x71:
533  avctx->pix_fmt = AV_PIX_FMT_GBRP14;
534  s->decorrelate = 1;
535  s->bps = 14;
536  break;
537  case 0x72:
538  avctx->pix_fmt = AV_PIX_FMT_GBRAP14;
539  s->decorrelate = 1;
540  s->bps = 14;
541  break;
542  case 0x73:
543  avctx->pix_fmt = AV_PIX_FMT_GRAY10;
544  s->bps = 10;
545  break;
546  case 0x7b:
547  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
548  s->hshift[1] =
549  s->vshift[1] =
550  s->hshift[2] =
551  s->vshift[2] = 1;
552  s->bps = 10;
553  break;
554  default:
555  avpriv_request_sample(avctx, "Format 0x%X", format);
556  return AVERROR_PATCHWELCOME;
557  }
558  s->max = 1 << s->bps;
559  s->magy_decode_slice = s->bps == 8 ? magy_decode_slice : magy_decode_slice10;
560  s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);
561 
562  bytestream2_skipu(&gb, 1);
563  s->color_matrix = bytestream2_get_byteu(&gb);
564  s->flags = bytestream2_get_byteu(&gb);
565  s->interlaced = !!(s->flags & 2);
566  bytestream2_skipu(&gb, 3);
567 
568  width = bytestream2_get_le32u(&gb);
569  height = bytestream2_get_le32u(&gb);
570  ret = ff_set_dimensions(avctx, width, height);
571  if (ret < 0)
572  return ret;
573 
574  slice_width = bytestream2_get_le32u(&gb);
575  if (slice_width != avctx->coded_width) {
576  avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
577  return AVERROR_PATCHWELCOME;
578  }
579  s->slice_height = bytestream2_get_le32u(&gb);
580  if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
581  av_log(avctx, AV_LOG_ERROR,
582  "invalid slice height: %d\n", s->slice_height);
583  return AVERROR_INVALIDDATA;
584  }
585 
586  bytestream2_skipu(&gb, 4);
587 
588  s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
589  if (s->nb_slices > INT_MAX / FFMAX(sizeof(Slice), 4 * 5)) {
590  av_log(avctx, AV_LOG_ERROR,
591  "invalid number of slices: %d\n", s->nb_slices);
592  return AVERROR_INVALIDDATA;
593  }
594 
595  if (s->interlaced) {
596  if ((s->slice_height >> s->vshift[1]) < 2) {
597  av_log(avctx, AV_LOG_ERROR, "impossible slice height\n");
598  return AVERROR_INVALIDDATA;
599  }
600  if ((avctx->coded_height % s->slice_height) && ((avctx->coded_height % s->slice_height) >> s->vshift[1]) < 2) {
601  av_log(avctx, AV_LOG_ERROR, "impossible height\n");
602  return AVERROR_INVALIDDATA;
603  }
604  }
605 
606  if (bytestream2_get_bytes_left(&gb) <= s->nb_slices * s->planes * 5)
607  return AVERROR_INVALIDDATA;
608  for (i = 0; i < s->planes; i++) {
609  av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
610  if (!s->slices[i])
611  return AVERROR(ENOMEM);
612 
613  offset = bytestream2_get_le32u(&gb);
614  if (offset >= avpkt->size - header_size)
615  return AVERROR_INVALIDDATA;
616 
617  if (i == 0)
618  first_offset = offset;
619 
620  for (j = 0; j < s->nb_slices - 1; j++) {
621  s->slices[i][j].start = offset + header_size;
622 
623  next_offset = bytestream2_get_le32u(&gb);
624  if (next_offset <= offset || next_offset >= avpkt->size - header_size)
625  return AVERROR_INVALIDDATA;
626 
627  s->slices[i][j].size = next_offset - offset;
628  if (s->slices[i][j].size < 2)
629  return AVERROR_INVALIDDATA;
630  offset = next_offset;
631  }
632 
633  s->slices[i][j].start = offset + header_size;
634  s->slices[i][j].size = avpkt->size - s->slices[i][j].start;
635 
636  if (s->slices[i][j].size < 2)
637  return AVERROR_INVALIDDATA;
638  }
639 
640  if (bytestream2_get_byteu(&gb) != s->planes)
641  return AVERROR_INVALIDDATA;
642 
643  bytestream2_skipu(&gb, s->nb_slices * s->planes);
644 
645  table_size = header_size + first_offset - bytestream2_tell(&gb);
646  if (table_size < 2)
647  return AVERROR_INVALIDDATA;
648 
649  ret = build_huffman(avctx, avpkt->data + bytestream2_tell(&gb),
650  table_size, s->max);
651  if (ret < 0)
652  return ret;
653 
654  p->pict_type = AV_PICTURE_TYPE_I;
655  p->flags |= AV_FRAME_FLAG_KEY;
656 
657  if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
658  return ret;
659 
660  s->buf = avpkt->data;
661  s->p = p;
662  avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);
663 
664  if (avctx->pix_fmt == AV_PIX_FMT_GBRP ||
665  avctx->pix_fmt == AV_PIX_FMT_GBRAP ||
666  avctx->pix_fmt == AV_PIX_FMT_GBRP10 ||
667  avctx->pix_fmt == AV_PIX_FMT_GBRAP10||
668  avctx->pix_fmt == AV_PIX_FMT_GBRAP12||
669  avctx->pix_fmt == AV_PIX_FMT_GBRAP14||
670  avctx->pix_fmt == AV_PIX_FMT_GBRP12||
671  avctx->pix_fmt == AV_PIX_FMT_GBRP14) {
672  FFSWAP(uint8_t*, p->data[0], p->data[1]);
673  FFSWAP(int, p->linesize[0], p->linesize[1]);
674  } else {
675  switch (s->color_matrix) {
676  case 1:
677  p->colorspace = AVCOL_SPC_BT470BG;
678  break;
679  case 2:
680  p->colorspace = AVCOL_SPC_BT709;
681  break;
682  }
683  p->color_range = (s->flags & 4) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
684  }
685 
686  *got_frame = 1;
687 
688  return avpkt->size;
689 }
690 
691 static av_cold int magy_decode_init(AVCodecContext *avctx)
692 {
693  MagicYUVContext *s = avctx->priv_data;
694  ff_llviddsp_init(&s->llviddsp);
695  return 0;
696 }
697 
698 static av_cold int magy_decode_end(AVCodecContext *avctx)
699 {
700  MagicYUVContext * const s = avctx->priv_data;
701  int i;
702 
703  for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
704  av_freep(&s->slices[i]);
705  s->slices_size[i] = 0;
706  ff_vlc_free(&s->vlc[i]);
707  ff_vlc_free_multi(&s->multi[i]);
708  }
709 
710  return 0;
711 }
712 
713 const FFCodec ff_magicyuv_decoder = {
714  .p.name = "magicyuv",
715  CODEC_LONG_NAME("MagicYUV video"),
716  .p.type = AVMEDIA_TYPE_VIDEO,
717  .p.id = AV_CODEC_ID_MAGICYUV,
718  .priv_data_size = sizeof(MagicYUVContext),
719  .init = magy_decode_init,
720  .close = magy_decode_end,
721  FF_CODEC_DECODE_CB(magy_decode_frame),
722  .p.capabilities = AV_CODEC_CAP_DR1 |
723  AV_CODEC_CAP_FRAME_THREADS |
724  AV_CODEC_CAP_SLICE_THREADS,
725 };
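
The FFCodec table above is what libavcodec registers; applications do not call the static functions in this file directly but go through the generic send/receive API. A minimal usage sketch (not part of magicyuv.c; error handling shortened, and a real program would normally copy the demuxer's codec parameters into the context before opening — this decoder reads dimensions and pixel format from each packet header):

#include <libavcodec/avcodec.h>

static int decode_magicyuv_packet(const AVPacket *pkt, AVFrame *frame)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_MAGICYUV);
    AVCodecContext *ctx;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)
        goto end;

    /* Every MagicYUV frame is intra-coded, so one packet yields one frame. */
    if ((ret = avcodec_send_packet(ctx, pkt)) < 0)
        goto end;
    ret = avcodec_receive_frame(ctx, frame);

end:
    avcodec_free_context(&ctx);
    return ret;
}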