FFmpeg
magicyuv.c
Go to the documentation of this file.
1 /*
2  * MagicYUV decoder
3  * Copyright (c) 2016 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdlib.h>
23 #include <string.h>
24 
25 #define CACHED_BITSTREAM_READER !ARCH_X86_32
26 
27 #include "libavutil/pixdesc.h"
28 #include "libavutil/qsort.h"
29 
30 #include "avcodec.h"
31 #include "bytestream.h"
32 #include "get_bits.h"
33 #include "huffyuvdsp.h"
34 #include "internal.h"
35 #include "lossless_videodsp.h"
36 #include "thread.h"
37 
/* Location of one compressed slice inside the input packet. */
typedef struct Slice {
    uint32_t start; // byte offset of the slice bitstream within the packet
    uint32_t size;  // size of the slice bitstream in bytes
} Slice;
42 
/*
 * Per-slice prediction modes signalled in the bitstream.
 * NOTE(review): the GRADIENT and MEDIAN members were dropped by the
 * documentation extraction; restored here since both are referenced by the
 * slice decoders' switch statements.
 */
typedef enum Prediction {
    LEFT = 1,
    GRADIENT,
    MEDIAN,
} Prediction;
48 
/*
 * One symbol of a Huffman table: symbol value, code length in bits, and the
 * canonical code assigned by huff_build().
 * NOTE(review): the `len` member was dropped by the documentation extraction;
 * restored here since huff_cmp_len()/huff_build()/build_huffman() all use it.
 */
typedef struct HuffEntry {
    uint16_t sym;  // symbol value (up to 12 bits for 12-bit content)
    uint8_t  len;  // code length in bits (1..32 as parsed, <=12 used for VLC)
    uint32_t code; // canonical Huffman code, right-aligned
} HuffEntry;
54 
55 typedef struct MagicYUVContext {
57  int max;
58  int bps;
60  int nb_slices;
61  int planes; // number of encoded planes in bitstream
62  int decorrelate; // postprocessing work
63  int color_matrix; // video color matrix
64  int flags;
65  int interlaced; // video is interlaced
66  uint8_t *buf; // pointer to AVPacket->data
67  int hshift[4];
68  int vshift[4];
69  Slice *slices[4]; // slice bitstream positions for each plane
70  unsigned int slices_size[4]; // slice sizes for each plane
71  VLC vlc[4]; // VLC for each plane
72  int (*magy_decode_slice)(AVCodecContext *avctx, void *tdata,
73  int j, int threadnr);
76 
77 static int huff_cmp_len(const void *a, const void *b)
78 {
79  const HuffEntry *aa = a, *bb = b;
80  return (aa->len - bb->len) * 4096 + bb->sym - aa->sym;
81 }
82 
/*
 * Assign canonical Huffman codes to the (symbol, length) pairs in he[] and
 * (re)build the VLC decoding table from them.
 *
 * Returns 0 on success, a negative error code from ff_init_vlc_sparse()
 * otherwise. Sorts he[] in place.
 */
static int huff_build(HuffEntry he[], VLC *vlc, int nb_elems)
{
    uint32_t code;
    int i;

    // sort by ascending code length so codes can be assigned canonically
    AV_QSORT(he, nb_elems, HuffEntry, huff_cmp_len);

    // walk from the longest codes upward, accumulating the code value in the
    // top bits of a 32-bit register and shifting it down per entry's length
    code = 1;
    for (i = nb_elems - 1; i >= 0; i--) {
        he[i].code = code >> (32 - he[i].len);
        code += 0x80000000u >> (he[i].len - 1);
    }

    // drop any table left over from a previous frame before rebuilding
    ff_free_vlc(vlc);
    return ff_init_vlc_sparse(vlc, FFMIN(he[nb_elems - 1].len, 12), nb_elems,
                              &he[0].len, sizeof(he[0]), sizeof(he[0].len),
                              &he[0].code, sizeof(he[0]), sizeof(he[0].code),
                              &he[0].sym, sizeof(he[0]), sizeof(he[0].sym), 0);
}
102 
/*
 * Median prediction for 16-bit samples: for each sample, predict from the
 * median of (left, top, left+top-lefttop), add the residual from diff[], and
 * mask the result to the valid sample range.
 *
 * On return, *left and *left_top hold the running predictors for the next row.
 */
static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1,
                                   const uint16_t *diff, intptr_t w,
                                   int *left, int *left_top, int max)
{
    uint16_t prev = *left;      // reconstructed sample to the left
    uint16_t prev_top = *left_top; // top-left neighbour from src1

    for (intptr_t n = 0; n < w; n++) {
        int prediction = mid_pred(prev, src1[n], prev + src1[n] - prev_top);

        prev     = (prediction + diff[n]) & max;
        prev_top = src1[n];
        dst[n]   = prev;
    }

    *left     = prev;
    *left_top = prev_top;
}
123 
/*
 * Decode one horizontal slice of all planes for >8-bit content (samples are
 * uint16_t). Runs as an execute2() job; j is the slice index.
 *
 * Each plane's slice bitstream starts with a flags byte (bit 0 = raw,
 * uncoded samples) and a prediction-mode byte, followed by either raw
 * bps-wide samples or Huffman-coded residuals, which are then resolved with
 * the signalled predictor (LEFT / GRADIENT / MEDIAN).
 *
 * Returns 0 on success or AVERROR_INVALIDDATA on truncated/corrupt input.
 */
static int magy_decode_slice10(AVCodecContext *avctx, void *tdata,
                               int j, int threadnr)
{
    MagicYUVContext *s = avctx->priv_data;
    int interlaced = s->interlaced;
    const int bps = s->bps;
    const int max = s->max - 1; // sample mask, e.g. 0x3FF for 10-bit
    AVFrame *p = s->p;
    int i, k, x;
    GetBitContext gb;
    uint16_t *dst;

    for (i = 0; i < s->planes; i++) {
        int left, lefttop, top;
        // last slice may be shorter than the nominal slice_height
        int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
        int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
        int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
        // interlaced content predicts from the same field, i.e. 2 rows up
        ptrdiff_t fake_stride = (p->linesize[i] / 2) * (1 + interlaced);
        ptrdiff_t stride = p->linesize[i] / 2; // linesize is bytes; samples are 16-bit
        int flags, pred;
        int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
                                 s->slices[i][j].size);

        if (ret < 0)
            return ret;

        flags = get_bits(&gb, 8);
        pred = get_bits(&gb, 8);

        dst = (uint16_t *)p->data[i] + j * sheight * stride;
        if (flags & 1) {
            // raw mode: width*height samples of bps bits each
            if (get_bits_left(&gb) < bps * width * height)
                return AVERROR_INVALIDDATA;
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++)
                    dst[x] = get_bits(&gb, bps);

                dst += stride;
            }
        } else {
            // Huffman-coded residuals
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++) {
                    int pix;
                    if (get_bits_left(&gb) <= 0)
                        return AVERROR_INVALIDDATA;

                    pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
                    if (pix < 0)
                        return AVERROR_INVALIDDATA;

                    dst[x] = pix;
                }
                dst += stride;
            }
        }

        // Undo the per-slice prediction. The first row (two rows when
        // interlaced) always uses plain left prediction with 0 seed.
        switch (pred) {
        case LEFT:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                // seed each row with the last sample of the row above (same field)
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, dst[-fake_stride]);
                dst += stride;
            }
            break;
        case GRADIENT:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                // gradient: predict left + top - lefttop, accumulated in `left`
                top = dst[-fake_stride];
                left = top + dst[0];
                dst[0] = left & max;
                for (x = 1; x < width; x++) {
                    top = dst[x - fake_stride];
                    lefttop = dst[x - (fake_stride + 1)];
                    left += top - lefttop + dst[x];
                    dst[x] = left & max;
                }
                dst += stride;
            }
            break;
        case MEDIAN:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            lefttop = left = dst[0];
            for (k = 1 + interlaced; k < height; k++) {
                magicyuv_median_pred16(dst, dst - fake_stride, dst, width, &left, &lefttop, max);
                lefttop = left = dst[0];
                dst += stride;
            }
            break;
        default:
            // unknown predictor: plane is left as decoded, no hard error
            avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
        }
    }

    if (s->decorrelate) {
        // undo G-based RGB decorrelation: B += G, R += G (mod max+1)
        int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
        int width = avctx->coded_width;
        // NOTE(review): the r/b labels here are swapped relative to the
        // 8-bit path and to the strides used below (b advances by
        // linesize[0], r by linesize[2]). Harmless while all GBR planes
        // share the same linesize — confirm against upstream.
        uint16_t *r = (uint16_t *)p->data[0] + j * s->slice_height * p->linesize[0] / 2;
        uint16_t *g = (uint16_t *)p->data[1] + j * s->slice_height * p->linesize[1] / 2;
        uint16_t *b = (uint16_t *)p->data[2] + j * s->slice_height * p->linesize[2] / 2;

        for (i = 0; i < height; i++) {
            for (k = 0; k < width; k++) {
                b[k] = (b[k] + g[k]) & max;
                r[k] = (r[k] + g[k]) & max;
            }
            b += p->linesize[0] / 2;
            g += p->linesize[1] / 2;
            r += p->linesize[2] / 2;
        }
    }

    return 0;
}
255 
/*
 * Decode one horizontal slice of all planes for 8-bit content. Runs as an
 * execute2() job; j is the slice index. Mirrors magy_decode_slice10 but
 * operates on uint8_t samples and can use the faster byte-wise DSP routines.
 *
 * Returns 0 on success or AVERROR_INVALIDDATA on truncated/corrupt input.
 */
static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
                             int j, int threadnr)
{
    MagicYUVContext *s = avctx->priv_data;
    int interlaced = s->interlaced;
    AVFrame *p = s->p;
    int i, k, x, min_width;
    GetBitContext gb;
    uint8_t *dst;

    for (i = 0; i < s->planes; i++) {
        int left, lefttop, top;
        // last slice may be shorter than the nominal slice_height
        int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
        int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
        int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
        // interlaced content predicts from the same field, i.e. 2 rows up
        ptrdiff_t fake_stride = p->linesize[i] * (1 + interlaced);
        ptrdiff_t stride = p->linesize[i];
        int flags, pred;
        int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
                                 s->slices[i][j].size);

        if (ret < 0)
            return ret;

        flags = get_bits(&gb, 8);
        pred = get_bits(&gb, 8);

        dst = p->data[i] + j * sheight * stride;
        if (flags & 1) {
            // raw mode: 8 bits per sample
            if (get_bits_left(&gb) < 8* width * height)
                return AVERROR_INVALIDDATA;
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++)
                    dst[x] = get_bits(&gb, 8);

                dst += stride;
            }
        } else {
            // Huffman-coded residuals
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++) {
                    int pix;
                    if (get_bits_left(&gb) <= 0)
                        return AVERROR_INVALIDDATA;

                    pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
                    if (pix < 0)
                        return AVERROR_INVALIDDATA;

                    dst[x] = pix;
                }
                dst += stride;
            }
        }

        // Undo the per-slice prediction. The first row (two rows when
        // interlaced) always uses plain left prediction with 0 seed.
        switch (pred) {
        case LEFT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                // seed each row with the last sample of the row above (same field)
                s->llviddsp.add_left_pred(dst, dst, width, dst[-fake_stride]);
                dst += stride;
            }
            break;
        case GRADIENT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            min_width = FFMIN(width, 32);
            for (k = 1 + interlaced; k < height; k++) {
                // scalar prologue for the first samples, then hand the rest
                // of the row to the (SIMD-capable) DSP routine
                top = dst[-fake_stride];
                left = top + dst[0];
                dst[0] = left;
                for (x = 1; x < min_width; x++) { /* dsp need aligned 32 */
                    top = dst[x - fake_stride];
                    lefttop = dst[x - (fake_stride + 1)];
                    left += top - lefttop + dst[x];
                    dst[x] = left;
                }
                if (width > 32)
                    s->llviddsp.add_gradient_pred(dst + 32, fake_stride, width - 32);
                dst += stride;
            }
            break;
        case MEDIAN:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            lefttop = left = dst[0];
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_median_pred(dst, dst - fake_stride,
                                            dst, width, &left, &lefttop);
                lefttop = left = dst[0];
                dst += stride;
            }
            break;
        default:
            // unknown predictor: plane is left as decoded, no hard error
            avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
        }
    }

    if (s->decorrelate) {
        // undo G-based RGB decorrelation: B += G, R += G (bytes wrap mod 256)
        int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
        int width = avctx->coded_width;
        uint8_t *b = p->data[0] + j * s->slice_height * p->linesize[0];
        uint8_t *g = p->data[1] + j * s->slice_height * p->linesize[1];
        uint8_t *r = p->data[2] + j * s->slice_height * p->linesize[2];

        for (i = 0; i < height; i++) {
            s->llviddsp.add_bytes(b, g, width);
            s->llviddsp.add_bytes(r, g, width);
            b += p->linesize[0];
            g += p->linesize[1];
            r += p->linesize[2];
        }
    }

    return 0;
}
387 
/*
 * Parse the run-length coded Huffman code-length tables from the packet
 * header and build one VLC per encoded plane.
 *
 * Each entry is: 1-bit "long run" flag, 7-bit code length x, and (for long
 * runs) an 8-bit extra run length; the run assigns length x to the next
 * `l` consecutive symbols. `max` is the number of symbols per table
 * (1 << bps).
 *
 * Returns 0 on success, AVERROR_INVALIDDATA on malformed tables.
 */
static int build_huffman(AVCodecContext *avctx, GetBitContext *gbit, int max)
{
    MagicYUVContext *s = avctx->priv_data;
    HuffEntry he[4096]; // large enough for 12-bit content (2^12 symbols)
    int i = 0, j = 0, k;

    while (get_bits_left(gbit) >= 8) {
        int b = get_bits(gbit, 1);       // long-run flag
        int x = get_bits(gbit, 7);       // code length for this run
        int l = get_bitsz(gbit, b * 8) + 1; // run length (1..256)

        k = j + l;
        // reject runs past the table end and out-of-range code lengths
        if (k > max || x == 0 || x > 32) {
            av_log(avctx, AV_LOG_ERROR, "Invalid Huffman codes\n");
            return AVERROR_INVALIDDATA;
        }

        for (; j < k; j++) {
            he[j].sym = j;
            he[j].len = x;
        }

        // a table is complete once all `max` symbols have a length
        if (j == max) {
            j = 0;
            if (huff_build(he, &s->vlc[i], max)) {
                av_log(avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
                return AVERROR_INVALIDDATA;
            }
            i++;
            if (i == s->planes) {
                break;
            }
        }
    }

    if (i != s->planes) {
        av_log(avctx, AV_LOG_ERROR, "Huffman tables too short\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
430 
/*
 * Decode one MAGY packet into an AVFrame.
 *
 * Layout of a packet: 'MAGY' tag, header size, version (must be 7), format
 * byte (pixel format + bit depth), flags, dimensions, slice geometry,
 * per-plane slice offset tables, Huffman tables, then the slice bitstreams.
 * Slices are decoded in parallel via avctx->execute2().
 *
 * Returns the packet size on success or a negative AVERROR code.
 *
 * NOTE(review): several single statements in this function were dropped by
 * the documentation extraction (marked inline below); compare against
 * upstream libavcodec/magicyuv.c before building.
 */
static int magy_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    MagicYUVContext *s = avctx->priv_data;
    ThreadFrame frame = { .f = data };
    AVFrame *p = data;
    GetByteContext gbyte;
    GetBitContext gbit;
    uint32_t first_offset, offset, next_offset, header_size, slice_width;
    int width, height, format, version, table_size;
    int ret, i, j;

    bytestream2_init(&gbyte, avpkt->data, avpkt->size);
    if (bytestream2_get_le32(&gbyte) != MKTAG('M', 'A', 'G', 'Y'))
        return AVERROR_INVALIDDATA;

    header_size = bytestream2_get_le32(&gbyte);
    if (header_size < 32 || header_size >= avpkt->size) {
        av_log(avctx, AV_LOG_ERROR,
               "header or packet too small %"PRIu32"\n", header_size);
        return AVERROR_INVALIDDATA;
    }

    version = bytestream2_get_byte(&gbyte);
    if (version != 7) {
        avpriv_request_sample(avctx, "Version %d", version);
        return AVERROR_PATCHWELCOME;
    }

    // reset per-frame state; the format byte below overrides as needed
    s->hshift[1] =
    s->vshift[1] =
    s->hshift[2] =
    s->vshift[2] = 0;
    s->decorrelate = 0;
    s->bps = 8;

    format = bytestream2_get_byte(&gbyte);
    switch (format) {
    case 0x65:
        avctx->pix_fmt = AV_PIX_FMT_GBRP;
        s->decorrelate = 1;
        break;
    case 0x66:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        s->decorrelate = 1;
        break;
    case 0x67:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        break;
    case 0x68:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        s->hshift[1] =
        s->hshift[2] = 1;
        break;
    case 0x69:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        s->hshift[1] =
        s->vshift[1] =
        s->hshift[2] =
        s->vshift[2] = 1;
        break;
    case 0x6a:
        avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
        break;
    case 0x6b:
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        break;
    case 0x6c:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
        s->hshift[1] =
        s->hshift[2] = 1;
        s->bps = 10;
        break;
    case 0x76:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
        s->bps = 10;
        break;
    case 0x6d:
        avctx->pix_fmt = AV_PIX_FMT_GBRP10;
        s->decorrelate = 1;
        s->bps = 10;
        break;
    case 0x6e:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
        s->decorrelate = 1;
        s->bps = 10;
        break;
    case 0x6f:
        avctx->pix_fmt = AV_PIX_FMT_GBRP12;
        s->decorrelate = 1;
        s->bps = 12;
        break;
    case 0x70:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
        s->decorrelate = 1;
        s->bps = 12;
        break;
    case 0x73:
        avctx->pix_fmt = AV_PIX_FMT_GRAY10;
        s->bps = 10;
        break;
    default:
        avpriv_request_sample(avctx, "Format 0x%X", format);
        return AVERROR_PATCHWELCOME;
    }
    s->max = 1 << s->bps;
    // NOTE(review): extraction dropped the lines that select
    // s->magy_decode_slice (8-bit vs 10-bit path) and set s->planes from
    // av_pix_fmt_count_planes() here — restore from upstream.

    bytestream2_skip(&gbyte, 1);
    s->color_matrix = bytestream2_get_byte(&gbyte);
    s->flags = bytestream2_get_byte(&gbyte);
    s->interlaced = !!(s->flags & 2);
    bytestream2_skip(&gbyte, 3);

    width = bytestream2_get_le32(&gbyte);
    height = bytestream2_get_le32(&gbyte);
    ret = ff_set_dimensions(avctx, width, height);
    if (ret < 0)
        return ret;

    slice_width = bytestream2_get_le32(&gbyte);
    if (slice_width != avctx->coded_width) {
        avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
        return AVERROR_PATCHWELCOME;
    }
    s->slice_height = bytestream2_get_le32(&gbyte);
    if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid slice height: %d\n", s->slice_height);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(&gbyte, 4);

    s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
    if (s->nb_slices > INT_MAX / sizeof(Slice)) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of slices: %d\n", s->nb_slices);
        return AVERROR_INVALIDDATA;
    }

    if (s->interlaced) {
        // each field needs at least one predictable row per (subsampled) slice
        if ((s->slice_height >> s->vshift[1]) < 2) {
            av_log(avctx, AV_LOG_ERROR, "impossible slice height\n");
            return AVERROR_INVALIDDATA;
        }
        if ((avctx->coded_height % s->slice_height) && ((avctx->coded_height % s->slice_height) >> s->vshift[1]) < 2) {
            av_log(avctx, AV_LOG_ERROR, "impossible height\n");
            return AVERROR_INVALIDDATA;
        }
    }

    // read per-plane slice offset tables and derive slice sizes
    for (i = 0; i < s->planes; i++) {
        av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
        if (!s->slices[i])
            return AVERROR(ENOMEM);

        offset = bytestream2_get_le32(&gbyte);
        if (offset >= avpkt->size - header_size)
            return AVERROR_INVALIDDATA;

        if (i == 0)
            first_offset = offset;

        for (j = 0; j < s->nb_slices - 1; j++) {
            s->slices[i][j].start = offset + header_size;

            next_offset = bytestream2_get_le32(&gbyte);
            // offsets must be strictly increasing and inside the packet
            if (next_offset <= offset || next_offset >= avpkt->size - header_size)
                return AVERROR_INVALIDDATA;

            s->slices[i][j].size = next_offset - offset;
            offset = next_offset;
        }

        // last slice runs to the end of the packet
        s->slices[i][j].start = offset + header_size;
        s->slices[i][j].size = avpkt->size - s->slices[i][j].start;
    }

    if (bytestream2_get_byte(&gbyte) != s->planes)
        return AVERROR_INVALIDDATA;

    bytestream2_skip(&gbyte, s->nb_slices * s->planes);

    // Huffman tables sit between here and the first slice's data
    table_size = header_size + first_offset - bytestream2_tell(&gbyte);
    if (table_size < 2)
        return AVERROR_INVALIDDATA;

    ret = init_get_bits8(&gbit, avpkt->data + bytestream2_tell(&gbyte), table_size);
    if (ret < 0)
        return ret;

    ret = build_huffman(avctx, &gbit, s->max);
    if (ret < 0)
        return ret;

    // NOTE(review): extraction dropped the p->pict_type = AV_PICTURE_TYPE_I;
    // assignment here — restore from upstream.
    p->key_frame = 1;

    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    s->buf = avpkt->data;
    s->p = p;
    avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);

    if (avctx->pix_fmt == AV_PIX_FMT_GBRP ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP ||
        avctx->pix_fmt == AV_PIX_FMT_GBRP10 ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP10||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP12||
        avctx->pix_fmt == AV_PIX_FMT_GBRP12) {
        // MagicYUV stores G first; swap planes 0/1 into FFmpeg's GBR order
        FFSWAP(uint8_t*, p->data[0], p->data[1]);
        FFSWAP(int, p->linesize[0], p->linesize[1]);
    } else {
        // NOTE(review): extraction dropped the p->colorspace assignments in
        // both cases and the p->color_range assignment after the switch —
        // restore from upstream.
        switch (s->color_matrix) {
        case 1:
            break;
        case 2:
            break;
        }
    }

    *got_frame = 1;

    return avpkt->size;
}
662 
664 {
665  MagicYUVContext *s = avctx->priv_data;
667  return 0;
668 }
669 
671 {
672  MagicYUVContext * const s = avctx->priv_data;
673  int i;
674 
675  for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
676  av_freep(&s->slices[i]);
677  s->slices_size[i] = 0;
678  ff_free_vlc(&s->vlc[i]);
679  }
680 
681  return 0;
682 }
683 
685  .name = "magicyuv",
686  .long_name = NULL_IF_CONFIG_SMALL("MagicYUV video"),
687  .type = AVMEDIA_TYPE_VIDEO,
688  .id = AV_CODEC_ID_MAGICYUV,
689  .priv_data_size = sizeof(MagicYUVContext),
691  .close = magy_decode_end,
693  .capabilities = AV_CODEC_CAP_DR1 |
696  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
697 };
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:514
#define NULL
Definition: coverity.c:32
static int build_huffman(AVCodecContext *avctx, GetBitContext *gbit, int max)
Definition: magicyuv.c:388
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int(* add_left_pred)(uint8_t *dst, const uint8_t *src, ptrdiff_t w, int left)
version
Definition: libkvazaar.c:317
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:714
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:419
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:106
const char * g
Definition: vf_curves.c:115
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:518
#define avpriv_request_sample(...)
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:270
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
int size
Definition: packet.h:364
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:415
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
unsigned int slices_size[4]
Definition: magicyuv.c:70
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate.The lists are not just lists
AVCodec.
Definition: codec.h:190
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
LLVidDSPContext llviddsp
Definition: magicyuv.c:74
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:380
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
uint8_t
#define av_cold
Definition: attributes.h:88
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Multithreading support functions.
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
Prediction
Definition: magicyuv.c:43
#define height
uint8_t * data
Definition: packet.h:363
void(* add_median_pred)(uint8_t *dst, const uint8_t *top, const uint8_t *diff, ptrdiff_t w, int *left, int *left_top)
bitstream reader API header.
int hshift[4]
Definition: magicyuv.c:67
#define max(a, b)
Definition: cuda_runtime.h:33
uint32_t code
Definition: magicyuv.c:52
#define av_log(a,...)
void(* add_gradient_pred)(uint8_t *src, const ptrdiff_t stride, const ptrdiff_t width)
static int magy_decode_slice(AVCodecContext *avctx, void *tdata, int j, int threadnr)
Definition: magicyuv.c:256
static int huff_cmp_len(const void *a, const void *b)
Definition: magicyuv.c:77
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int slice_height
Definition: magicyuv.c:59
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:164
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
const char * r
Definition: vf_curves.c:114
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:552
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:563
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:420
const char * name
Name of the codec implementation.
Definition: codec.h:197
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:402
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
Definition: vlc.h:26
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
Slice * slices[4]
Definition: magicyuv.c:69
static int huff_build(HuffEntry he[], VLC *vlc, int nb_elems)
Definition: magicyuv.c:83
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames.The frames must then be freed with ff_thread_release_buffer().Otherwise decode directly into the user-supplied frames.Call ff_thread_report_progress() after some part of the current picture has decoded.A good place to put this is where draw_horiz_band() is called-add this if it isn't called anywhere
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
#define b
Definition: input.c:41
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:391
int vshift[4]
Definition: magicyuv.c:68
#define FFMIN(a, b)
Definition: common.h:96
uint8_t interlaced
Definition: mxfenc.c:2168
#define width
uint8_t w
Definition: llviddspenc.c:38
Definition: magicyuv.c:38
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
#define FF_ARRAY_ELEMS(a)
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:538
int bits
Definition: vlc.h:27
if(ret)
static const float pred[4]
Definition: siprdata.h:259
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:110
static av_cold int magy_decode_init(AVCodecContext *avctx)
Definition: magicyuv.c:663
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:1845
int(* add_left_pred_int16)(uint16_t *dst, const uint16_t *src, unsigned mask, ptrdiff_t w, unsigned left)
#define src1
Definition: h264pred.c:139
uint8_t len
Definition: magicyuv.c:51
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
main external API structure.
Definition: avcodec.h:526
static int magy_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: magicyuv.c:431
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
uint32_t size
Definition: magicyuv.c:40
void ff_llviddsp_init(LLVidDSPContext *c)
uint32_t start
Definition: magicyuv.c:39
int coded_height
Definition: avcodec.h:714
AVCodec ff_magicyuv_decoder
Definition: magicyuv.c:684
Definition: magicyuv.c:44
#define mid_pred
Definition: mathops.h:97
VLC vlc[4]
Definition: magicyuv.c:71
AVFrame * p
Definition: magicyuv.c:56
static int magy_decode_slice10(AVCodecContext *avctx, void *tdata, int j, int threadnr)
Definition: magicyuv.c:124
uint8_t * buf
Definition: magicyuv.c:66
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:416
int(* magy_decode_slice)(AVCodecContext *avctx, void *tdata, int j, int threadnr)
Definition: magicyuv.c:72
#define flags(name, subs,...)
Definition: cbs_av1.c:560
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:537
static av_cold int magy_decode_end(AVCodecContext *avctx)
Definition: magicyuv.c:670
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
int
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
common internal api header.
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
uint16_t sym
Definition: magicyuv.c:50
unsigned bps
Definition: movenc.c:1533
void * priv_data
Definition: avcodec.h:553
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int len
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:386
static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1, const uint16_t *diff, intptr_t w, int *left, int *left_top, int max)
Definition: magicyuv.c:103
#define av_freep(p)
#define FFSWAP(type, a, b)
Definition: common.h:99
#define stride
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
void(* add_bytes)(uint8_t *dst, uint8_t *src, ptrdiff_t w)
#define MKTAG(a, b, c, d)
Definition: common.h:406
This structure stores compressed data.
Definition: packet.h:340
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:358
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
#define AV_QSORT(p, num, type, cmp)
Quicksort This sort is fast, and fully inplace but not stable and it is possible to construct input t...
Definition: qsort.h:33
for(j=16;j >0;--j)
int i
Definition: input.c:407
static av_always_inline int get_bitsz(GetBitContext *s, int n)
Read 0-25 bits.
Definition: get_bits.h:415
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
int color_matrix
Definition: magicyuv.c:63