FFmpeg
magicyuv.c
1 /*
2  * MagicYUV decoder
3  * Copyright (c) 2016 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdlib.h>
23 #include <string.h>
24 
25 #include "libavutil/pixdesc.h"
26 #include "libavutil/qsort.h"
27 
28 #include "avcodec.h"
29 #include "bytestream.h"
30 #include "get_bits.h"
31 #include "huffyuvdsp.h"
32 #include "internal.h"
33 #include "lossless_videodsp.h"
34 #include "thread.h"
35 
36 typedef struct Slice {
37  uint32_t start;
38  uint32_t size;
39 } Slice;
40 
41 typedef enum Prediction {
42  LEFT = 1,
43  GRADIENT,
44  MEDIAN,
45 } Prediction;
46 
47 typedef struct HuffEntry {
48  uint16_t sym;
49  uint8_t len;
50  uint32_t code;
51 } HuffEntry;
52 
53 typedef struct MagicYUVContext {
54  AVFrame *p;
55  int max;
56  int bps;
57  int slice_height;
58  int nb_slices;
59  int planes; // number of encoded planes in bitstream
60  int decorrelate; // postprocessing work
61  int color_matrix; // video color matrix
62  int flags;
63  int interlaced; // video is interlaced
64  uint8_t *buf; // pointer to AVPacket->data
65  int hshift[4];
66  int vshift[4];
67  Slice *slices[4]; // slice bitstream positions for each plane
68  unsigned int slices_size[4]; // slice sizes for each plane
69  uint8_t len[4][4096]; // table of code lengths for each plane
70  VLC vlc[4]; // VLC for each plane
71  int (*huff_build)(VLC *vlc, uint8_t *len);
72  int (*magy_decode_slice)(AVCodecContext *avctx, void *tdata,
73  int j, int threadnr);
74  LLVidDSPContext llviddsp;
75 } MagicYUVContext;
76 
77 static int huff_cmp_len(const void *a, const void *b)
78 {
79  const HuffEntry *aa = a, *bb = b;
80  return (aa->len - bb->len) * 256 + aa->sym - bb->sym;
81 }
82 
83 static int huff_cmp_len10(const void *a, const void *b)
84 {
85  const HuffEntry *aa = a, *bb = b;
86  return (aa->len - bb->len) * 1024 + aa->sym - bb->sym;
87 }
88 
89 static int huff_cmp_len12(const void *a, const void *b)
90 {
91  const HuffEntry *aa = a, *bb = b;
92  return (aa->len - bb->len) * 4096 + aa->sym - bb->sym;
93 }
94 
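/*
 * The huff_build*() variants below build canonical Huffman tables from
 * per-symbol code lengths: entries are sorted by (length, symbol),
 * MSB-aligned codes are assigned starting from the longest codes, and the
 * result is handed to ff_init_vlc_sparse(). They differ only in alphabet
 * size (1024, 4096 and 256 symbols for 10-, 12- and 8-bit data).
 */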
95 static int huff_build10(VLC *vlc, uint8_t *len)
96 {
97  HuffEntry he[1024];
98  uint32_t codes[1024];
99  uint8_t bits[1024];
100  uint16_t syms[1024];
101  uint32_t code;
102  int i;
103 
104  for (i = 0; i < 1024; i++) {
105  he[i].sym = 1023 - i;
106  he[i].len = len[i];
107  if (len[i] == 0 || len[i] > 32)
108  return AVERROR_INVALIDDATA;
109  }
110  AV_QSORT(he, 1024, HuffEntry, huff_cmp_len10);
111 
112  code = 1;
113  for (i = 1023; i >= 0; i--) {
114  codes[i] = code >> (32 - he[i].len);
115  bits[i] = he[i].len;
116  syms[i] = he[i].sym;
117  code += 0x80000000u >> (he[i].len - 1);
118  }
119 
120  ff_free_vlc(vlc);
121  return ff_init_vlc_sparse(vlc, FFMIN(he[1023].len, 12), 1024,
122  bits, sizeof(*bits), sizeof(*bits),
123  codes, sizeof(*codes), sizeof(*codes),
124  syms, sizeof(*syms), sizeof(*syms), 0);
125 }
126 
127 static int huff_build12(VLC *vlc, uint8_t *len)
128 {
129  HuffEntry he[4096];
130  uint32_t codes[4096];
131  uint8_t bits[4096];
132  uint16_t syms[4096];
133  uint32_t code;
134  int i;
135 
136  for (i = 0; i < 4096; i++) {
137  he[i].sym = 4095 - i;
138  he[i].len = len[i];
139  if (len[i] == 0 || len[i] > 32)
140  return AVERROR_INVALIDDATA;
141  }
142  AV_QSORT(he, 4096, HuffEntry, huff_cmp_len12);
143 
144  code = 1;
145  for (i = 4095; i >= 0; i--) {
146  codes[i] = code >> (32 - he[i].len);
147  bits[i] = he[i].len;
148  syms[i] = he[i].sym;
149  code += 0x80000000u >> (he[i].len - 1);
150  }
151 
152  ff_free_vlc(vlc);
153  return ff_init_vlc_sparse(vlc, FFMIN(he[4095].len, 14), 4096,
154  bits, sizeof(*bits), sizeof(*bits),
155  codes, sizeof(*codes), sizeof(*codes),
156  syms, sizeof(*syms), sizeof(*syms), 0);
157 }
158 
159 static int huff_build(VLC *vlc, uint8_t *len)
160 {
161  HuffEntry he[256];
162  uint32_t codes[256];
163  uint8_t bits[256];
164  uint8_t syms[256];
165  uint32_t code;
166  int i;
167 
168  for (i = 0; i < 256; i++) {
169  he[i].sym = 255 - i;
170  he[i].len = len[i];
171  if (len[i] == 0 || len[i] > 32)
172  return AVERROR_INVALIDDATA;
173  }
174  AV_QSORT(he, 256, HuffEntry, huff_cmp_len);
175 
176  code = 1;
177  for (i = 255; i >= 0; i--) {
178  codes[i] = code >> (32 - he[i].len);
179  bits[i] = he[i].len;
180  syms[i] = he[i].sym;
181  code += 0x80000000u >> (he[i].len - 1);
182  }
183 
184  ff_free_vlc(vlc);
185  return ff_init_vlc_sparse(vlc, FFMIN(he[255].len, 12), 256,
186  bits, sizeof(*bits), sizeof(*bits),
187  codes, sizeof(*codes), sizeof(*codes),
188  syms, sizeof(*syms), sizeof(*syms), 0);
189 }
190 
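/*
 * Median prediction for the >8-bit path: each output sample is
 * mid_pred(left, top, left + top - topleft) plus the decoded residual,
 * masked to the plane's bit depth (max is 2^bps - 1 here).
 */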
191 static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1,
192  const uint16_t *diff, intptr_t w,
193  int *left, int *left_top, int max)
194 {
195  int i;
196  uint16_t l, lt;
197 
198  l = *left;
199  lt = *left_top;
200 
201  for (i = 0; i < w; i++) {
202  l = mid_pred(l, src1[i], (l + src1[i] - lt)) + diff[i];
203  l &= max;
204  lt = src1[i];
205  dst[i] = l;
206  }
207 
208  *left = l;
209  *left_top = lt;
210 }
211 
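/*
 * Slice decoder for 10- and 12-bit content. For each plane the slice
 * starts with an 8-bit flags field and an 8-bit predictor id; samples are
 * either stored raw (flags & 1) or as VLC codes (reconstructed as
 * max - code), after which LEFT, GRADIENT or MEDIAN prediction is undone.
 */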
212 static int magy_decode_slice10(AVCodecContext *avctx, void *tdata,
213  int j, int threadnr)
214 {
215  MagicYUVContext *s = avctx->priv_data;
216  int interlaced = s->interlaced;
217  const int bps = s->bps;
218  const int max = s->max - 1;
219  AVFrame *p = s->p;
220  int i, k, x;
221  GetBitContext gb;
222  uint16_t *dst;
223 
224  for (i = 0; i < s->planes; i++) {
225  int left, lefttop, top;
226  int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
227  int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
228  int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
229  ptrdiff_t fake_stride = (p->linesize[i] / 2) * (1 + interlaced);
230  ptrdiff_t stride = p->linesize[i] / 2;
231  int flags, pred;
232  int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
233  s->slices[i][j].size);
234 
235  if (ret < 0)
236  return ret;
237 
238  flags = get_bits(&gb, 8);
239  pred = get_bits(&gb, 8);
240 
241  dst = (uint16_t *)p->data[i] + j * sheight * stride;
242  if (flags & 1) {
243  if (get_bits_left(&gb) < bps * width * height)
244  return AVERROR_INVALIDDATA;
245  for (k = 0; k < height; k++) {
246  for (x = 0; x < width; x++)
247  dst[x] = get_bits(&gb, bps);
248 
249  dst += stride;
250  }
251  } else {
252  for (k = 0; k < height; k++) {
253  for (x = 0; x < width; x++) {
254  int pix;
255  if (get_bits_left(&gb) <= 0)
256  return AVERROR_INVALIDDATA;
257 
258  pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
259  if (pix < 0)
260  return AVERROR_INVALIDDATA;
261 
262  dst[x] = max - pix;
263  }
264  dst += stride;
265  }
266  }
267 
268  switch (pred) {
269  case LEFT:
270  dst = (uint16_t *)p->data[i] + j * sheight * stride;
271  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
272  dst += stride;
273  if (interlaced) {
274  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
275  dst += stride;
276  }
277  for (k = 1 + interlaced; k < height; k++) {
278  s->llviddsp.add_left_pred_int16(dst, dst, max, width, dst[-fake_stride]);
279  dst += stride;
280  }
281  break;
282  case GRADIENT:
283  dst = (uint16_t *)p->data[i] + j * sheight * stride;
284  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
285  dst += stride;
286  if (interlaced) {
287  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
288  dst += stride;
289  }
290  for (k = 1 + interlaced; k < height; k++) {
291  top = dst[-fake_stride];
292  left = top + dst[0];
293  dst[0] = left & max;
294  for (x = 1; x < width; x++) {
295  top = dst[x - fake_stride];
296  lefttop = dst[x - (fake_stride + 1)];
297  left += top - lefttop + dst[x];
298  dst[x] = left & max;
299  }
300  dst += stride;
301  }
302  break;
303  case MEDIAN:
304  dst = (uint16_t *)p->data[i] + j * sheight * stride;
305  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
306  dst += stride;
307  if (interlaced) {
308  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
309  dst += stride;
310  }
311  lefttop = left = dst[0];
312  for (k = 1 + interlaced; k < height; k++) {
313  magicyuv_median_pred16(dst, dst - fake_stride, dst, width, &left, &lefttop, max);
314  lefttop = left = dst[0];
315  dst += stride;
316  }
317  break;
318  default:
319  avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
320  }
321  }
322 
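/* For RGB pixel formats plane 1 (G) is coded directly and planes 0 and 2
 * were stored as differences against it; add G back, wrapping at the
 * plane's bit depth. */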
323  if (s->decorrelate) {
324  int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
325  int width = avctx->coded_width;
326  uint16_t *r = (uint16_t *)p->data[0] + j * s->slice_height * p->linesize[0] / 2;
327  uint16_t *g = (uint16_t *)p->data[1] + j * s->slice_height * p->linesize[1] / 2;
328  uint16_t *b = (uint16_t *)p->data[2] + j * s->slice_height * p->linesize[2] / 2;
329 
330  for (i = 0; i < height; i++) {
331  for (k = 0; k < width; k++) {
332  b[k] = (b[k] + g[k]) & max;
333  r[k] = (r[k] + g[k]) & max;
334  }
335  b += p->linesize[0] / 2;
336  g += p->linesize[1] / 2;
337  r += p->linesize[2] / 2;
338  }
339  }
340 
341  return 0;
342 }
343 
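/*
 * 8-bit variant of the slice decoder; same bitstream layout as above, but
 * prediction and decorrelation go through the LLVidDSP helpers
 * (add_left_pred, add_gradient_pred, add_median_pred, add_bytes).
 */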
344 static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
345  int j, int threadnr)
346 {
347  MagicYUVContext *s = avctx->priv_data;
348  int interlaced = s->interlaced;
349  AVFrame *p = s->p;
350  int i, k, x, min_width;
351  GetBitContext gb;
352  uint8_t *dst;
353 
354  for (i = 0; i < s->planes; i++) {
355  int left, lefttop, top;
356  int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
357  int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
358  int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
359  ptrdiff_t fake_stride = p->linesize[i] * (1 + interlaced);
360  ptrdiff_t stride = p->linesize[i];
361  int flags, pred;
362  int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
363  s->slices[i][j].size);
364 
365  if (ret < 0)
366  return ret;
367 
368  flags = get_bits(&gb, 8);
369  pred = get_bits(&gb, 8);
370 
371  dst = p->data[i] + j * sheight * stride;
372  if (flags & 1) {
373  if (get_bits_left(&gb) < 8 * width * height)
374  return AVERROR_INVALIDDATA;
375  for (k = 0; k < height; k++) {
376  for (x = 0; x < width; x++)
377  dst[x] = get_bits(&gb, 8);
378 
379  dst += stride;
380  }
381  } else {
382  for (k = 0; k < height; k++) {
383  for (x = 0; x < width; x++) {
384  int pix;
385  if (get_bits_left(&gb) <= 0)
386  return AVERROR_INVALIDDATA;
387 
388  pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
389  if (pix < 0)
390  return AVERROR_INVALIDDATA;
391 
392  dst[x] = 255 - pix;
393  }
394  dst += stride;
395  }
396  }
397 
398  switch (pred) {
399  case LEFT:
400  dst = p->data[i] + j * sheight * stride;
401  s->llviddsp.add_left_pred(dst, dst, width, 0);
402  dst += stride;
403  if (interlaced) {
404  s->llviddsp.add_left_pred(dst, dst, width, 0);
405  dst += stride;
406  }
407  for (k = 1 + interlaced; k < height; k++) {
408  s->llviddsp.add_left_pred(dst, dst, width, dst[-fake_stride]);
409  dst += stride;
410  }
411  break;
412  case GRADIENT:
413  dst = p->data[i] + j * sheight * stride;
414  s->llviddsp.add_left_pred(dst, dst, width, 0);
415  dst += stride;
416  if (interlaced) {
417  s->llviddsp.add_left_pred(dst, dst, width, 0);
418  dst += stride;
419  }
420  min_width = FFMIN(width, 32);
421  for (k = 1 + interlaced; k < height; k++) {
422  top = dst[-fake_stride];
423  left = top + dst[0];
424  dst[0] = left;
425  for (x = 1; x < min_width; x++) { /* dsp need aligned 32 */
426  top = dst[x - fake_stride];
427  lefttop = dst[x - (fake_stride + 1)];
428  left += top - lefttop + dst[x];
429  dst[x] = left;
430  }
431  if (width > 32)
432  s->llviddsp.add_gradient_pred(dst + 32, fake_stride, width - 32);
433  dst += stride;
434  }
435  break;
436  case MEDIAN:
437  dst = p->data[i] + j * sheight * stride;
438  s->llviddsp.add_left_pred(dst, dst, width, 0);
439  dst += stride;
440  if (interlaced) {
441  s->llviddsp.add_left_pred(dst, dst, width, 0);
442  dst += stride;
443  }
444  lefttop = left = dst[0];
445  for (k = 1 + interlaced; k < height; k++) {
446  s->llviddsp.add_median_pred(dst, dst - fake_stride,
447  dst, width, &left, &lefttop);
448  lefttop = left = dst[0];
449  dst += stride;
450  }
451  break;
452  default:
453  avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
454  }
455  }
456 
457  if (s->decorrelate) {
458  int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
459  int width = avctx->coded_width;
460  uint8_t *b = p->data[0] + j * s->slice_height * p->linesize[0];
461  uint8_t *g = p->data[1] + j * s->slice_height * p->linesize[1];
462  uint8_t *r = p->data[2] + j * s->slice_height * p->linesize[2];
463 
464  for (i = 0; i < height; i++) {
465  s->llviddsp.add_bytes(b, g, width);
466  s->llviddsp.add_bytes(r, g, width);
467  b += p->linesize[0];
468  g += p->linesize[1];
469  r += p->linesize[2];
470  }
471  }
472 
473  return 0;
474 }
475 
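/*
 * Parse the run-length coded table of Huffman code lengths: each chunk is
 * a 1-bit "long run" flag, a 7-bit code length and, when the flag is set,
 * an extra 8-bit count; the length is repeated count + 1 times (just once
 * when the flag is clear). Once a plane's max entries are filled, its VLC
 * is built via s->huff_build().
 */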
476 static int build_huffman(AVCodecContext *avctx, GetBitContext *gbit, int max)
477 {
478  MagicYUVContext *s = avctx->priv_data;
479  int i = 0, j = 0, k;
480 
481  memset(s->len, 0, sizeof(s->len));
482  while (get_bits_left(gbit) >= 8) {
483  int b = get_bits(gbit, 1);
484  int x = get_bits(gbit, 7);
485  int l = get_bitsz(gbit, b * 8) + 1;
486 
487  for (k = 0; k < l; k++)
488  if (j + k < max)
489  s->len[i][j + k] = x;
490 
491  j += l;
492  if (j == max) {
493  j = 0;
494  if (s->huff_build(&s->vlc[i], s->len[i])) {
495  av_log(avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
496  return AVERROR_INVALIDDATA;
497  }
498  i++;
499  if (i == s->planes) {
500  break;
501  }
502  } else if (j > max) {
503  av_log(avctx, AV_LOG_ERROR, "Invalid Huffman codes\n");
504  return AVERROR_INVALIDDATA;
505  }
506  }
507 
508  if (i != s->planes) {
509  av_log(avctx, AV_LOG_ERROR, "Huffman tables too short\n");
510  return AVERROR_INVALIDDATA;
511  }
512 
513  return 0;
514 }
515 
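/*
 * Frame decoding entry point. The packet starts with a 'MAGY' tag and a
 * header that is parsed below: header size, version (only 7 is accepted),
 * a format byte selecting the pixel format, color matrix and flags bytes,
 * coded width/height, slice width/height, per-plane tables of slice
 * offsets, the plane count, nb_slices * planes bytes that this decoder
 * skips, and the Huffman length tables; slice payloads follow the header.
 */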
516 static int magy_decode_frame(AVCodecContext *avctx, void *data,
517  int *got_frame, AVPacket *avpkt)
518 {
519  MagicYUVContext *s = avctx->priv_data;
520  ThreadFrame frame = { .f = data };
521  AVFrame *p = data;
522  GetByteContext gbyte;
523  GetBitContext gbit;
524  uint32_t first_offset, offset, next_offset, header_size, slice_width;
525  int width, height, format, version, table_size;
526  int ret, i, j;
527 
528  bytestream2_init(&gbyte, avpkt->data, avpkt->size);
529  if (bytestream2_get_le32(&gbyte) != MKTAG('M', 'A', 'G', 'Y'))
530  return AVERROR_INVALIDDATA;
531 
532  header_size = bytestream2_get_le32(&gbyte);
533  if (header_size < 32 || header_size >= avpkt->size) {
534  av_log(avctx, AV_LOG_ERROR,
535  "header or packet too small %"PRIu32"\n", header_size);
536  return AVERROR_INVALIDDATA;
537  }
538 
539  version = bytestream2_get_byte(&gbyte);
540  if (version != 7) {
541  avpriv_request_sample(avctx, "Version %d", version);
542  return AVERROR_PATCHWELCOME;
543  }
544 
545  s->hshift[1] =
546  s->vshift[1] =
547  s->hshift[2] =
548  s->vshift[2] = 0;
549  s->decorrelate = 0;
550  s->max = 256;
551  s->bps = 8;
552  s->huff_build = huff_build;
553  s->magy_decode_slice = magy_decode_slice;
554 
555  format = bytestream2_get_byte(&gbyte);
556  switch (format) {
557  case 0x65:
558  avctx->pix_fmt = AV_PIX_FMT_GBRP;
559  s->decorrelate = 1;
560  break;
561  case 0x66:
562  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
563  s->decorrelate = 1;
564  break;
565  case 0x67:
566  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
567  break;
568  case 0x68:
569  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
570  s->hshift[1] =
571  s->hshift[2] = 1;
572  break;
573  case 0x69:
574  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
575  s->hshift[1] =
576  s->vshift[1] =
577  s->hshift[2] =
578  s->vshift[2] = 1;
579  break;
580  case 0x6a:
581  avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
582  break;
583  case 0x6b:
584  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
585  break;
586  case 0x6c:
587  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
588  s->hshift[1] =
589  s->hshift[2] = 1;
590  s->max = 1024;
591  s->huff_build = huff_build10;
592  s->magy_decode_slice = magy_decode_slice10;
593  s->bps = 10;
594  break;
595  case 0x76:
596  avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
597  s->max = 1024;
598  s->huff_build = huff_build10;
599  s->magy_decode_slice = magy_decode_slice10;
600  s->bps = 10;
601  break;
602  case 0x6d:
603  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
604  s->decorrelate = 1;
605  s->max = 1024;
606  s->huff_build = huff_build10;
607  s->magy_decode_slice = magy_decode_slice10;
608  s->bps = 10;
609  break;
610  case 0x6e:
611  avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
612  s->decorrelate = 1;
613  s->max = 1024;
614  s->huff_build = huff_build10;
615  s->magy_decode_slice = magy_decode_slice10;
616  s->bps = 10;
617  break;
618  case 0x6f:
619  avctx->pix_fmt = AV_PIX_FMT_GBRP12;
620  s->decorrelate = 1;
621  s->max = 4096;
622  s->huff_build = huff_build12;
623  s->magy_decode_slice = magy_decode_slice10;
624  s->bps = 12;
625  break;
626  case 0x70:
627  avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
628  s->decorrelate = 1;
629  s->max = 4096;
630  s->huff_build = huff_build12;
631  s->magy_decode_slice = magy_decode_slice10;
632  s->bps = 12;
633  break;
634  case 0x73:
635  avctx->pix_fmt = AV_PIX_FMT_GRAY10;
636  s->max = 1024;
637  s->huff_build = huff_build10;
638  s->magy_decode_slice = magy_decode_slice10;
639  s->bps = 10;
640  break;
641  default:
642  avpriv_request_sample(avctx, "Format 0x%X", format);
643  return AVERROR_PATCHWELCOME;
644  }
645  s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);
646 
647  bytestream2_skip(&gbyte, 1);
648  s->color_matrix = bytestream2_get_byte(&gbyte);
649  s->flags = bytestream2_get_byte(&gbyte);
650  s->interlaced = !!(s->flags & 2);
651  bytestream2_skip(&gbyte, 3);
652 
653  width = bytestream2_get_le32(&gbyte);
654  height = bytestream2_get_le32(&gbyte);
655  ret = ff_set_dimensions(avctx, width, height);
656  if (ret < 0)
657  return ret;
658 
659  slice_width = bytestream2_get_le32(&gbyte);
660  if (slice_width != avctx->coded_width) {
661  avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
662  return AVERROR_PATCHWELCOME;
663  }
664  s->slice_height = bytestream2_get_le32(&gbyte);
665  if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
666  av_log(avctx, AV_LOG_ERROR,
667  "invalid slice height: %d\n", s->slice_height);
668  return AVERROR_INVALIDDATA;
669  }
670 
671  bytestream2_skip(&gbyte, 4);
672 
673  s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
674  if (s->nb_slices > INT_MAX / sizeof(Slice)) {
675  av_log(avctx, AV_LOG_ERROR,
676  "invalid number of slices: %d\n", s->nb_slices);
677  return AVERROR_INVALIDDATA;
678  }
679 
680  for (i = 0; i < s->planes; i++) {
681  av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
682  if (!s->slices[i])
683  return AVERROR(ENOMEM);
684 
685  offset = bytestream2_get_le32(&gbyte);
686  if (offset >= avpkt->size - header_size)
687  return AVERROR_INVALIDDATA;
688 
689  if (i == 0)
690  first_offset = offset;
691 
692  for (j = 0; j < s->nb_slices - 1; j++) {
693  s->slices[i][j].start = offset + header_size;
694 
695  next_offset = bytestream2_get_le32(&gbyte);
696  if (next_offset <= offset || next_offset >= avpkt->size - header_size)
697  return AVERROR_INVALIDDATA;
698 
699  s->slices[i][j].size = next_offset - offset;
700  offset = next_offset;
701  }
702 
703  s->slices[i][j].start = offset + header_size;
704  s->slices[i][j].size = avpkt->size - s->slices[i][j].start;
705  }
706 
707  if (bytestream2_get_byte(&gbyte) != s->planes)
708  return AVERROR_INVALIDDATA;
709 
710  bytestream2_skip(&gbyte, s->nb_slices * s->planes);
711 
712  table_size = header_size + first_offset - bytestream2_tell(&gbyte);
713  if (table_size < 2)
714  return AVERROR_INVALIDDATA;
715 
716  ret = init_get_bits8(&gbit, avpkt->data + bytestream2_tell(&gbyte), table_size);
717  if (ret < 0)
718  return ret;
719 
720  ret = build_huffman(avctx, &gbit, s->max);
721  if (ret < 0)
722  return ret;
723 
724  p->pict_type = AV_PICTURE_TYPE_I;
725  p->key_frame = 1;
726 
727  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
728  return ret;
729 
730  s->buf = avpkt->data;
731  s->p = p;
732  avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);
733 
734  if (avctx->pix_fmt == AV_PIX_FMT_GBRP ||
735  avctx->pix_fmt == AV_PIX_FMT_GBRAP ||
736  avctx->pix_fmt == AV_PIX_FMT_GBRP10 ||
737  avctx->pix_fmt == AV_PIX_FMT_GBRAP10||
738  avctx->pix_fmt == AV_PIX_FMT_GBRAP12||
739  avctx->pix_fmt == AV_PIX_FMT_GBRP12) {
740  FFSWAP(uint8_t*, p->data[0], p->data[1]);
741  FFSWAP(int, p->linesize[0], p->linesize[1]);
742  } else {
743  switch (s->color_matrix) {
744  case 1:
745  p->colorspace = AVCOL_SPC_BT470BG;
746  break;
747  case 2:
748  p->colorspace = AVCOL_SPC_BT709;
749  break;
750  }
751  p->color_range = (s->flags & 4) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
752  }
753 
754  *got_frame = 1;
755 
756  return avpkt->size;
757 }
758 
759 #if HAVE_THREADS
760 static int magy_init_thread_copy(AVCodecContext *avctx)
761 {
762  MagicYUVContext *s = avctx->priv_data;
763  int i;
764 
765  for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
766  s->slices[i] = NULL;
767  s->slices_size[i] = 0;
768  }
769 
770  return 0;
771 }
772 #endif
773 
774 static av_cold int magy_decode_init(AVCodecContext *avctx)
775 {
776  MagicYUVContext *s = avctx->priv_data;
777  ff_llviddsp_init(&s->llviddsp);
778  return 0;
779 }
780 
781 static av_cold int magy_decode_end(AVCodecContext *avctx)
782 {
783  MagicYUVContext * const s = avctx->priv_data;
784  int i;
785 
786  for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
787  av_freep(&s->slices[i]);
788  s->slices_size[i] = 0;
789  ff_free_vlc(&s->vlc[i]);
790  }
791 
792  return 0;
793 }
794 
795 AVCodec ff_magicyuv_decoder = {
796  .name = "magicyuv",
797  .long_name = NULL_IF_CONFIG_SMALL("MagicYUV video"),
798  .type = AVMEDIA_TYPE_VIDEO,
799  .id = AV_CODEC_ID_MAGICYUV,
800  .priv_data_size = sizeof(MagicYUVContext),
801  .init = magy_decode_init,
802  .init_thread_copy = ONLY_IF_THREADS_ENABLED(magy_init_thread_copy),
803  .close = magy_decode_end,
804  .decode = magy_decode_frame,
805  .capabilities = AV_CODEC_CAP_DR1 |
806  AV_CODEC_CAP_FRAME_THREADS |
807  AV_CODEC_CAP_SLICE_THREADS,
808  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
809 };