/*
 * MagicYUV decoder
 * Copyright (c) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>

#include "libavutil/pixdesc.h"
#include "libavutil/qsort.h"

#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
#include "huffyuvdsp.h"
#include "internal.h"
#include "lossless_videodsp.h"
#include "thread.h"

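/*
 * Overview (as implemented below): each MagicYUV frame carries per-plane
 * slices. A slice payload is either raw samples or Huffman-coded values,
 * followed by a spatial prediction pass (left, gradient or median) and,
 * for the GBR pixel formats, a green-based decorrelation step.
 */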
typedef struct Slice {
    uint32_t start;
    uint32_t size;
} Slice;

typedef enum Prediction {
    LEFT     = 1,
    GRADIENT = 2,
    MEDIAN   = 3,
} Prediction;

typedef struct HuffEntry {
    uint16_t sym;
    uint8_t  len;
    uint32_t code;
} HuffEntry;

typedef struct MagicYUVContext {
    AVFrame          *p;
    int               max;
    int               bps;
    int               slice_height;
    int               nb_slices;
    int               planes;         // number of encoded planes in bitstream
    int               decorrelate;    // postprocessing work
    int               color_matrix;   // video color matrix
    int               flags;
    int               interlaced;     // video is interlaced
    uint8_t          *buf;            // pointer to AVPacket->data
    int               hshift[4];
    int               vshift[4];
    Slice            *slices[4];      // slice bitstream positions for each plane
    unsigned int      slices_size[4]; // slice sizes for each plane
    uint8_t           len[4][4096];   // table of code lengths for each plane
    VLC               vlc[4];         // VLC for each plane
    int (*huff_build)(VLC *vlc, uint8_t *len);
    int (*magy_decode_slice)(AVCodecContext *avctx, void *tdata,
                             int j, int threadnr);
    LLVidDSPContext   llviddsp;
} MagicYUVContext;

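/*
 * Sort Huffman entries by ascending code length, ties broken by symbol
 * value; the multiplier is chosen so that the length difference always
 * outweighs the symbol difference for the given alphabet size.
 */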
static int huff_cmp_len(const void *a, const void *b)
{
    const HuffEntry *aa = a, *bb = b;
    return (aa->len - bb->len) * 256 + aa->sym - bb->sym;
}

static int huff_cmp_len10(const void *a, const void *b)
{
    const HuffEntry *aa = a, *bb = b;
    return (aa->len - bb->len) * 1024 + aa->sym - bb->sym;
}

static int huff_cmp_len12(const void *a, const void *b)
{
    const HuffEntry *aa = a, *bb = b;
    return (aa->len - bb->len) * 4096 + aa->sym - bb->sym;
}

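/*
 * Build a VLC for one plane from its table of per-symbol code lengths:
 * entries are sorted by length, canonical codes are assigned starting from
 * the longest codes, and the result is handed to ff_init_vlc_sparse().
 */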
static int huff_build10(VLC *vlc, uint8_t *len)
{
    HuffEntry he[1024];
    uint32_t codes[1024];
    uint8_t bits[1024];
    uint16_t syms[1024];
    uint32_t code;
    int i;

    for (i = 0; i < 1024; i++) {
        he[i].sym = 1023 - i;
        he[i].len = len[i];
        if (len[i] == 0 || len[i] > 32)
            return AVERROR_INVALIDDATA;
    }
    AV_QSORT(he, 1024, HuffEntry, huff_cmp_len10);

    code = 1;
    for (i = 1023; i >= 0; i--) {
        codes[i] = code >> (32 - he[i].len);
        bits[i]  = he[i].len;
        syms[i]  = he[i].sym;
        code += 0x80000000u >> (he[i].len - 1);
    }

    ff_free_vlc(vlc);
    return ff_init_vlc_sparse(vlc, FFMIN(he[1023].len, 12), 1024,
                              bits,  sizeof(*bits),  sizeof(*bits),
                              codes, sizeof(*codes), sizeof(*codes),
                              syms,  sizeof(*syms),  sizeof(*syms), 0);
}

static int huff_build12(VLC *vlc, uint8_t *len)
{
    HuffEntry he[4096];
    uint32_t codes[4096];
    uint8_t bits[4096];
    uint16_t syms[4096];
    uint32_t code;
    int i;

    for (i = 0; i < 4096; i++) {
        he[i].sym = 4095 - i;
        he[i].len = len[i];
        if (len[i] == 0 || len[i] > 32)
            return AVERROR_INVALIDDATA;
    }
    AV_QSORT(he, 4096, HuffEntry, huff_cmp_len12);

    code = 1;
    for (i = 4095; i >= 0; i--) {
        codes[i] = code >> (32 - he[i].len);
        bits[i]  = he[i].len;
        syms[i]  = he[i].sym;
        code += 0x80000000u >> (he[i].len - 1);
    }

    ff_free_vlc(vlc);
    return ff_init_vlc_sparse(vlc, FFMIN(he[4095].len, 14), 4096,
                              bits,  sizeof(*bits),  sizeof(*bits),
                              codes, sizeof(*codes), sizeof(*codes),
                              syms,  sizeof(*syms),  sizeof(*syms), 0);
}

static int huff_build(VLC *vlc, uint8_t *len)
{
    HuffEntry he[256];
    uint32_t codes[256];
    uint8_t bits[256];
    uint8_t syms[256];
    uint32_t code;
    int i;

    for (i = 0; i < 256; i++) {
        he[i].sym = 255 - i;
        he[i].len = len[i];
        if (len[i] == 0 || len[i] > 32)
            return AVERROR_INVALIDDATA;
    }
    AV_QSORT(he, 256, HuffEntry, huff_cmp_len);

    code = 1;
    for (i = 255; i >= 0; i--) {
        codes[i] = code >> (32 - he[i].len);
        bits[i]  = he[i].len;
        syms[i]  = he[i].sym;
        code += 0x80000000u >> (he[i].len - 1);
    }

    ff_free_vlc(vlc);
    return ff_init_vlc_sparse(vlc, FFMIN(he[255].len, 12), 256,
                              bits,  sizeof(*bits),  sizeof(*bits),
                              codes, sizeof(*codes), sizeof(*codes),
                              syms,  sizeof(*syms),  sizeof(*syms), 0);
}

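/*
 * Median prediction for more-than-8-bit planes: same scheme as the
 * LLVidDSPContext add_median_pred() helper, but every reconstructed sample
 * is masked to the plane's bit depth.
 */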
static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1,
                                   const uint16_t *diff, intptr_t w,
                                   int *left, int *left_top, int max)
{
    int i;
    uint16_t l, lt;

    l  = *left;
    lt = *left_top;

    for (i = 0; i < w; i++) {
        l      = mid_pred(l, src1[i], (l + src1[i] - lt)) + diff[i];
        l     &= max;
        lt     = src1[i];
        dst[i] = l;
    }

    *left     = l;
    *left_top = lt;
}

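/*
 * Decode one slice of a 10- or 12-bit frame. Each plane's slice starts with
 * a flags byte and a prediction byte; the payload is either raw samples
 * (flags & 1) or VLC-coded values. The selected prediction is then undone
 * and, for GBR formats, the green-based decorrelation is reversed.
 */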
static int magy_decode_slice10(AVCodecContext *avctx, void *tdata,
                               int j, int threadnr)
{
    MagicYUVContext *s = avctx->priv_data;
    int interlaced = s->interlaced;
    const int bps = s->bps;
    const int max = s->max - 1;
    AVFrame *p = s->p;
    int i, k, x;
    GetBitContext gb;
    uint16_t *dst;

    for (i = 0; i < s->planes; i++) {
        int left, lefttop, top;
        int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
        int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
        int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
        ptrdiff_t fake_stride = (p->linesize[i] / 2) * (1 + interlaced);
        ptrdiff_t stride = p->linesize[i] / 2;
        int flags, pred;
        int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
                                 s->slices[i][j].size);

        if (ret < 0)
            return ret;

        flags = get_bits(&gb, 8);
        pred  = get_bits(&gb, 8);

        dst = (uint16_t *)p->data[i] + j * sheight * stride;
        if (flags & 1) {
            if (get_bits_left(&gb) < bps * width * height)
                return AVERROR_INVALIDDATA;
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++)
                    dst[x] = get_bits(&gb, bps);

                dst += stride;
            }
        } else {
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++) {
                    int pix;
                    if (get_bits_left(&gb) <= 0)
                        return AVERROR_INVALIDDATA;

                    pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
                    if (pix < 0)
                        return AVERROR_INVALIDDATA;

                    dst[x] = max - pix;
                }
                dst += stride;
            }
        }

        switch (pred) {
        case LEFT:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, dst[-fake_stride]);
                dst += stride;
            }
            break;
        case GRADIENT:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                top = dst[-fake_stride];
                left = top + dst[0];
                dst[0] = left & max;
                for (x = 1; x < width; x++) {
                    top = dst[x - fake_stride];
                    lefttop = dst[x - (fake_stride + 1)];
                    left += top - lefttop + dst[x];
                    dst[x] = left & max;
                }
                dst += stride;
            }
            break;
        case MEDIAN:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            lefttop = left = dst[0];
            for (k = 1 + interlaced; k < height; k++) {
                magicyuv_median_pred16(dst, dst - fake_stride, dst, width, &left, &lefttop, max);
                lefttop = left = dst[0];
                dst += stride;
            }
            break;
        default:
            avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
        }
    }

    if (s->decorrelate) {
        int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
        int width = avctx->coded_width;
        uint16_t *r = (uint16_t *)p->data[0] + j * s->slice_height * p->linesize[0] / 2;
        uint16_t *g = (uint16_t *)p->data[1] + j * s->slice_height * p->linesize[1] / 2;
        uint16_t *b = (uint16_t *)p->data[2] + j * s->slice_height * p->linesize[2] / 2;

        for (i = 0; i < height; i++) {
            for (k = 0; k < width; k++) {
                b[k] = (b[k] + g[k]) & max;
                r[k] = (r[k] + g[k]) & max;
            }
            b += p->linesize[0] / 2;
            g += p->linesize[1] / 2;
            r += p->linesize[2] / 2;
        }
    }

    return 0;
}

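/*
 * 8-bit counterpart of magy_decode_slice10(); it relies on the LLVidDSP
 * helpers (add_left_pred, add_gradient_pred, add_median_pred, add_bytes)
 * directly, since no bit-depth masking is needed.
 */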
static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
                             int j, int threadnr)
{
    MagicYUVContext *s = avctx->priv_data;
    int interlaced = s->interlaced;
    AVFrame *p = s->p;
    int i, k, x, min_width;
    GetBitContext gb;
    uint8_t *dst;

    for (i = 0; i < s->planes; i++) {
        int left, lefttop, top;
        int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
        int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
        int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
        ptrdiff_t fake_stride = p->linesize[i] * (1 + interlaced);
        ptrdiff_t stride = p->linesize[i];
        int flags, pred;
        int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
                                 s->slices[i][j].size);

        if (ret < 0)
            return ret;

        flags = get_bits(&gb, 8);
        pred  = get_bits(&gb, 8);

        dst = p->data[i] + j * sheight * stride;
        if (flags & 1) {
            if (get_bits_left(&gb) < 8 * width * height)
                return AVERROR_INVALIDDATA;
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++)
                    dst[x] = get_bits(&gb, 8);

                dst += stride;
            }
        } else {
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++) {
                    int pix;
                    if (get_bits_left(&gb) <= 0)
                        return AVERROR_INVALIDDATA;

                    pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
                    if (pix < 0)
                        return AVERROR_INVALIDDATA;

                    dst[x] = 255 - pix;
                }
                dst += stride;
            }
        }

        switch (pred) {
        case LEFT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_left_pred(dst, dst, width, dst[-fake_stride]);
                dst += stride;
            }
            break;
        case GRADIENT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            min_width = FFMIN(width, 32);
            for (k = 1 + interlaced; k < height; k++) {
                top = dst[-fake_stride];
                left = top + dst[0];
                dst[0] = left;
                for (x = 1; x < min_width; x++) { /* dsp need aligned 32 */
                    top = dst[x - fake_stride];
                    lefttop = dst[x - (fake_stride + 1)];
                    left += top - lefttop + dst[x];
                    dst[x] = left;
                }
                if (width > 32)
                    s->llviddsp.add_gradient_pred(dst + 32, fake_stride, width - 32);
                dst += stride;
            }
            break;
        case MEDIAN:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            lefttop = left = dst[0];
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_median_pred(dst, dst - fake_stride,
                                            dst, width, &left, &lefttop);
                lefttop = left = dst[0];
                dst += stride;
            }
            break;
        default:
            avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
        }
    }

    if (s->decorrelate) {
        int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
        int width = avctx->coded_width;
        uint8_t *b = p->data[0] + j * s->slice_height * p->linesize[0];
        uint8_t *g = p->data[1] + j * s->slice_height * p->linesize[1];
        uint8_t *r = p->data[2] + j * s->slice_height * p->linesize[2];

        for (i = 0; i < height; i++) {
            s->llviddsp.add_bytes(b, g, width);
            s->llviddsp.add_bytes(r, g, width);
            b += p->linesize[0];
            g += p->linesize[1];
            r += p->linesize[2];
        }
    }

    return 0;
}

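/*
 * Parse the run-length coded Huffman code lengths stored in the header:
 * each record is a 1-bit "long run" flag, a 7-bit code length and, when the
 * flag is set, an extra 8-bit run count. A VLC is built for every plane as
 * soon as its length table is complete.
 */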
static int build_huffman(AVCodecContext *avctx, GetBitContext *gbit, int max)
{
    MagicYUVContext *s = avctx->priv_data;
    int i = 0, j = 0, k;

    memset(s->len, 0, sizeof(s->len));
    while (get_bits_left(gbit) >= 8) {
        int b = get_bits(gbit, 1);
        int x = get_bits(gbit, 7);
        int l = get_bitsz(gbit, b * 8) + 1;

        for (k = 0; k < l; k++)
            if (j + k < max)
                s->len[i][j + k] = x;

        j += l;
        if (j == max) {
            j = 0;
            if (s->huff_build(&s->vlc[i], s->len[i])) {
                av_log(avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
                return AVERROR_INVALIDDATA;
            }
            i++;
            if (i == s->planes) {
                break;
            }
        } else if (j > max) {
            av_log(avctx, AV_LOG_ERROR, "Invalid Huffman codes\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (i != s->planes) {
        av_log(avctx, AV_LOG_ERROR, "Huffman tables too short\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

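/*
 * Top-level frame decoding: validate the 'MAGY' header, map the format byte
 * to a pixel format, read the per-plane slice offset tables and the Huffman
 * code lengths, then decode all slices in parallel through execute2().
 */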
static int magy_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    MagicYUVContext *s = avctx->priv_data;
    ThreadFrame frame = { .f = data };
    AVFrame *p = data;
    GetByteContext gbyte;
    GetBitContext gbit;
    uint32_t first_offset, offset, next_offset, header_size, slice_width;
    int width, height, format, version, table_size;
    int ret, i, j;

    bytestream2_init(&gbyte, avpkt->data, avpkt->size);
    if (bytestream2_get_le32(&gbyte) != MKTAG('M', 'A', 'G', 'Y'))
        return AVERROR_INVALIDDATA;

    header_size = bytestream2_get_le32(&gbyte);
    if (header_size < 32 || header_size >= avpkt->size) {
        av_log(avctx, AV_LOG_ERROR,
               "header or packet too small %"PRIu32"\n", header_size);
        return AVERROR_INVALIDDATA;
    }

    version = bytestream2_get_byte(&gbyte);
    if (version != 7) {
        avpriv_request_sample(avctx, "Version %d", version);
        return AVERROR_PATCHWELCOME;
    }

    s->hshift[1] =
    s->vshift[1] =
    s->hshift[2] =
    s->vshift[2] = 0;
    s->decorrelate = 0;
    s->max = 256;
    s->bps = 8;
    s->huff_build = huff_build;
    s->magy_decode_slice = magy_decode_slice;

    format = bytestream2_get_byte(&gbyte);
    switch (format) {
    case 0x65:
        avctx->pix_fmt = AV_PIX_FMT_GBRP;
        s->decorrelate = 1;
        break;
    case 0x66:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        s->decorrelate = 1;
        break;
    case 0x67:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        break;
    case 0x68:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        s->hshift[1] =
        s->hshift[2] = 1;
        break;
    case 0x69:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        s->hshift[1] =
        s->vshift[1] =
        s->hshift[2] =
        s->vshift[2] = 1;
        break;
    case 0x6a:
        avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
        break;
    case 0x6b:
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        break;
    case 0x6c:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
        s->hshift[1] =
        s->hshift[2] = 1;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        s->bps = 10;
        break;
    case 0x76:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        s->bps = 10;
        break;
    case 0x6d:
        avctx->pix_fmt = AV_PIX_FMT_GBRP10;
        s->decorrelate = 1;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        s->bps = 10;
        break;
    case 0x6e:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
        s->decorrelate = 1;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        s->bps = 10;
        break;
    case 0x6f:
        avctx->pix_fmt = AV_PIX_FMT_GBRP12;
        s->decorrelate = 1;
        s->max = 4096;
        s->huff_build = huff_build12;
        s->magy_decode_slice = magy_decode_slice10;
        s->bps = 12;
        break;
    case 0x70:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
        s->decorrelate = 1;
        s->max = 4096;
        s->huff_build = huff_build12;
        s->magy_decode_slice = magy_decode_slice10;
        s->bps = 12;
        break;
    case 0x73:
        avctx->pix_fmt = AV_PIX_FMT_GRAY10;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        s->bps = 10;
        break;
    default:
        avpriv_request_sample(avctx, "Format 0x%X", format);
        return AVERROR_PATCHWELCOME;
    }
    s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);

    bytestream2_skip(&gbyte, 1);
    s->color_matrix = bytestream2_get_byte(&gbyte);
    s->flags = bytestream2_get_byte(&gbyte);
    s->interlaced = !!(s->flags & 2);
    bytestream2_skip(&gbyte, 3);

    width  = bytestream2_get_le32(&gbyte);
    height = bytestream2_get_le32(&gbyte);
    ret = ff_set_dimensions(avctx, width, height);
    if (ret < 0)
        return ret;

    slice_width = bytestream2_get_le32(&gbyte);
    if (slice_width != avctx->coded_width) {
        avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
        return AVERROR_PATCHWELCOME;
    }
    s->slice_height = bytestream2_get_le32(&gbyte);
    if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid slice height: %d\n", s->slice_height);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(&gbyte, 4);

    s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
    if (s->nb_slices > INT_MAX / sizeof(Slice)) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of slices: %d\n", s->nb_slices);
        return AVERROR_INVALIDDATA;
    }

    if (s->interlaced) {
        if ((s->slice_height >> s->vshift[1]) < 2) {
            av_log(avctx, AV_LOG_ERROR, "impossible slice height\n");
            return AVERROR_INVALIDDATA;
        }
        if ((avctx->coded_height % s->slice_height) && ((avctx->coded_height % s->slice_height) >> s->vshift[1]) < 2) {
            av_log(avctx, AV_LOG_ERROR, "impossible height\n");
            return AVERROR_INVALIDDATA;
        }
    }

    for (i = 0; i < s->planes; i++) {
        av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
        if (!s->slices[i])
            return AVERROR(ENOMEM);

        offset = bytestream2_get_le32(&gbyte);
        if (offset >= avpkt->size - header_size)
            return AVERROR_INVALIDDATA;

        if (i == 0)
            first_offset = offset;

        for (j = 0; j < s->nb_slices - 1; j++) {
            s->slices[i][j].start = offset + header_size;

            next_offset = bytestream2_get_le32(&gbyte);
            if (next_offset <= offset || next_offset >= avpkt->size - header_size)
                return AVERROR_INVALIDDATA;

            s->slices[i][j].size = next_offset - offset;
            offset = next_offset;
        }

        s->slices[i][j].start = offset + header_size;
        s->slices[i][j].size  = avpkt->size - s->slices[i][j].start;

        if (s->slices[i][j].size < 2)
            return AVERROR_INVALIDDATA;
    }

    if (bytestream2_get_byte(&gbyte) != s->planes)
        return AVERROR_INVALIDDATA;

    bytestream2_skip(&gbyte, s->nb_slices * s->planes);

    table_size = header_size + first_offset - bytestream2_tell(&gbyte);
    if (table_size < 2)
        return AVERROR_INVALIDDATA;

    ret = init_get_bits8(&gbit, avpkt->data + bytestream2_tell(&gbyte), table_size);
    if (ret < 0)
        return ret;

    ret = build_huffman(avctx, &gbit, s->max);
    if (ret < 0)
        return ret;

    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    s->buf = avpkt->data;
    s->p = p;
    avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);

    if (avctx->pix_fmt == AV_PIX_FMT_GBRP   ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP  ||
        avctx->pix_fmt == AV_PIX_FMT_GBRP10 ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP10||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP12||
        avctx->pix_fmt == AV_PIX_FMT_GBRP12) {
        FFSWAP(uint8_t*, p->data[0], p->data[1]);
        FFSWAP(int, p->linesize[0], p->linesize[1]);
    } else {
        switch (s->color_matrix) {
        case 1:
            p->colorspace = AVCOL_SPC_BT470BG;
            break;
        case 2:
            p->colorspace = AVCOL_SPC_BT709;
            break;
        }
        p->color_range = (s->flags & 4) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
    }

    *got_frame = 1;

    return avpkt->size;
}

#if HAVE_THREADS
static int magy_init_thread_copy(AVCodecContext *avctx)
{
    MagicYUVContext *s = avctx->priv_data;
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
        s->slices[i] = NULL;
        s->slices_size[i] = 0;
    }

    return 0;
}
#endif

static av_cold int magy_decode_init(AVCodecContext *avctx)
{
    MagicYUVContext *s = avctx->priv_data;
    ff_llviddsp_init(&s->llviddsp);
    return 0;
}

static av_cold int magy_decode_end(AVCodecContext *avctx)
{
    MagicYUVContext * const s = avctx->priv_data;
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
        av_freep(&s->slices[i]);
        s->slices_size[i] = 0;
        ff_free_vlc(&s->vlc[i]);
    }

    return 0;
}

AVCodec ff_magicyuv_decoder = {
    .name             = "magicyuv",
    .long_name        = NULL_IF_CONFIG_SMALL("MagicYUV video"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_MAGICYUV,
    .priv_data_size   = sizeof(MagicYUVContext),
    .init             = magy_decode_init,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(magy_init_thread_copy),
    .close            = magy_decode_end,
    .decode           = magy_decode_frame,
    .capabilities     = AV_CODEC_CAP_DR1 |
                        AV_CODEC_CAP_FRAME_THREADS |
                        AV_CODEC_CAP_SLICE_THREADS,
    .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE,
};