FFmpeg
diracdec.c
1 /*
2  * Copyright (C) 2007 Marco Gerards <marco@gnu.org>
3  * Copyright (C) 2009 David Conrad
4  * Copyright (C) 2011 Jordi Ortiz
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * Dirac Decoder
26  * @author Marco Gerards <marco@gnu.org>, David Conrad, Jordi Ortiz <nenjordi@gmail.com>
27  */
28 
29 #include "libavutil/mem_internal.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavutil/thread.h"
32 #include "avcodec.h"
33 #include "get_bits.h"
34 #include "bytestream.h"
35 #include "codec_internal.h"
36 #include "internal.h"
37 #include "golomb.h"
38 #include "dirac_arith.h"
39 #include "dirac_vlc.h"
40 #include "mpeg12data.h"
41 #include "mpegpicture.h"
42 #include "mpegvideoencdsp.h"
43 #include "dirac_dwt.h"
44 #include "dirac.h"
45 #include "diractab.h"
46 #include "diracdsp.h"
47 #include "videodsp.h"
48 
49 /**
50  * The spec limits this to 3 for frame coding, but in practice it can be as high as 6
51  */
52 #define MAX_REFERENCE_FRAMES 8
53 #define MAX_DELAY 5 /* limit for main profile for frame coding (TODO: field coding) */
54 #define MAX_FRAMES (MAX_REFERENCE_FRAMES + MAX_DELAY + 1)
55 #define MAX_QUANT 255 /* max quant for VC-2 */
56 #define MAX_BLOCKSIZE 32 /* maximum xblen/yblen we support */
57 
58 /**
59  * DiracBlock->ref flags: if set, the block does MC from the given ref
60  */
61 #define DIRAC_REF_MASK_REF1 1
62 #define DIRAC_REF_MASK_REF2 2
63 #define DIRAC_REF_MASK_GLOBAL 4
64 
65 /**
66  * Value of Picture.reference when Picture is not a reference picture, but
67  * is held for delayed output.
68  */
69 #define DELAYED_PIC_REF 4
70 
71 #define CALC_PADDING(size, depth) \
72  (((size + (1 << depth) - 1) >> depth) << depth)
73 
74 #define DIVRNDUP(a, b) (((a) + (b) - 1) / (b))
75 
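/* Worked example for the two helpers above: with depth 4, CALC_PADDING(1080, 4)
 * evaluates to ((1080 + 15) >> 4) << 4 = 1088, i.e. the size rounded up to a
 * multiple of 16, while DIVRNDUP(1080, 16) = 68 is the matching ceiling
 * division (how many 16-line units are needed to cover 1080 lines). */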
76 typedef struct {
77     AVFrame *avframe;
78  int interpolated[3]; /* 1 if hpel[] is valid */
79  uint8_t *hpel[3][4];
80  uint8_t *hpel_base[3][4];
81  int reference;
82 } DiracFrame;
83 
84 typedef struct {
85  union {
86  int16_t mv[2][2];
87  int16_t dc[3];
88  } u; /* anonymous unions aren't in C99 :( */
89  uint8_t ref;
90 } DiracBlock;
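/* A DiracBlock is either intra (ref == 0), in which case u.dc holds one DC value
 * per plane, or predicted, in which case u.mv holds an x/y motion vector per
 * reference; the DIRAC_REF_MASK_* bits in ref select which interpretation
 * applies (see block_mc() further down). */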
91 
92 typedef struct SubBand {
93     int level;
94     int orientation;
95  int stride; /* in bytes */
96  int width;
97  int height;
98  int pshift;
99  int quant;
100  uint8_t *ibuf;
101  struct SubBand *parent;
102 
103  /* for low delay */
104  unsigned length;
105  const uint8_t *coeff_data;
106 } SubBand;
107 
108 typedef struct Plane {
109     DWTPlane idwt;
110 
111  int width;
112  int height;
113  ptrdiff_t stride;
114 
115  /* block length */
116  uint8_t xblen;
117  uint8_t yblen;
118  /* block separation (block n+1 starts after this many pixels in block n) */
119  uint8_t xbsep;
120  uint8_t ybsep;
121  /* amount of overspill on each edge (half of the overlap between blocks) */
122  uint8_t xoffset;
123  uint8_t yoffset;
124 
125     SubBand band[MAX_DWT_LEVELS][4];
126 } Plane;
127 
128 /* Used by Low Delay and High Quality profiles */
129 typedef struct DiracSlice {
130     GetBitContext gb;
131  int slice_x;
132  int slice_y;
133  int bytes;
134 } DiracSlice;
135 
136 typedef struct DiracContext {
145  int64_t frame_number; /* number of the next frame to display */
149 
150  int bit_depth; /* bit depth */
151  int pshift; /* pixel shift = bit_depth > 8 */
152 
153  int zero_res; /* zero residue flag */
154  int is_arith; /* whether coeffs use arith or golomb coding */
155  int core_syntax; /* use core syntax only */
156  int low_delay; /* use the low delay syntax */
157  int hq_picture; /* high quality picture, enables low_delay */
158  int ld_picture; /* use low delay picture, turns on low_delay */
159  int dc_prediction; /* has dc prediction */
160  int globalmc_flag; /* use global motion compensation */
161  int num_refs; /* number of reference pictures */
162 
163  /* wavelet decoding */
164  unsigned wavelet_depth; /* depth of the IDWT */
165  unsigned wavelet_idx;
166 
167  /**
168  * schroedinger older than 1.0.8 doesn't store
169  * quant delta if only one codebook exists in a band
170  */
171  unsigned old_delta_quant;
172  unsigned codeblock_mode;
173 
174  unsigned num_x; /* number of horizontal slices */
175  unsigned num_y; /* number of vertical slices */
176 
177  uint8_t *thread_buf; /* Per-thread buffer for coefficient storage */
178  int threads_num_buf; /* Current # of buffers allocated */
179  int thread_buf_size; /* Each thread has a buffer this size */
180 
181     DiracSlice *slice_params_buf;
182     int slice_params_num_buf;
183 
184  struct {
185  unsigned width;
186         unsigned height;
187     } codeblock[MAX_DWT_LEVELS+1];
188 
189  struct {
190  AVRational bytes; /* average bytes per slice */
191  uint8_t quant[MAX_DWT_LEVELS][4]; /* [DIRAC_STD] E.1 */
192  } lowdelay;
193 
194  struct {
195  unsigned prefix_bytes;
196  uint64_t size_scaler;
197  } highquality;
198 
199  struct {
200  int pan_tilt[2]; /* pan/tilt vector */
201  int zrs[2][2]; /* zoom/rotate/shear matrix */
202  int perspective[2]; /* perspective vector */
203  unsigned zrs_exp;
204  unsigned perspective_exp;
205  } globalmc[2];
206 
207  /* motion compensation */
208  uint8_t mv_precision; /* [DIRAC_STD] REFS_WT_PRECISION */
209  int16_t weight[2]; /* [DIRAC_STD] REF1_WT and REF2_WT */
210  unsigned weight_log2denom; /* [DIRAC_STD] REFS_WT_PRECISION */
211 
212  int blwidth; /* number of blocks (horizontally) */
213  int blheight; /* number of blocks (vertically) */
214  int sbwidth; /* number of superblocks (horizontally) */
215  int sbheight; /* number of superblocks (vertically) */
216 
217     uint8_t *sbsplit;
218     DiracBlock *blmotion;
219 
220     uint8_t *edge_emu_buffer[4];
221     uint8_t *edge_emu_buffer_base;
222 
223  uint16_t *mctmp; /* buffer holding the MC data multiplied by OBMC weights */
224  uint8_t *mcscratch;
226 
228 
229  void (*put_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
230  void (*avg_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
231  void (*add_obmc)(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen);
234 
237 
241 } DiracContext;
242 
243 enum dirac_subband {
244     subband_ll = 0,
245     subband_hl = 1,
246     subband_lh = 2,
247     subband_hh = 3,
248     subband_nb,
249 };
250 
251 /* magic number division by 3 from schroedinger */
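/* Since (x+1)*21845 + 10922 == x*21845 + 32767, with 21845 ~= 2^16/3 and
 * 32767 ~= 2^15, the shift by 16 returns x/3 rounded to nearest for the small
 * values this is used on, e.g. divide3(5) = (6*21845 + 10922) >> 16
 * = 141992 >> 16 = 2. */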
252 static inline int divide3(int x)
253 {
254  return (int)((x+1U)*21845 + 10922) >> 16;
255 }
256 
257 static DiracFrame *remove_frame(DiracFrame *framelist[], int picnum)
258 {
259  DiracFrame *remove_pic = NULL;
260  int i, remove_idx = -1;
261 
262  for (i = 0; framelist[i]; i++)
263  if (framelist[i]->avframe->display_picture_number == picnum) {
264  remove_pic = framelist[i];
265  remove_idx = i;
266  }
267 
268  if (remove_pic)
269  for (i = remove_idx; framelist[i]; i++)
270  framelist[i] = framelist[i+1];
271 
272  return remove_pic;
273 }
274 
275 static int add_frame(DiracFrame *framelist[], int maxframes, DiracFrame *frame)
276 {
277  int i;
278  for (i = 0; i < maxframes; i++)
279  if (!framelist[i]) {
280  framelist[i] = frame;
281  return 0;
282  }
283  return -1;
284 }
285 
286 static int alloc_sequence_buffers(DiracContext *s)
287 {
288  int sbwidth = DIVRNDUP(s->seq.width, 4);
289  int sbheight = DIVRNDUP(s->seq.height, 4);
290  int i, w, h, top_padding;
291 
292  /* todo: think more about this / use or set Plane here */
293  for (i = 0; i < 3; i++) {
294  int max_xblen = MAX_BLOCKSIZE >> (i ? s->chroma_x_shift : 0);
295  int max_yblen = MAX_BLOCKSIZE >> (i ? s->chroma_y_shift : 0);
296  w = s->seq.width >> (i ? s->chroma_x_shift : 0);
297  h = s->seq.height >> (i ? s->chroma_y_shift : 0);
298 
299  /* we allocate the max we support here since num decompositions can
300  * change from frame to frame. Stride is aligned to 16 for SIMD, and
301  * 1<<MAX_DWT_LEVELS top padding to avoid if(y>0) in arith decoding;
302  * MAX_BLOCKSIZE padding for MC: blocks can spill up to half of that
303  * on each side */
304  top_padding = FFMAX(1<<MAX_DWT_LEVELS, max_yblen/2);
305  w = FFALIGN(CALC_PADDING(w, MAX_DWT_LEVELS), 8); /* FIXME: Should this be 16 for SSE??? */
306  h = top_padding + CALC_PADDING(h, MAX_DWT_LEVELS) + max_yblen/2;
307 
308  s->plane[i].idwt.buf_base = av_calloc(w + max_xblen, h * (2 << s->pshift));
309  s->plane[i].idwt.tmp = av_malloc_array((w+16), 2 << s->pshift);
310  s->plane[i].idwt.buf = s->plane[i].idwt.buf_base + (top_padding*w)*(2 << s->pshift);
311  if (!s->plane[i].idwt.buf_base || !s->plane[i].idwt.tmp)
312  return AVERROR(ENOMEM);
313  }
314 
315  /* fixme: allocate using real stride here */
316  s->sbsplit = av_malloc_array(sbwidth, sbheight);
317  s->blmotion = av_malloc_array(sbwidth, sbheight * 16 * sizeof(*s->blmotion));
318 
319  if (!s->sbsplit || !s->blmotion)
320  return AVERROR(ENOMEM);
321  return 0;
322 }
323 
324 static int alloc_buffers(DiracContext *s, int stride)
325 {
326  int w = s->seq.width;
327  int h = s->seq.height;
328 
329  av_assert0(stride >= w);
330  stride += 64;
331 
332  if (s->buffer_stride >= stride)
333  return 0;
334  s->buffer_stride = 0;
335 
336  av_freep(&s->edge_emu_buffer_base);
337  memset(s->edge_emu_buffer, 0, sizeof(s->edge_emu_buffer));
338  av_freep(&s->mctmp);
339  av_freep(&s->mcscratch);
340 
341  s->edge_emu_buffer_base = av_malloc_array(stride, MAX_BLOCKSIZE);
342 
343  s->mctmp = av_malloc_array((stride+MAX_BLOCKSIZE), (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
344  s->mcscratch = av_malloc_array(stride, MAX_BLOCKSIZE);
345 
346  if (!s->edge_emu_buffer_base || !s->mctmp || !s->mcscratch)
347  return AVERROR(ENOMEM);
348 
349  s->buffer_stride = stride;
350  return 0;
351 }
352 
353 static void free_sequence_buffers(DiracContext *s)
354 {
355  int i, j, k;
356 
357  for (i = 0; i < MAX_FRAMES; i++) {
358  if (s->all_frames[i].avframe->data[0]) {
359  av_frame_unref(s->all_frames[i].avframe);
360  memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
361  }
362 
363  for (j = 0; j < 3; j++)
364  for (k = 1; k < 4; k++)
365  av_freep(&s->all_frames[i].hpel_base[j][k]);
366  }
367 
368  memset(s->ref_frames, 0, sizeof(s->ref_frames));
369  memset(s->delay_frames, 0, sizeof(s->delay_frames));
370 
371  for (i = 0; i < 3; i++) {
372  av_freep(&s->plane[i].idwt.buf_base);
373  av_freep(&s->plane[i].idwt.tmp);
374  }
375 
376  s->buffer_stride = 0;
377  av_freep(&s->sbsplit);
378  av_freep(&s->blmotion);
379  av_freep(&s->edge_emu_buffer_base);
380 
381  av_freep(&s->mctmp);
382  av_freep(&s->mcscratch);
383 }
384 
386 
387 static av_cold int dirac_decode_init(AVCodecContext *avctx)
388 {
389  DiracContext *s = avctx->priv_data;
390  int i, ret;
391 
392  s->avctx = avctx;
393  s->frame_number = -1;
394 
395  s->thread_buf = NULL;
396  s->threads_num_buf = -1;
397  s->thread_buf_size = -1;
398 
399  ff_diracdsp_init(&s->diracdsp);
400  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
401  ff_videodsp_init(&s->vdsp, 8);
402 
403  for (i = 0; i < MAX_FRAMES; i++) {
404  s->all_frames[i].avframe = av_frame_alloc();
405  if (!s->all_frames[i].avframe) {
406  while (i > 0)
407  av_frame_free(&s->all_frames[--i].avframe);
408  return AVERROR(ENOMEM);
409  }
410  }
412  if (ret != 0)
413  return AVERROR_UNKNOWN;
414 
415  return 0;
416 }
417 
418 static void dirac_decode_flush(AVCodecContext *avctx)
419 {
420  DiracContext *s = avctx->priv_data;
421     free_sequence_buffers(s);
422     s->seen_sequence_header = 0;
423  s->frame_number = -1;
424 }
425 
426 static av_cold int dirac_decode_end(AVCodecContext *avctx)
427 {
428  DiracContext *s = avctx->priv_data;
429  int i;
430 
431  dirac_decode_flush(avctx);
432  for (i = 0; i < MAX_FRAMES; i++)
433  av_frame_free(&s->all_frames[i].avframe);
434 
435  av_freep(&s->thread_buf);
436  av_freep(&s->slice_params_buf);
437 
438  return 0;
439 }
440 
441 static inline int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
442 {
443  int coeff = dirac_get_se_golomb(gb);
444  const unsigned sign = FFSIGN(coeff);
445  if (coeff)
446  coeff = sign*((sign * coeff * qfactor + qoffset) >> 2);
447  return coeff;
448 }
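/* qfactor and qoffset are kept with two fractional bits (the + 2 added by the
 * callers acts as the rounding term), so the >> 2 above removes that scaling:
 * e.g. with a hypothetical qfactor of 16 and qoffset of 10, a decoded magnitude
 * of 3 dequantizes to (3*16 + 10) >> 2 = 14 before the sign is applied. */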
449 
450 #define SIGN_CTX(x) (CTX_SIGN_ZERO + ((x) > 0) - ((x) < 0))
451 
452 #define UNPACK_ARITH(n, type) \
453  static inline void coeff_unpack_arith_##n(DiracArith *c, int qfactor, int qoffset, \
454  SubBand *b, type *buf, int x, int y) \
455  { \
456  int sign, sign_pred = 0, pred_ctx = CTX_ZPZN_F1; \
457  unsigned coeff; \
458  const int mstride = -(b->stride >> (1+b->pshift)); \
459  if (b->parent) { \
460  const type *pbuf = (type *)b->parent->ibuf; \
461  const int stride = b->parent->stride >> (1+b->parent->pshift); \
462  pred_ctx += !!pbuf[stride * (y>>1) + (x>>1)] << 1; \
463  } \
464  if (b->orientation == subband_hl) \
465  sign_pred = buf[mstride]; \
466  if (x) { \
467  pred_ctx += !(buf[-1] | buf[mstride] | buf[-1 + mstride]); \
468  if (b->orientation == subband_lh) \
469  sign_pred = buf[-1]; \
470  } else { \
471  pred_ctx += !buf[mstride]; \
472  } \
473  coeff = dirac_get_arith_uint(c, pred_ctx, CTX_COEFF_DATA); \
474  if (coeff) { \
475  coeff = (coeff * qfactor + qoffset) >> 2; \
476  sign = dirac_get_arith_bit(c, SIGN_CTX(sign_pred)); \
477  coeff = (coeff ^ -sign) + sign; \
478  } \
479  *buf = coeff; \
480  } \
481 
482 UNPACK_ARITH(8, int16_t)
483 UNPACK_ARITH(10, int32_t)
484 
485 /**
486  * Decode the coeffs in the rectangle defined by left, right, top, bottom
487  * [DIRAC_STD] 13.4.3.2 Codeblock unpacking loop. codeblock()
488  */
489 static inline int codeblock(DiracContext *s, SubBand *b,
490  GetBitContext *gb, DiracArith *c,
491  int left, int right, int top, int bottom,
492  int blockcnt_one, int is_arith)
493 {
494  int x, y, zero_block;
495  int qoffset, qfactor;
496  uint8_t *buf;
497 
498  /* check for any coded coefficients in this codeblock */
499  if (!blockcnt_one) {
500  if (is_arith)
501  zero_block = dirac_get_arith_bit(c, CTX_ZERO_BLOCK);
502  else
503  zero_block = get_bits1(gb);
504 
505  if (zero_block)
506  return 0;
507  }
508 
509  if (s->codeblock_mode && !(s->old_delta_quant && blockcnt_one)) {
510  int quant;
511         if (is_arith)
512             quant = dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
513         else
514             quant = dirac_get_se_golomb(gb);
515  if (quant > INT_MAX - b->quant || b->quant + quant < 0) {
516  av_log(s->avctx, AV_LOG_ERROR, "Invalid quant\n");
517  return AVERROR_INVALIDDATA;
518  }
519  b->quant += quant;
520  }
521 
522  if (b->quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
523  av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", b->quant);
524  b->quant = 0;
525  return AVERROR_INVALIDDATA;
526  }
527 
528  qfactor = ff_dirac_qscale_tab[b->quant];
529  /* TODO: context pointer? */
530  if (!s->num_refs)
531  qoffset = ff_dirac_qoffset_intra_tab[b->quant] + 2;
532  else
533  qoffset = ff_dirac_qoffset_inter_tab[b->quant] + 2;
534 
535  buf = b->ibuf + top * b->stride;
536  if (is_arith) {
537  for (y = top; y < bottom; y++) {
538  if (c->error)
539  return c->error;
540  for (x = left; x < right; x++) {
541  if (b->pshift) {
542  coeff_unpack_arith_10(c, qfactor, qoffset, b, (int32_t*)(buf)+x, x, y);
543  } else {
544  coeff_unpack_arith_8(c, qfactor, qoffset, b, (int16_t*)(buf)+x, x, y);
545  }
546  }
547  buf += b->stride;
548  }
549  } else {
550  for (y = top; y < bottom; y++) {
551  if (get_bits_left(gb) < 1)
552  return AVERROR_INVALIDDATA;
553  for (x = left; x < right; x++) {
554  int val = coeff_unpack_golomb(gb, qfactor, qoffset);
555  if (b->pshift) {
556  AV_WN32(&buf[4*x], val);
557  } else {
558  AV_WN16(&buf[2*x], val);
559  }
560  }
561  buf += b->stride;
562  }
563  }
564  return 0;
565 }
566 
567 /**
568  * Dirac Specification ->
569  * 13.3 intra_dc_prediction(band)
570  */
571 #define INTRA_DC_PRED(n, type) \
572  static inline void intra_dc_prediction_##n(SubBand *b) \
573  { \
574  type *buf = (type*)b->ibuf; \
575  int x, y; \
576  \
577  for (x = 1; x < b->width; x++) \
578  buf[x] += buf[x-1]; \
579  buf += (b->stride >> (1+b->pshift)); \
580  \
581  for (y = 1; y < b->height; y++) { \
582  buf[0] += buf[-(b->stride >> (1+b->pshift))]; \
583  \
584  for (x = 1; x < b->width; x++) { \
585  int pred = buf[x - 1] + buf[x - (b->stride >> (1+b->pshift))] + buf[x - (b->stride >> (1+b->pshift))-1]; \
586  buf[x] += divide3(pred); \
587  } \
588  buf += (b->stride >> (1+b->pshift)); \
589  } \
590  } \
591 
592 INTRA_DC_PRED(8, int16_t)
593 INTRA_DC_PRED(10, uint32_t)
594 
595 /**
596  * Dirac Specification ->
597  * 13.4.2 Non-skipped subbands. subband_coeffs()
598  */
599 static int decode_subband_internal(DiracContext *s, SubBand *b, int is_arith)
600 {
601  int cb_x, cb_y, left, right, top, bottom;
602  DiracArith c;
603  GetBitContext gb;
604  int cb_width = s->codeblock[b->level + (b->orientation != subband_ll)].width;
605  int cb_height = s->codeblock[b->level + (b->orientation != subband_ll)].height;
606  int blockcnt_one = (cb_width + cb_height) == 2;
607  int ret;
608 
609  if (!b->length)
610  return 0;
611 
612  init_get_bits8(&gb, b->coeff_data, b->length);
613 
614  if (is_arith)
615  ff_dirac_init_arith_decoder(&c, &gb, b->length);
616 
617  top = 0;
618  for (cb_y = 0; cb_y < cb_height; cb_y++) {
619  bottom = (b->height * (cb_y+1LL)) / cb_height;
620  left = 0;
621  for (cb_x = 0; cb_x < cb_width; cb_x++) {
622  right = (b->width * (cb_x+1LL)) / cb_width;
623  ret = codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
624  if (ret < 0)
625  return ret;
626  left = right;
627  }
628  top = bottom;
629  }
630 
631  if (b->orientation == subband_ll && s->num_refs == 0) {
632  if (s->pshift) {
633  intra_dc_prediction_10(b);
634  } else {
635  intra_dc_prediction_8(b);
636  }
637  }
638  return 0;
639 }
640 
641 static int decode_subband_arith(AVCodecContext *avctx, void *b)
642 {
643  DiracContext *s = avctx->priv_data;
644  return decode_subband_internal(s, b, 1);
645 }
646 
647 static int decode_subband_golomb(AVCodecContext *avctx, void *arg)
648 {
649  DiracContext *s = avctx->priv_data;
650  SubBand **b = arg;
651  return decode_subband_internal(s, *b, 0);
652 }
653 
654 /**
655  * Dirac Specification ->
656  * [DIRAC_STD] 13.4.1 core_transform_data()
657  */
658 static int decode_component(DiracContext *s, int comp)
659 {
660  AVCodecContext *avctx = s->avctx;
661     SubBand *bands[3*MAX_DWT_LEVELS+1];
662     enum dirac_subband orientation;
663  int level, num_bands = 0;
664  int ret[3*MAX_DWT_LEVELS+1];
665  int i;
666  int damaged_count = 0;
667 
668  /* Unpack all subbands at all levels. */
669  for (level = 0; level < s->wavelet_depth; level++) {
670  for (orientation = !!level; orientation < 4; orientation++) {
671  SubBand *b = &s->plane[comp].band[level][orientation];
672  bands[num_bands++] = b;
673 
674  align_get_bits(&s->gb);
675  /* [DIRAC_STD] 13.4.2 subband() */
676  b->length = get_interleaved_ue_golomb(&s->gb);
677  if (b->length) {
678  b->quant = get_interleaved_ue_golomb(&s->gb);
679  if (b->quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
680  av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", b->quant);
681  b->quant = 0;
682  return AVERROR_INVALIDDATA;
683  }
684  align_get_bits(&s->gb);
685  b->coeff_data = s->gb.buffer + get_bits_count(&s->gb)/8;
686  if (b->length > FFMAX(get_bits_left(&s->gb)/8, 0)) {
687  b->length = FFMAX(get_bits_left(&s->gb)/8, 0);
688  damaged_count ++;
689  }
690  skip_bits_long(&s->gb, b->length*8);
691  }
692  }
693  /* arithmetic coding has inter-level dependencies, so we can only execute one level at a time */
694  if (s->is_arith)
695  avctx->execute(avctx, decode_subband_arith, &s->plane[comp].band[level][!!level],
696  ret + 3*level + !!level, 4-!!level, sizeof(SubBand));
697  }
698  /* golomb coding has no inter-level dependencies, so we can execute all subbands in parallel */
699  if (!s->is_arith)
700  avctx->execute(avctx, decode_subband_golomb, bands, ret, num_bands, sizeof(SubBand*));
701 
702  for (i = 0; i < s->wavelet_depth * 3 + 1; i++) {
703  if (ret[i] < 0)
704  damaged_count++;
705  }
706  if (damaged_count > (s->wavelet_depth * 3 + 1) /2)
707  return AVERROR_INVALIDDATA;
708 
709  return 0;
710 }
711 
712 #define PARSE_VALUES(type, x, gb, ebits, buf1, buf2) \
713  type *buf = (type *)buf1; \
714  buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset); \
715  if (get_bits_count(gb) >= ebits) \
716  return; \
717  if (buf2) { \
718  buf = (type *)buf2; \
719  buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset); \
720  if (get_bits_count(gb) >= ebits) \
721  return; \
722  } \
723 
724 static void decode_subband(DiracContext *s, GetBitContext *gb, int quant,
725                            int slice_x, int slice_y, int bits_end,
726  SubBand *b1, SubBand *b2)
727 {
728  int left = b1->width * slice_x / s->num_x;
729  int right = b1->width *(slice_x+1) / s->num_x;
730  int top = b1->height * slice_y / s->num_y;
731  int bottom = b1->height *(slice_y+1) / s->num_y;
732 
733  int qfactor, qoffset;
734 
735  uint8_t *buf1 = b1->ibuf + top * b1->stride;
736  uint8_t *buf2 = b2 ? b2->ibuf + top * b2->stride: NULL;
737  int x, y;
738 
739  if (quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
740  av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", quant);
741  return;
742  }
743  qfactor = ff_dirac_qscale_tab[quant];
744  qoffset = ff_dirac_qoffset_intra_tab[quant] + 2;
745  /* we have to constantly check for overread since the spec explicitly
746  requires this, with the meaning that all remaining coeffs are set to 0 */
747  if (get_bits_count(gb) >= bits_end)
748  return;
749 
750  if (s->pshift) {
751  for (y = top; y < bottom; y++) {
752  for (x = left; x < right; x++) {
753  PARSE_VALUES(int32_t, x, gb, bits_end, buf1, buf2);
754  }
755  buf1 += b1->stride;
756  if (buf2)
757  buf2 += b2->stride;
758  }
759  }
760  else {
761  for (y = top; y < bottom; y++) {
762  for (x = left; x < right; x++) {
763  PARSE_VALUES(int16_t, x, gb, bits_end, buf1, buf2);
764  }
765  buf1 += b1->stride;
766  if (buf2)
767  buf2 += b2->stride;
768  }
769  }
770 }
771 
772 /**
773  * Dirac Specification ->
774  * 13.5.2 Slices. slice(sx,sy)
775  */
776 static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
777 {
778  DiracContext *s = avctx->priv_data;
779  DiracSlice *slice = arg;
780  GetBitContext *gb = &slice->gb;
781  enum dirac_subband orientation;
782  int level, quant, chroma_bits, chroma_end;
783 
784  int quant_base = get_bits(gb, 7); /*[DIRAC_STD] qindex */
785  int length_bits = av_log2(8 * slice->bytes)+1;
786  int luma_bits = get_bits_long(gb, length_bits);
787  int luma_end = get_bits_count(gb) + FFMIN(luma_bits, get_bits_left(gb));
788 
789  /* [DIRAC_STD] 13.5.5.2 luma_slice_band */
790  for (level = 0; level < s->wavelet_depth; level++)
791  for (orientation = !!level; orientation < 4; orientation++) {
792  quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
793  decode_subband(s, gb, quant, slice->slice_x, slice->slice_y, luma_end,
794  &s->plane[0].band[level][orientation], NULL);
795  }
796 
797  /* consume any unused bits from luma */
798  skip_bits_long(gb, get_bits_count(gb) - luma_end);
799 
800  chroma_bits = 8*slice->bytes - 7 - length_bits - luma_bits;
801  chroma_end = get_bits_count(gb) + FFMIN(chroma_bits, get_bits_left(gb));
802  /* [DIRAC_STD] 13.5.5.3 chroma_slice_band */
803  for (level = 0; level < s->wavelet_depth; level++)
804  for (orientation = !!level; orientation < 4; orientation++) {
805  quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
806  decode_subband(s, gb, quant, slice->slice_x, slice->slice_y, chroma_end,
807  &s->plane[1].band[level][orientation],
808  &s->plane[2].band[level][orientation]);
809  }
810 
811  return 0;
812 }
813 
814 typedef struct SliceCoeffs {
815  int left;
816  int top;
817  int tot_h;
818  int tot_v;
819  int tot;
820 } SliceCoeffs;
821 
822 static int subband_coeffs(DiracContext *s, int x, int y, int p,
823                           SliceCoeffs c[MAX_DWT_LEVELS])
824 {
825  int level, coef = 0;
826  for (level = 0; level < s->wavelet_depth; level++) {
827  SliceCoeffs *o = &c[level];
828     SubBand *b = &s->plane[p].band[level][3]; /* orientation doesn't matter */
829  o->top = b->height * y / s->num_y;
830  o->left = b->width * x / s->num_x;
831  o->tot_h = ((b->width * (x + 1)) / s->num_x) - o->left;
832  o->tot_v = ((b->height * (y + 1)) / s->num_y) - o->top;
833  o->tot = o->tot_h*o->tot_v;
834  coef += o->tot * (4 - !!level);
835  }
836  return coef;
837 }
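/* The 4 - !!level term is the number of subbands per level: the coarsest level
 * carries LL, HL, LH and HH, every other level only HL, LH and HH, matching the
 * "orientation = !!level" loops used throughout this file. The returned total
 * is what sizes the per-thread coefficient buffer in decode_lowdelay(). */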
838 
839 /**
840  * VC-2 Specification ->
841  * 13.5.3 hq_slice(sx,sy)
842  */
843 static int decode_hq_slice(DiracContext *s, DiracSlice *slice, uint8_t *tmp_buf)
844 {
845  int i, level, orientation, quant_idx;
846  int qfactor[MAX_DWT_LEVELS][4], qoffset[MAX_DWT_LEVELS][4];
847  GetBitContext *gb = &slice->gb;
848  SliceCoeffs coeffs_num[MAX_DWT_LEVELS];
849 
850  skip_bits_long(gb, 8*s->highquality.prefix_bytes);
851  quant_idx = get_bits(gb, 8);
852 
853  if (quant_idx > DIRAC_MAX_QUANT_INDEX - 1) {
854  av_log(s->avctx, AV_LOG_ERROR, "Invalid quantization index - %i\n", quant_idx);
855  return AVERROR_INVALIDDATA;
856  }
857 
858  /* Slice quantization (slice_quantizers() in the specs) */
859  for (level = 0; level < s->wavelet_depth; level++) {
860  for (orientation = !!level; orientation < 4; orientation++) {
861  const int quant = FFMAX(quant_idx - s->lowdelay.quant[level][orientation], 0);
862  qfactor[level][orientation] = ff_dirac_qscale_tab[quant];
863  qoffset[level][orientation] = ff_dirac_qoffset_intra_tab[quant] + 2;
864  }
865  }
866 
867  /* Luma + 2 Chroma planes */
868  for (i = 0; i < 3; i++) {
869  int coef_num, coef_par, off = 0;
870  int64_t length = s->highquality.size_scaler*get_bits(gb, 8);
871  int64_t bits_end = get_bits_count(gb) + 8*length;
872  const uint8_t *addr = align_get_bits(gb);
873 
874  if (length*8 > get_bits_left(gb)) {
875  av_log(s->avctx, AV_LOG_ERROR, "end too far away\n");
876  return AVERROR_INVALIDDATA;
877  }
878 
879  coef_num = subband_coeffs(s, slice->slice_x, slice->slice_y, i, coeffs_num);
880 
881  if (s->pshift)
882  coef_par = ff_dirac_golomb_read_32bit(addr, length,
883  tmp_buf, coef_num);
884  else
885  coef_par = ff_dirac_golomb_read_16bit(addr, length,
886  tmp_buf, coef_num);
887 
888  if (coef_num > coef_par) {
889  const int start_b = coef_par * (1 << (s->pshift + 1));
890  const int end_b = coef_num * (1 << (s->pshift + 1));
891  memset(&tmp_buf[start_b], 0, end_b - start_b);
892  }
893 
894  for (level = 0; level < s->wavelet_depth; level++) {
895  const SliceCoeffs *c = &coeffs_num[level];
896  for (orientation = !!level; orientation < 4; orientation++) {
897  const SubBand *b1 = &s->plane[i].band[level][orientation];
898  uint8_t *buf = b1->ibuf + c->top * b1->stride + (c->left << (s->pshift + 1));
899 
900  /* Change to c->tot_h <= 4 for AVX2 dequantization */
901  const int qfunc = s->pshift + 2*(c->tot_h <= 2);
902  s->diracdsp.dequant_subband[qfunc](&tmp_buf[off], buf, b1->stride,
903  qfactor[level][orientation],
904  qoffset[level][orientation],
905  c->tot_v, c->tot_h);
906 
907  off += c->tot << (s->pshift + 1);
908  }
909  }
910 
911  skip_bits_long(gb, bits_end - get_bits_count(gb));
912  }
913 
914  return 0;
915 }
916 
917 static int decode_hq_slice_row(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
918 {
919  int i;
920  DiracContext *s = avctx->priv_data;
921  DiracSlice *slices = ((DiracSlice *)arg) + s->num_x*jobnr;
922  uint8_t *thread_buf = &s->thread_buf[s->thread_buf_size*threadnr];
923  for (i = 0; i < s->num_x; i++)
924  decode_hq_slice(s, &slices[i], thread_buf);
925  return 0;
926 }
927 
928 /**
929  * Dirac Specification ->
930  * 13.5.1 low_delay_transform_data()
931  */
932 static int decode_lowdelay(DiracContext *s)
933 {
934  AVCodecContext *avctx = s->avctx;
935  int slice_x, slice_y, bufsize;
936  int64_t coef_buf_size, bytes = 0;
937  const uint8_t *buf;
938  DiracSlice *slices;
939     SliceCoeffs tmp[MAX_DWT_LEVELS];
940     int slice_num = 0;
941 
942  if (s->slice_params_num_buf != (s->num_x * s->num_y)) {
943  s->slice_params_buf = av_realloc_f(s->slice_params_buf, s->num_x * s->num_y, sizeof(DiracSlice));
944  if (!s->slice_params_buf) {
945  av_log(s->avctx, AV_LOG_ERROR, "slice params buffer allocation failure\n");
946  s->slice_params_num_buf = 0;
947  return AVERROR(ENOMEM);
948  }
949  s->slice_params_num_buf = s->num_x * s->num_y;
950  }
951  slices = s->slice_params_buf;
952 
953     /* 8 because that's how much the golomb reader could overread junk data
954  * from another plane/slice at most, and 512 because SIMD */
955  coef_buf_size = subband_coeffs(s, s->num_x - 1, s->num_y - 1, 0, tmp) + 8;
956  coef_buf_size = (coef_buf_size << (1 + s->pshift)) + 512;
957 
958  if (s->threads_num_buf != avctx->thread_count ||
959  s->thread_buf_size != coef_buf_size) {
960  s->threads_num_buf = avctx->thread_count;
961  s->thread_buf_size = coef_buf_size;
962  s->thread_buf = av_realloc_f(s->thread_buf, avctx->thread_count, s->thread_buf_size);
963  if (!s->thread_buf) {
964  av_log(s->avctx, AV_LOG_ERROR, "thread buffer allocation failure\n");
965  return AVERROR(ENOMEM);
966  }
967  }
968 
969  align_get_bits(&s->gb);
970  /*[DIRAC_STD] 13.5.2 Slices. slice(sx,sy) */
971  buf = s->gb.buffer + get_bits_count(&s->gb)/8;
972  bufsize = get_bits_left(&s->gb);
973 
974  if (s->hq_picture) {
975  int i;
976 
977  for (slice_y = 0; bufsize > 0 && slice_y < s->num_y; slice_y++) {
978  for (slice_x = 0; bufsize > 0 && slice_x < s->num_x; slice_x++) {
979  bytes = s->highquality.prefix_bytes + 1;
980  for (i = 0; i < 3; i++) {
981  if (bytes <= bufsize/8)
982  bytes += buf[bytes] * s->highquality.size_scaler + 1;
983  }
984  if (bytes >= INT_MAX || bytes*8 > bufsize) {
985  av_log(s->avctx, AV_LOG_ERROR, "too many bytes\n");
986  return AVERROR_INVALIDDATA;
987  }
988 
989  slices[slice_num].bytes = bytes;
990  slices[slice_num].slice_x = slice_x;
991  slices[slice_num].slice_y = slice_y;
992  init_get_bits(&slices[slice_num].gb, buf, bufsize);
993  slice_num++;
994 
995  buf += bytes;
996  if (bufsize/8 >= bytes)
997  bufsize -= bytes*8;
998  else
999  bufsize = 0;
1000  }
1001  }
1002 
1003  if (s->num_x*s->num_y != slice_num) {
1004  av_log(s->avctx, AV_LOG_ERROR, "too few slices\n");
1005  return AVERROR_INVALIDDATA;
1006  }
1007 
1008  avctx->execute2(avctx, decode_hq_slice_row, slices, NULL, s->num_y);
1009  } else {
1010  for (slice_y = 0; bufsize > 0 && slice_y < s->num_y; slice_y++) {
1011  for (slice_x = 0; bufsize > 0 && slice_x < s->num_x; slice_x++) {
1012  bytes = (slice_num+1) * (int64_t)s->lowdelay.bytes.num / s->lowdelay.bytes.den
1013  - slice_num * (int64_t)s->lowdelay.bytes.num / s->lowdelay.bytes.den;
1014  if (bytes >= INT_MAX || bytes*8 > bufsize) {
1015  av_log(s->avctx, AV_LOG_ERROR, "too many bytes\n");
1016  return AVERROR_INVALIDDATA;
1017  }
1018  slices[slice_num].bytes = bytes;
1019  slices[slice_num].slice_x = slice_x;
1020  slices[slice_num].slice_y = slice_y;
1021  init_get_bits(&slices[slice_num].gb, buf, bufsize);
1022  slice_num++;
1023 
1024  buf += bytes;
1025  if (bufsize/8 >= bytes)
1026  bufsize -= bytes*8;
1027  else
1028  bufsize = 0;
1029  }
1030  }
1031  avctx->execute(avctx, decode_lowdelay_slice, slices, NULL, slice_num,
1032  sizeof(DiracSlice)); /* [DIRAC_STD] 13.5.2 Slices */
1033  }
1034 
1035  if (s->dc_prediction) {
1036  if (s->pshift) {
1037  intra_dc_prediction_10(&s->plane[0].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
1038  intra_dc_prediction_10(&s->plane[1].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
1039  intra_dc_prediction_10(&s->plane[2].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
1040  } else {
1041  intra_dc_prediction_8(&s->plane[0].band[0][0]);
1042  intra_dc_prediction_8(&s->plane[1].band[0][0]);
1043  intra_dc_prediction_8(&s->plane[2].band[0][0]);
1044  }
1045  }
1046 
1047  return 0;
1048 }
1049 
1050 static void init_planes(DiracContext *s)
1051 {
1052  int i, w, h, level, orientation;
1053 
1054  for (i = 0; i < 3; i++) {
1055  Plane *p = &s->plane[i];
1056 
1057  p->width = s->seq.width >> (i ? s->chroma_x_shift : 0);
1058  p->height = s->seq.height >> (i ? s->chroma_y_shift : 0);
1059  p->idwt.width = w = CALC_PADDING(p->width , s->wavelet_depth);
1060  p->idwt.height = h = CALC_PADDING(p->height, s->wavelet_depth);
1061  p->idwt.stride = FFALIGN(p->idwt.width, 8) << (1 + s->pshift);
1062 
1063  for (level = s->wavelet_depth-1; level >= 0; level--) {
1064  w = w>>1;
1065  h = h>>1;
1066  for (orientation = !!level; orientation < 4; orientation++) {
1067  SubBand *b = &p->band[level][orientation];
1068 
1069  b->pshift = s->pshift;
1070  b->ibuf = p->idwt.buf;
1071  b->level = level;
1072  b->stride = p->idwt.stride << (s->wavelet_depth - level);
1073  b->width = w;
1074  b->height = h;
1075  b->orientation = orientation;
1076 
1077  if (orientation & 1)
1078  b->ibuf += w << (1+b->pshift);
1079  if (orientation > 1)
1080  b->ibuf += (b->stride>>1);
1081 
1082  if (level)
1083  b->parent = &p->band[level-1][orientation];
1084  }
1085  }
1086 
1087  if (i > 0) {
1088  p->xblen = s->plane[0].xblen >> s->chroma_x_shift;
1089  p->yblen = s->plane[0].yblen >> s->chroma_y_shift;
1090  p->xbsep = s->plane[0].xbsep >> s->chroma_x_shift;
1091  p->ybsep = s->plane[0].ybsep >> s->chroma_y_shift;
1092  }
1093 
1094  p->xoffset = (p->xblen - p->xbsep)/2;
1095  p->yoffset = (p->yblen - p->ybsep)/2;
1096  }
1097 }
1098 
1099 /**
1100  * Unpack the motion compensation parameters
1101  * Dirac Specification ->
1102  * 11.2 Picture prediction data. picture_prediction()
1103  */
1104 static int dirac_unpack_prediction_parameters(DiracContext *s)
1105 {
1106  static const uint8_t default_blen[] = { 4, 12, 16, 24 };
1107 
1108  GetBitContext *gb = &s->gb;
1109  unsigned idx, ref;
1110 
1111  align_get_bits(gb);
1112  /* [DIRAC_STD] 11.2.2 Block parameters. block_parameters() */
1113  /* Luma and Chroma are equal. 11.2.3 */
1114  idx = get_interleaved_ue_golomb(gb); /* [DIRAC_STD] index */
1115 
1116  if (idx > 4) {
1117  av_log(s->avctx, AV_LOG_ERROR, "Block prediction index too high\n");
1118  return AVERROR_INVALIDDATA;
1119  }
1120 
1121  if (idx == 0) {
1122  s->plane[0].xblen = get_interleaved_ue_golomb(gb);
1123  s->plane[0].yblen = get_interleaved_ue_golomb(gb);
1124  s->plane[0].xbsep = get_interleaved_ue_golomb(gb);
1125  s->plane[0].ybsep = get_interleaved_ue_golomb(gb);
1126  } else {
1127  /*[DIRAC_STD] preset_block_params(index). Table 11.1 */
1128  s->plane[0].xblen = default_blen[idx-1];
1129  s->plane[0].yblen = default_blen[idx-1];
1130  s->plane[0].xbsep = 4 * idx;
1131  s->plane[0].ybsep = 4 * idx;
1132  }
1133  /*[DIRAC_STD] 11.2.4 motion_data_dimensions()
1134  Calculated in function dirac_unpack_block_motion_data */
1135 
1136  if (s->plane[0].xblen % (1 << s->chroma_x_shift) != 0 ||
1137  s->plane[0].yblen % (1 << s->chroma_y_shift) != 0 ||
1138  !s->plane[0].xblen || !s->plane[0].yblen) {
1139  av_log(s->avctx, AV_LOG_ERROR,
1140  "invalid x/y block length (%d/%d) for x/y chroma shift (%d/%d)\n",
1141  s->plane[0].xblen, s->plane[0].yblen, s->chroma_x_shift, s->chroma_y_shift);
1142  return AVERROR_INVALIDDATA;
1143  }
1144  if (!s->plane[0].xbsep || !s->plane[0].ybsep || s->plane[0].xbsep < s->plane[0].xblen/2 || s->plane[0].ybsep < s->plane[0].yblen/2) {
1145  av_log(s->avctx, AV_LOG_ERROR, "Block separation too small\n");
1146  return AVERROR_INVALIDDATA;
1147  }
1148  if (s->plane[0].xbsep > s->plane[0].xblen || s->plane[0].ybsep > s->plane[0].yblen) {
1149  av_log(s->avctx, AV_LOG_ERROR, "Block separation greater than size\n");
1150  return AVERROR_INVALIDDATA;
1151  }
1152  if (FFMAX(s->plane[0].xblen, s->plane[0].yblen) > MAX_BLOCKSIZE) {
1153  av_log(s->avctx, AV_LOG_ERROR, "Unsupported large block size\n");
1154  return AVERROR_PATCHWELCOME;
1155  }
1156 
1157  /*[DIRAC_STD] 11.2.5 Motion vector precision. motion_vector_precision()
1158  Read motion vector precision */
1159  s->mv_precision = get_interleaved_ue_golomb(gb);
1160  if (s->mv_precision > 3) {
1161  av_log(s->avctx, AV_LOG_ERROR, "MV precision finer than eighth-pel\n");
1162  return AVERROR_INVALIDDATA;
1163  }
1164 
1165  /*[DIRAC_STD] 11.2.6 Global motion. global_motion()
1166  Read the global motion compensation parameters */
1167  s->globalmc_flag = get_bits1(gb);
1168  if (s->globalmc_flag) {
1169  memset(s->globalmc, 0, sizeof(s->globalmc));
1170  /* [DIRAC_STD] pan_tilt(gparams) */
1171  for (ref = 0; ref < s->num_refs; ref++) {
1172  if (get_bits1(gb)) {
1173  s->globalmc[ref].pan_tilt[0] = dirac_get_se_golomb(gb);
1174  s->globalmc[ref].pan_tilt[1] = dirac_get_se_golomb(gb);
1175  }
1176  /* [DIRAC_STD] zoom_rotate_shear(gparams)
1177  zoom/rotation/shear parameters */
1178  if (get_bits1(gb)) {
1179  s->globalmc[ref].zrs_exp = get_interleaved_ue_golomb(gb);
1180  s->globalmc[ref].zrs[0][0] = dirac_get_se_golomb(gb);
1181  s->globalmc[ref].zrs[0][1] = dirac_get_se_golomb(gb);
1182  s->globalmc[ref].zrs[1][0] = dirac_get_se_golomb(gb);
1183  s->globalmc[ref].zrs[1][1] = dirac_get_se_golomb(gb);
1184  } else {
1185  s->globalmc[ref].zrs[0][0] = 1;
1186  s->globalmc[ref].zrs[1][1] = 1;
1187  }
1188  /* [DIRAC_STD] perspective(gparams) */
1189  if (get_bits1(gb)) {
1190  s->globalmc[ref].perspective_exp = get_interleaved_ue_golomb(gb);
1191  s->globalmc[ref].perspective[0] = dirac_get_se_golomb(gb);
1192  s->globalmc[ref].perspective[1] = dirac_get_se_golomb(gb);
1193  }
1194  if (s->globalmc[ref].perspective_exp + (uint64_t)s->globalmc[ref].zrs_exp > 30) {
1195  return AVERROR_INVALIDDATA;
1196  }
1197 
1198  }
1199  }
1200 
1201  /*[DIRAC_STD] 11.2.7 Picture prediction mode. prediction_mode()
1202  Picture prediction mode, not currently used. */
1203  if (get_interleaved_ue_golomb(gb)) {
1204  av_log(s->avctx, AV_LOG_ERROR, "Unknown picture prediction mode\n");
1205  return AVERROR_INVALIDDATA;
1206  }
1207 
1208  /* [DIRAC_STD] 11.2.8 Reference picture weight. reference_picture_weights()
1209  just data read, weight calculation will be done later on. */
1210  s->weight_log2denom = 1;
1211  s->weight[0] = 1;
1212  s->weight[1] = 1;
1213 
1214  if (get_bits1(gb)) {
1215  s->weight_log2denom = get_interleaved_ue_golomb(gb);
1216  if (s->weight_log2denom < 1 || s->weight_log2denom > 8) {
1217  av_log(s->avctx, AV_LOG_ERROR, "weight_log2denom unsupported or invalid\n");
1218  s->weight_log2denom = 1;
1219  return AVERROR_INVALIDDATA;
1220  }
1221  s->weight[0] = dirac_get_se_golomb(gb);
1222  if (s->num_refs == 2)
1223  s->weight[1] = dirac_get_se_golomb(gb);
1224  }
1225  return 0;
1226 }
1227 
1228 /**
1229  * Dirac Specification ->
1230  * 11.3 Wavelet transform data. wavelet_transform()
1231  */
1232 static int dirac_unpack_idwt_params(DiracContext *s)
1233 {
1234  GetBitContext *gb = &s->gb;
1235  int i, level;
1236  unsigned tmp;
1237 
1238 #define CHECKEDREAD(dst, cond, errmsg) \
1239  tmp = get_interleaved_ue_golomb(gb); \
1240  if (cond) { \
1241  av_log(s->avctx, AV_LOG_ERROR, errmsg); \
1242  return AVERROR_INVALIDDATA; \
1243  }\
1244  dst = tmp;
1245 
1246  align_get_bits(gb);
1247 
1248  s->zero_res = s->num_refs ? get_bits1(gb) : 0;
1249  if (s->zero_res)
1250  return 0;
1251 
1252  /*[DIRAC_STD] 11.3.1 Transform parameters. transform_parameters() */
1253  CHECKEDREAD(s->wavelet_idx, tmp > 6, "wavelet_idx is too big\n")
1254 
1255  CHECKEDREAD(s->wavelet_depth, tmp > MAX_DWT_LEVELS || tmp < 1, "invalid number of DWT decompositions\n")
1256 
1257  if (!s->low_delay) {
1258  /* Codeblock parameters (core syntax only) */
1259  if (get_bits1(gb)) {
1260  for (i = 0; i <= s->wavelet_depth; i++) {
1261  CHECKEDREAD(s->codeblock[i].width , tmp < 1 || tmp > (s->avctx->width >>s->wavelet_depth-i), "codeblock width invalid\n")
1262  CHECKEDREAD(s->codeblock[i].height, tmp < 1 || tmp > (s->avctx->height>>s->wavelet_depth-i), "codeblock height invalid\n")
1263  }
1264 
1265  CHECKEDREAD(s->codeblock_mode, tmp > 1, "unknown codeblock mode\n")
1266  }
1267  else {
1268  for (i = 0; i <= s->wavelet_depth; i++)
1269  s->codeblock[i].width = s->codeblock[i].height = 1;
1270  }
1271  }
1272  else {
1273  s->num_x = get_interleaved_ue_golomb(gb);
1274  s->num_y = get_interleaved_ue_golomb(gb);
1275  if (s->num_x * s->num_y == 0 || s->num_x * (uint64_t)s->num_y > INT_MAX ||
1276  s->num_x * (uint64_t)s->avctx->width > INT_MAX ||
1277  s->num_y * (uint64_t)s->avctx->height > INT_MAX ||
1278  s->num_x > s->avctx->width ||
1279  s->num_y > s->avctx->height
1280  ) {
1281  av_log(s->avctx,AV_LOG_ERROR,"Invalid numx/y\n");
1282  s->num_x = s->num_y = 0;
1283  return AVERROR_INVALIDDATA;
1284  }
1285  if (s->ld_picture) {
1286  s->lowdelay.bytes.num = get_interleaved_ue_golomb(gb);
1287  s->lowdelay.bytes.den = get_interleaved_ue_golomb(gb);
1288  if (s->lowdelay.bytes.den <= 0) {
1289  av_log(s->avctx,AV_LOG_ERROR,"Invalid lowdelay.bytes.den\n");
1290  return AVERROR_INVALIDDATA;
1291  }
1292  } else if (s->hq_picture) {
1293  s->highquality.prefix_bytes = get_interleaved_ue_golomb(gb);
1294  s->highquality.size_scaler = get_interleaved_ue_golomb(gb);
1295  if (s->highquality.prefix_bytes >= INT_MAX / 8) {
1296  av_log(s->avctx,AV_LOG_ERROR,"too many prefix bytes\n");
1297  return AVERROR_INVALIDDATA;
1298  }
1299  }
1300 
1301  /* [DIRAC_STD] 11.3.5 Quantisation matrices (low-delay syntax). quant_matrix() */
1302  if (get_bits1(gb)) {
1303  av_log(s->avctx,AV_LOG_DEBUG,"Low Delay: Has Custom Quantization Matrix!\n");
1304  /* custom quantization matrix */
1305  for (level = 0; level < s->wavelet_depth; level++) {
1306  for (i = !!level; i < 4; i++) {
1307  s->lowdelay.quant[level][i] = get_interleaved_ue_golomb(gb);
1308  }
1309  }
1310  } else {
1311  if (s->wavelet_depth > 4) {
1312  av_log(s->avctx,AV_LOG_ERROR,"Mandatory custom low delay matrix missing for depth %d\n", s->wavelet_depth);
1313  return AVERROR_INVALIDDATA;
1314  }
1315  /* default quantization matrix */
1316  for (level = 0; level < s->wavelet_depth; level++)
1317  for (i = 0; i < 4; i++) {
1318  s->lowdelay.quant[level][i] = ff_dirac_default_qmat[s->wavelet_idx][level][i];
1319  /* haar with no shift differs for different depths */
1320  if (s->wavelet_idx == 3)
1321  s->lowdelay.quant[level][i] += 4*(s->wavelet_depth-1 - level);
1322  }
1323  }
1324  }
1325  return 0;
1326 }
1327 
1328 static inline int pred_sbsplit(uint8_t *sbsplit, int stride, int x, int y)
1329 {
1330  static const uint8_t avgsplit[7] = { 0, 0, 1, 1, 1, 2, 2 };
1331 
1332  if (!(x|y))
1333  return 0;
1334  else if (!y)
1335  return sbsplit[-1];
1336  else if (!x)
1337  return sbsplit[-stride];
1338 
1339  return avgsplit[sbsplit[-1] + sbsplit[-stride] + sbsplit[-stride-1]];
1340 }
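/* avgsplit[n] is n/3 rounded to nearest, so the return value is the rounded
 * mean of the three neighbouring superblock split levels (each 0, 1 or 2). */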
1341 
1342 static inline int pred_block_mode(DiracBlock *block, int stride, int x, int y, int refmask)
1343 {
1344  int pred;
1345 
1346  if (!(x|y))
1347  return 0;
1348  else if (!y)
1349  return block[-1].ref & refmask;
1350  else if (!x)
1351  return block[-stride].ref & refmask;
1352 
1353  /* return the majority */
1354  pred = (block[-1].ref & refmask) + (block[-stride].ref & refmask) + (block[-stride-1].ref & refmask);
1355  return (pred >> 1) & refmask;
1356 }
1357 
1358 static inline void pred_block_dc(DiracBlock *block, int stride, int x, int y)
1359 {
1360  int i, n = 0;
1361 
1362  memset(block->u.dc, 0, sizeof(block->u.dc));
1363 
1364  if (x && !(block[-1].ref & 3)) {
1365  for (i = 0; i < 3; i++)
1366  block->u.dc[i] += block[-1].u.dc[i];
1367  n++;
1368  }
1369 
1370  if (y && !(block[-stride].ref & 3)) {
1371  for (i = 0; i < 3; i++)
1372  block->u.dc[i] += block[-stride].u.dc[i];
1373  n++;
1374  }
1375 
1376  if (x && y && !(block[-1-stride].ref & 3)) {
1377  for (i = 0; i < 3; i++)
1378  block->u.dc[i] += block[-1-stride].u.dc[i];
1379  n++;
1380  }
1381 
1382  if (n == 2) {
1383  for (i = 0; i < 3; i++)
1384  block->u.dc[i] = (block->u.dc[i]+1)>>1;
1385  } else if (n == 3) {
1386  for (i = 0; i < 3; i++)
1387  block->u.dc[i] = divide3(block->u.dc[i]);
1388  }
1389 }
1390 
1391 static inline void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
1392 {
1393  int16_t *pred[3];
1394  int refmask = ref+1;
1395  int mask = refmask | DIRAC_REF_MASK_GLOBAL; /* exclude gmc blocks */
1396  int n = 0;
1397 
1398  if (x && (block[-1].ref & mask) == refmask)
1399  pred[n++] = block[-1].u.mv[ref];
1400 
1401  if (y && (block[-stride].ref & mask) == refmask)
1402  pred[n++] = block[-stride].u.mv[ref];
1403 
1404  if (x && y && (block[-stride-1].ref & mask) == refmask)
1405  pred[n++] = block[-stride-1].u.mv[ref];
1406 
1407  switch (n) {
1408  case 0:
1409  block->u.mv[ref][0] = 0;
1410  block->u.mv[ref][1] = 0;
1411  break;
1412  case 1:
1413  block->u.mv[ref][0] = pred[0][0];
1414  block->u.mv[ref][1] = pred[0][1];
1415  break;
1416  case 2:
1417  block->u.mv[ref][0] = (pred[0][0] + pred[1][0] + 1) >> 1;
1418  block->u.mv[ref][1] = (pred[0][1] + pred[1][1] + 1) >> 1;
1419  break;
1420  case 3:
1421  block->u.mv[ref][0] = mid_pred(pred[0][0], pred[1][0], pred[2][0]);
1422  block->u.mv[ref][1] = mid_pred(pred[0][1], pred[1][1], pred[2][1]);
1423  break;
1424  }
1425 }
1426 
1427 static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref)
1428 {
1429  int ez = s->globalmc[ref].zrs_exp;
1430  int ep = s->globalmc[ref].perspective_exp;
1431  int (*A)[2] = s->globalmc[ref].zrs;
1432  int *b = s->globalmc[ref].pan_tilt;
1433  int *c = s->globalmc[ref].perspective;
1434 
1435  int64_t m = (1<<ep) - (c[0]*(int64_t)x + c[1]*(int64_t)y);
1436  int64_t mx = m * (uint64_t)((A[0][0] * (int64_t)x + A[0][1]*(int64_t)y) + (1LL<<ez) * b[0]);
1437  int64_t my = m * (uint64_t)((A[1][0] * (int64_t)x + A[1][1]*(int64_t)y) + (1LL<<ez) * b[1]);
1438 
1439  block->u.mv[ref][0] = (mx + (1<<(ez+ep))) >> (ez+ep);
1440  block->u.mv[ref][1] = (my + (1<<(ez+ep))) >> (ez+ep);
1441 }
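/* In other words the global vector is, up to rounding,
 *     mv = (A*(x,y) + 2^ez * b) * (2^ep - c.(x,y)) / 2^(ez+ep)
 * with zoom/rotate/shear matrix A, pan/tilt vector b, perspective vector c and
 * the accuracy exponents ez/ep read from the global motion parameters above. */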
1442 
1443 static void decode_block_params(DiracContext *s, DiracArith arith[8], DiracBlock *block,
1444                                 int stride, int x, int y)
1445 {
1446  int i;
1447 
1448     block->ref  = pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF1);
1449     block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF1);
1450 
1451  if (s->num_refs == 2) {
1452         block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF2);
1453         block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF2) << 1;
1454  }
1455 
1456  if (!block->ref) {
1457  pred_block_dc(block, stride, x, y);
1458  for (i = 0; i < 3; i++)
1459  block->u.dc[i] += (unsigned)dirac_get_arith_int(arith+1+i, CTX_DC_F1, CTX_DC_DATA);
1460  return;
1461  }
1462 
1463  if (s->globalmc_flag) {
1464         block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_GLOBAL);
1465         block->ref ^= dirac_get_arith_bit(arith, CTX_GLOBAL_BLOCK) << 2;
1466  }
1467 
1468  for (i = 0; i < s->num_refs; i++)
1469  if (block->ref & (i+1)) {
1470  if (block->ref & DIRAC_REF_MASK_GLOBAL) {
1471  global_mv(s, block, x, y, i);
1472  } else {
1473  pred_mv(block, stride, x, y, i);
1474  block->u.mv[i][0] += (unsigned)dirac_get_arith_int(arith + 4 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
1475  block->u.mv[i][1] += (unsigned)dirac_get_arith_int(arith + 5 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
1476  }
1477  }
1478 }
1479 
1480 /**
1481  * Copies the current block to the other blocks covered by the current superblock split mode
1482  */
1483 static void propagate_block_data(DiracBlock *block, int stride, int size)
1484 {
1485  int x, y;
1486  DiracBlock *dst = block;
1487 
1488  for (x = 1; x < size; x++)
1489  dst[x] = *block;
1490 
1491  for (y = 1; y < size; y++) {
1492  dst += stride;
1493  for (x = 0; x < size; x++)
1494  dst[x] = *block;
1495  }
1496 }
1497 
1498 /**
1499  * Dirac Specification ->
1500  * 12. Block motion data syntax
1501  */
1502 static int dirac_unpack_block_motion_data(DiracContext *s)
1503 {
1504  GetBitContext *gb = &s->gb;
1505  uint8_t *sbsplit = s->sbsplit;
1506  int i, x, y, q, p;
1507  DiracArith arith[8];
1508 
1509  align_get_bits(gb);
1510 
1511  /* [DIRAC_STD] 11.2.4 and 12.2.1 Number of blocks and superblocks */
1512  s->sbwidth = DIVRNDUP(s->seq.width, 4*s->plane[0].xbsep);
1513  s->sbheight = DIVRNDUP(s->seq.height, 4*s->plane[0].ybsep);
1514  s->blwidth = 4 * s->sbwidth;
1515  s->blheight = 4 * s->sbheight;
1516 
1517  /* [DIRAC_STD] 12.3.1 Superblock splitting modes. superblock_split_modes()
1518  decode superblock split modes */
1519  ff_dirac_init_arith_decoder(arith, gb, get_interleaved_ue_golomb(gb)); /* get_interleaved_ue_golomb(gb) is the length */
1520  for (y = 0; y < s->sbheight; y++) {
1521  for (x = 0; x < s->sbwidth; x++) {
1522  unsigned int split = dirac_get_arith_uint(arith, CTX_SB_F1, CTX_SB_DATA);
1523  if (split > 2)
1524  return AVERROR_INVALIDDATA;
1525  sbsplit[x] = (split + pred_sbsplit(sbsplit+x, s->sbwidth, x, y)) % 3;
1526  }
1527  sbsplit += s->sbwidth;
1528  }
1529 
1530  /* setup arith decoding */
1531     ff_dirac_init_arith_decoder(arith, gb, get_interleaved_ue_golomb(gb));
1532     for (i = 0; i < s->num_refs; i++) {
1533  ff_dirac_init_arith_decoder(arith + 4 + 2 * i, gb, get_interleaved_ue_golomb(gb));
1534  ff_dirac_init_arith_decoder(arith + 5 + 2 * i, gb, get_interleaved_ue_golomb(gb));
1535  }
1536  for (i = 0; i < 3; i++)
1537         ff_dirac_init_arith_decoder(arith + 1 + i, gb, get_interleaved_ue_golomb(gb));
1538 
1539  for (y = 0; y < s->sbheight; y++)
1540  for (x = 0; x < s->sbwidth; x++) {
1541  int blkcnt = 1 << s->sbsplit[y * s->sbwidth + x];
1542  int step = 4 >> s->sbsplit[y * s->sbwidth + x];
1543 
1544  for (q = 0; q < blkcnt; q++)
1545  for (p = 0; p < blkcnt; p++) {
1546  int bx = 4 * x + p*step;
1547  int by = 4 * y + q*step;
1548  DiracBlock *block = &s->blmotion[by*s->blwidth + bx];
1549  decode_block_params(s, arith, block, s->blwidth, bx, by);
1550  propagate_block_data(block, s->blwidth, step);
1551  }
1552  }
1553 
1554  for (i = 0; i < 4 + 2*s->num_refs; i++) {
1555  if (arith[i].error)
1556  return arith[i].error;
1557  }
1558 
1559  return 0;
1560 }
1561 
1562 static int weight(int i, int blen, int offset)
1563 {
1564 #define ROLLOFF(i) offset == 1 ? ((i) ? 5 : 3) : \
1565  (1 + (6*(i) + offset - 1) / (2*offset - 1))
1566 
1567  if (i < 2*offset)
1568  return ROLLOFF(i);
1569  else if (i > blen-1 - 2*offset)
1570  return ROLLOFF(blen-1 - i);
1571  return 8;
1572 }
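/* Rough sketch of the resulting OBMC ramp, assuming offset == 4 (e.g. a 16-wide
 * block with 8-pixel separation): positions 0..7 get weights 1,2,3,4,4,5,6,7,
 * the mirror image applies at the far edge, and everything in between gets the
 * full weight of 8; the row and column weights are then multiplied together
 * (8 standing in for the full-weight direction) in init_obmc_weight_row(). */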
1573 
1574 static void init_obmc_weight_row(Plane *p, uint8_t *obmc_weight, int stride,
1575  int left, int right, int wy)
1576 {
1577  int x;
1578  for (x = 0; left && x < p->xblen >> 1; x++)
1579  obmc_weight[x] = wy*8;
1580  for (; x < p->xblen >> right; x++)
1581  obmc_weight[x] = wy*weight(x, p->xblen, p->xoffset);
1582  for (; x < p->xblen; x++)
1583  obmc_weight[x] = wy*8;
1584  for (; x < stride; x++)
1585  obmc_weight[x] = 0;
1586 }
1587 
1588 static void init_obmc_weight(Plane *p, uint8_t *obmc_weight, int stride,
1589  int left, int right, int top, int bottom)
1590 {
1591  int y;
1592  for (y = 0; top && y < p->yblen >> 1; y++) {
1593  init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
1594  obmc_weight += stride;
1595  }
1596  for (; y < p->yblen >> bottom; y++) {
1597  int wy = weight(y, p->yblen, p->yoffset);
1598  init_obmc_weight_row(p, obmc_weight, stride, left, right, wy);
1599  obmc_weight += stride;
1600  }
1601  for (; y < p->yblen; y++) {
1602  init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
1603  obmc_weight += stride;
1604  }
1605 }
1606 
1607 static void init_obmc_weights(DiracContext *s, Plane *p, int by)
1608 {
1609  int top = !by;
1610  int bottom = by == s->blheight-1;
1611 
1612  /* don't bother re-initing for rows 2 to blheight-2, the weights don't change */
1613  if (top || bottom || by == 1) {
1614  init_obmc_weight(p, s->obmc_weight[0], MAX_BLOCKSIZE, 1, 0, top, bottom);
1615  init_obmc_weight(p, s->obmc_weight[1], MAX_BLOCKSIZE, 0, 0, top, bottom);
1616  init_obmc_weight(p, s->obmc_weight[2], MAX_BLOCKSIZE, 0, 1, top, bottom);
1617  }
1618 }
1619 
1620 static const uint8_t epel_weights[4][4][4] = {
1621  {{ 16, 0, 0, 0 },
1622  { 12, 4, 0, 0 },
1623  { 8, 8, 0, 0 },
1624  { 4, 12, 0, 0 }},
1625  {{ 12, 0, 4, 0 },
1626  { 9, 3, 3, 1 },
1627  { 6, 6, 2, 2 },
1628  { 3, 9, 1, 3 }},
1629  {{ 8, 0, 8, 0 },
1630  { 6, 2, 6, 2 },
1631  { 4, 4, 4, 4 },
1632  { 2, 6, 2, 6 }},
1633  {{ 4, 0, 12, 0 },
1634  { 3, 1, 9, 3 },
1635  { 2, 2, 6, 6 },
1636  { 1, 3, 3, 9 }}
1637 };
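/* Each row above sums to 16; mc_subpel() below indexes this table as
 * epel_weights[my&3][mx&3] to get 1/16-scaled bilinear weights for the four
 * half-pel planes (F, H, V, C) when interpolating eighth-pel positions. */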
1638 
1639 /**
1640  * For block x,y, determine which of the hpel planes to do bilinear
1641  * interpolation from and set src[] to the location in each hpel plane
1642  * to MC from.
1643  *
1644  * @return the index of the put_dirac_pixels_tab function to use
1645  * 0 for 1 plane (fpel,hpel), 1 for 2 planes (qpel), 2 for 4 planes (qpel), and 3 for epel
1646  */
1647 static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5],
1648  int x, int y, int ref, int plane)
1649 {
1650  Plane *p = &s->plane[plane];
1651  uint8_t **ref_hpel = s->ref_pics[ref]->hpel[plane];
1652  int motion_x = block->u.mv[ref][0];
1653  int motion_y = block->u.mv[ref][1];
1654  int mx, my, i, epel, nplanes = 0;
1655 
1656  if (plane) {
1657  motion_x >>= s->chroma_x_shift;
1658  motion_y >>= s->chroma_y_shift;
1659  }
1660 
1661  mx = motion_x & ~(-1U << s->mv_precision);
1662  my = motion_y & ~(-1U << s->mv_precision);
1663  motion_x >>= s->mv_precision;
1664  motion_y >>= s->mv_precision;
1665  /* normalize subpel coordinates to epel */
1666  /* TODO: template this function? */
1667  mx <<= 3 - s->mv_precision;
1668  my <<= 3 - s->mv_precision;
1669 
1670  x += motion_x;
1671  y += motion_y;
1672  epel = (mx|my)&1;
1673 
1674  /* hpel position */
1675  if (!((mx|my)&3)) {
1676  nplanes = 1;
1677  src[0] = ref_hpel[(my>>1)+(mx>>2)] + y*p->stride + x;
1678  } else {
1679  /* qpel or epel */
1680  nplanes = 4;
1681  for (i = 0; i < 4; i++)
1682  src[i] = ref_hpel[i] + y*p->stride + x;
1683 
1684  /* if we're interpolating in the right/bottom halves, adjust the planes as needed
1685  we increment x/y because the edge changes for half of the pixels */
1686  if (mx > 4) {
1687  src[0] += 1;
1688  src[2] += 1;
1689  x++;
1690  }
1691  if (my > 4) {
1692  src[0] += p->stride;
1693  src[1] += p->stride;
1694  y++;
1695  }
1696 
1697  /* hpel planes are:
1698  [0]: F [1]: H
1699  [2]: V [3]: C */
1700  if (!epel) {
1701  /* check if we really only need 2 planes since either mx or my is
1702  a hpel position. (epel weights of 0 handle this there) */
1703  if (!(mx&3)) {
1704  /* mx == 0: average [0] and [2]
1705  mx == 4: average [1] and [3] */
1706  src[!mx] = src[2 + !!mx];
1707  nplanes = 2;
1708  } else if (!(my&3)) {
1709  src[0] = src[(my>>1) ];
1710  src[1] = src[(my>>1)+1];
1711  nplanes = 2;
1712  }
1713  } else {
1714  /* adjust the ordering if needed so the weights work */
1715  if (mx > 4) {
1716  FFSWAP(const uint8_t *, src[0], src[1]);
1717  FFSWAP(const uint8_t *, src[2], src[3]);
1718  }
1719  if (my > 4) {
1720  FFSWAP(const uint8_t *, src[0], src[2]);
1721  FFSWAP(const uint8_t *, src[1], src[3]);
1722  }
1723  src[4] = epel_weights[my&3][mx&3];
1724  }
1725  }
1726 
1727  /* fixme: v/h _edge_pos */
1728  if (x + p->xblen > p->width +EDGE_WIDTH/2 ||
1729  y + p->yblen > p->height+EDGE_WIDTH/2 ||
1730  x < 0 || y < 0) {
1731  for (i = 0; i < nplanes; i++) {
1732  s->vdsp.emulated_edge_mc(s->edge_emu_buffer[i], src[i],
1733  p->stride, p->stride,
1734  p->xblen, p->yblen, x, y,
1735  p->width+EDGE_WIDTH/2, p->height+EDGE_WIDTH/2);
1736  src[i] = s->edge_emu_buffer[i];
1737  }
1738  }
1739  return (nplanes>>1) + epel;
1740 }
1741 
1742 static void add_dc(uint16_t *dst, int dc, int stride,
1743  uint8_t *obmc_weight, int xblen, int yblen)
1744 {
1745  int x, y;
1746  dc += 128;
1747 
1748  for (y = 0; y < yblen; y++) {
1749  for (x = 0; x < xblen; x += 2) {
1750  dst[x ] += dc * obmc_weight[x ];
1751  dst[x+1] += dc * obmc_weight[x+1];
1752  }
1753  dst += stride;
1754  obmc_weight += MAX_BLOCKSIZE;
1755  }
1756 }
1757 
1758 static void block_mc(DiracContext *s, DiracBlock *block,
1759                      uint16_t *mctmp, uint8_t *obmc_weight,
1760  int plane, int dstx, int dsty)
1761 {
1762  Plane *p = &s->plane[plane];
1763  const uint8_t *src[5];
1764  int idx;
1765 
1766  switch (block->ref&3) {
1767  case 0: /* DC */
1768  add_dc(mctmp, block->u.dc[plane], p->stride, obmc_weight, p->xblen, p->yblen);
1769  return;
1770  case 1:
1771  case 2:
1772  idx = mc_subpel(s, block, src, dstx, dsty, (block->ref&3)-1, plane);
1773  s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1774  if (s->weight_func)
1775  s->weight_func(s->mcscratch, p->stride, s->weight_log2denom,
1776  s->weight[0] + s->weight[1], p->yblen);
1777  break;
1778  case 3:
1779  idx = mc_subpel(s, block, src, dstx, dsty, 0, plane);
1780  s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1781  idx = mc_subpel(s, block, src, dstx, dsty, 1, plane);
1782  if (s->biweight_func) {
1783  /* fixme: +32 is a quick hack */
1784  s->put_pixels_tab[idx](s->mcscratch + 32, src, p->stride, p->yblen);
1785  s->biweight_func(s->mcscratch, s->mcscratch+32, p->stride, s->weight_log2denom,
1786  s->weight[0], s->weight[1], p->yblen);
1787  } else
1788  s->avg_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1789  break;
1790  }
1791  s->add_obmc(mctmp, s->mcscratch, p->stride, obmc_weight, p->yblen);
1792 }
1793 
1794 static void mc_row(DiracContext *s, DiracBlock *block, uint16_t *mctmp, int plane, int dsty)
1795 {
1796  Plane *p = &s->plane[plane];
1797  int x, dstx = p->xbsep - p->xoffset;
1798 
1799  block_mc(s, block, mctmp, s->obmc_weight[0], plane, -p->xoffset, dsty);
1800  mctmp += p->xbsep;
1801 
1802  for (x = 1; x < s->blwidth-1; x++) {
1803  block_mc(s, block+x, mctmp, s->obmc_weight[1], plane, dstx, dsty);
1804  dstx += p->xbsep;
1805  mctmp += p->xbsep;
1806  }
1807  block_mc(s, block+x, mctmp, s->obmc_weight[2], plane, dstx, dsty);
1808 }
1809 
1810 static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen, int yblen)
1811 {
1812  int idx = 0;
1813  if (xblen > 8)
1814  idx = 1;
1815  if (xblen > 16)
1816  idx = 2;
1817 
1818  memcpy(s->put_pixels_tab, s->diracdsp.put_dirac_pixels_tab[idx], sizeof(s->put_pixels_tab));
1819  memcpy(s->avg_pixels_tab, s->diracdsp.avg_dirac_pixels_tab[idx], sizeof(s->avg_pixels_tab));
1820  s->add_obmc = s->diracdsp.add_dirac_obmc[idx];
1821  if (s->weight_log2denom > 1 || s->weight[0] != 1 || s->weight[1] != 1) {
1822  s->weight_func = s->diracdsp.weight_dirac_pixels_tab[idx];
1823  s->biweight_func = s->diracdsp.biweight_dirac_pixels_tab[idx];
1824  } else {
1825  s->weight_func = NULL;
1826  s->biweight_func = NULL;
1827  }
1828 }
1829 
1830 static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
1831 {
1832  /* chroma allocates an edge of 8 when subsampled
1833  which for 4:2:2 means an h edge of 16 and v edge of 8
1834  just use 8 for everything for the moment */
1835  int i, edge = EDGE_WIDTH/2;
1836 
1837  ref->hpel[plane][0] = ref->avframe->data[plane];
1838  s->mpvencdsp.draw_edges(ref->hpel[plane][0], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM); /* EDGE_TOP | EDGE_BOTTOM values just copied to make it build, this needs to be ensured */
1839 
1840  /* no need for hpel if we only have fpel vectors */
1841  if (!s->mv_precision)
1842  return 0;
1843 
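 /* hpel[plane][0] is the decoded plane itself; [1..3] are filled below with the
  * three half-pel interpolated planes that mc_subpel() selects from for sub-pel
  * motion compensation */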
1844  for (i = 1; i < 4; i++) {
1845  if (!ref->hpel_base[plane][i])
1846  ref->hpel_base[plane][i] = av_malloc((height+2*edge) * ref->avframe->linesize[plane] + 32);
1847  if (!ref->hpel_base[plane][i]) {
1848  return AVERROR(ENOMEM);
1849  }
1850  /* we need to be 16-byte aligned even for chroma */
1851  ref->hpel[plane][i] = ref->hpel_base[plane][i] + edge*ref->avframe->linesize[plane] + 16;
1852  }
1853 
1854  if (!ref->interpolated[plane]) {
1855  s->diracdsp.dirac_hpel_filter(ref->hpel[plane][1], ref->hpel[plane][2],
1856  ref->hpel[plane][3], ref->hpel[plane][0],
1857  ref->avframe->linesize[plane], width, height);
1858  s->mpvencdsp.draw_edges(ref->hpel[plane][1], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1859  s->mpvencdsp.draw_edges(ref->hpel[plane][2], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1860  s->mpvencdsp.draw_edges(ref->hpel[plane][3], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1861  }
1862  ref->interpolated[plane] = 1;
1863 
1864  return 0;
1865 }
1866 
1867 /**
1868  * Dirac Specification ->
1869  * 13.0 Transform data syntax. transform_data()
1870  */
1871 static int dirac_decode_frame_internal(DiracContext *s)
1872 {
1873  DWTContext d;
1874  int y, i, comp, dsty;
1875  int ret;
1876 
1877  if (s->low_delay) {
1878  /* [DIRAC_STD] 13.5.1 low_delay_transform_data() */
1879  if (!s->hq_picture) {
1880  for (comp = 0; comp < 3; comp++) {
1881  Plane *p = &s->plane[comp];
1882  memset(p->idwt.buf, 0, p->idwt.stride * p->idwt.height);
1883  }
1884  }
1885  if (!s->zero_res) {
1886  if ((ret = decode_lowdelay(s)) < 0)
1887  return ret;
1888  }
1889  }
1890 
1891  for (comp = 0; comp < 3; comp++) {
1892  Plane *p = &s->plane[comp];
1893  uint8_t *frame = s->current_picture->avframe->data[comp];
1894 
1895  /* FIXME: small resolutions */
1896  for (i = 0; i < 4; i++)
1897  s->edge_emu_buffer[i] = s->edge_emu_buffer_base + i*FFALIGN(p->width, 16);
1898 
1899  if (!s->zero_res && !s->low_delay)
1900  {
1901  memset(p->idwt.buf, 0, p->idwt.stride * p->idwt.height);
1902  ret = decode_component(s, comp); /* [DIRAC_STD] 13.4.1 core_transform_data() */
1903  if (ret < 0)
1904  return ret;
1905  }
1906  ret = ff_spatial_idwt_init(&d, &p->idwt, s->wavelet_idx+2,
1907  s->wavelet_depth, s->bit_depth);
1908  if (ret < 0)
1909  return ret;
1910 
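 /* intra pictures go straight from the IDWT to the output frame; inter pictures
  * add the motion-compensated prediction to the IDWT result row by row below */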
1911  if (!s->num_refs) { /* intra */
1912  for (y = 0; y < p->height; y += 16) {
1913  int idx = (s->bit_depth - 8) >> 1;
1914  ff_spatial_idwt_slice2(&d, y+16); /* decode */
1915  s->diracdsp.put_signed_rect_clamped[idx](frame + y*p->stride,
1916  p->stride,
1917  p->idwt.buf + y*p->idwt.stride,
1918  p->idwt.stride, p->width, 16);
1919  }
1920  } else { /* inter */
1921  int rowheight = p->ybsep*p->stride;
1922 
1923  select_dsp_funcs(s, p->width, p->height, p->xblen, p->yblen);
1924 
1925  for (i = 0; i < s->num_refs; i++) {
1926  int ret = interpolate_refplane(s, s->ref_pics[i], comp, p->width, p->height);
1927  if (ret < 0)
1928  return ret;
1929  }
1930 
1931  memset(s->mctmp, 0, 4*p->yoffset*p->stride);
1932 
1933  dsty = -p->yoffset;
1934  for (y = 0; y < s->blheight; y++) {
1935  int h = 0,
1936  start = FFMAX(dsty, 0);
1937  uint16_t *mctmp = s->mctmp + y*rowheight;
1938  DiracBlock *blocks = s->blmotion + y*s->blwidth;
1939 
1940  init_obmc_weights(s, p, y);
1941 
1942  if (y == s->blheight-1 || start+p->ybsep > p->height)
1943  h = p->height - start;
1944  else
1945  h = p->ybsep - (start - dsty);
1946  if (h < 0)
1947  break;
1948 
1949  memset(mctmp+2*p->yoffset*p->stride, 0, 2*rowheight);
1950  mc_row(s, blocks, mctmp, comp, dsty);
1951 
1952  mctmp += (start - dsty)*p->stride + p->xoffset;
1953  ff_spatial_idwt_slice2(&d, start + h); /* decode */
1954  /* NOTE: add_rect_clamped hasn't been templated hence the shifts.
1955  * idwt.stride is passed as pixels, not in bytes as in the rest of the decoder */
1956  s->diracdsp.add_rect_clamped(frame + start*p->stride, mctmp, p->stride,
1957  (int16_t*)(p->idwt.buf) + start*(p->idwt.stride >> 1), (p->idwt.stride >> 1), p->width, h);
1958 
1959  dsty += p->ybsep;
1960  }
1961  }
1962  }
1963 
1964 
1965  return 0;
1966 }
1967 
1968 static int get_buffer_with_edge(AVCodecContext *avctx, AVFrame *f, int flags)
1969 {
1970  int ret, i;
1971  int chroma_x_shift, chroma_y_shift;
1972  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_x_shift,
1973  &chroma_y_shift);
1974  if (ret < 0)
1975  return ret;
1976 
1977  f->width = avctx->width + 2 * EDGE_WIDTH;
1978  f->height = avctx->height + 2 * EDGE_WIDTH + 2;
1979  ret = ff_get_buffer(avctx, f, flags);
1980  if (ret < 0)
1981  return ret;
1982 
1983  for (i = 0; f->data[i]; i++) {
1984  int offset = (EDGE_WIDTH >> (i && i<3 ? chroma_y_shift : 0)) *
1985  f->linesize[i] + 32;
1986  f->data[i] += offset;
1987  }
1988  f->width = avctx->width;
1989  f->height = avctx->height;
1990 
1991  return 0;
1992 }
1993 
1994 /**
1995  * Dirac Specification ->
1996  * 11.1.1 Picture Header. picture_header()
1997  */
1998 static int dirac_decode_picture_header(DiracContext *s)
1999 {
2000  unsigned retire, picnum;
2001  int i, j, ret;
2002  int64_t refdist, refnum;
2003  GetBitContext *gb = &s->gb;
2004 
2005  /* [DIRAC_STD] 11.1.1 Picture Header. picture_header() PICTURE_NUM */
2006  picnum = s->current_picture->avframe->display_picture_number = get_bits_long(gb, 32);
2007 
2008 
2009  av_log(s->avctx,AV_LOG_DEBUG,"PICTURE_NUM: %d\n",picnum);
2010 
2011  /* if this is the first keyframe after a sequence header, start our
2012  reordering from here */
2013  if (s->frame_number < 0)
2014  s->frame_number = picnum;
2015 
2016  s->ref_pics[0] = s->ref_pics[1] = NULL;
2017  for (i = 0; i < s->num_refs; i++) {
2018  refnum = (picnum + dirac_get_se_golomb(gb)) & 0xFFFFFFFF;
2019  refdist = INT64_MAX;
2020 
2021  /* find the closest reference to the one we want */
2022  /* Jordi: this is needed if the referenced picture hasn't yet arrived */
2023  for (j = 0; j < MAX_REFERENCE_FRAMES && refdist; j++)
2024  if (s->ref_frames[j]
2025  && FFABS(s->ref_frames[j]->avframe->display_picture_number - refnum) < refdist) {
2026  s->ref_pics[i] = s->ref_frames[j];
2027  refdist = FFABS(s->ref_frames[j]->avframe->display_picture_number - refnum);
2028  }
2029 
2030  if (!s->ref_pics[i] || refdist)
2031  av_log(s->avctx, AV_LOG_DEBUG, "Reference not found\n");
2032 
2033  /* if there were no references at all, allocate one */
2034  if (!s->ref_pics[i])
2035  for (j = 0; j < MAX_FRAMES; j++)
2036  if (!s->all_frames[j].avframe->data[0]) {
2037  s->ref_pics[i] = &s->all_frames[j];
2038  ret = get_buffer_with_edge(s->avctx, s->ref_pics[i]->avframe, AV_GET_BUFFER_FLAG_REF);
2039  if (ret < 0)
2040  return ret;
2041  break;
2042  }
2043 
2044  if (!s->ref_pics[i]) {
2045  av_log(s->avctx, AV_LOG_ERROR, "Reference could not be allocated\n");
2046  return AVERROR_INVALIDDATA;
2047  }
2048 
2049  }
2050 
2051  /* retire the reference frames that are not used anymore */
2052  if (s->current_picture->reference) {
2053  retire = (picnum + dirac_get_se_golomb(gb)) & 0xFFFFFFFF;
2054  if (retire != picnum) {
2055  DiracFrame *retire_pic = remove_frame(s->ref_frames, retire);
2056 
2057  if (retire_pic)
2058  retire_pic->reference &= DELAYED_PIC_REF;
2059  else
2060  av_log(s->avctx, AV_LOG_DEBUG, "Frame to retire not found\n");
2061  }
2062 
2063  /* if reference array is full, remove the oldest as per the spec */
2064  while (add_frame(s->ref_frames, MAX_REFERENCE_FRAMES, s->current_picture)) {
2065  av_log(s->avctx, AV_LOG_ERROR, "Reference frame overflow\n");
2066  remove_frame(s->ref_frames, s->ref_frames[0]->avframe->display_picture_number)->reference &= DELAYED_PIC_REF;
2067  }
2068  }
2069 
2070  if (s->num_refs) {
2071  ret = dirac_unpack_prediction_parameters(s); /* [DIRAC_STD] 11.2 Picture Prediction Data. picture_prediction() */
2072  if (ret < 0)
2073  return ret;
2074  ret = dirac_unpack_block_motion_data(s); /* [DIRAC_STD] 12. Block motion data syntax */
2075  if (ret < 0)
2076  return ret;
2077  }
2078  ret = dirac_unpack_idwt_params(s); /* [DIRAC_STD] 11.3 Wavelet transform data */
2079  if (ret < 0)
2080  return ret;
2081 
2082  init_planes(s);
2083  return 0;
2084 }
2085 
2086 static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
2087 {
2088  DiracFrame *out = s->delay_frames[0];
2089  int i, out_idx = 0;
2090  int ret;
2091 
2092  /* find frame with lowest picture number */
2093  for (i = 1; s->delay_frames[i]; i++)
2094  if (s->delay_frames[i]->avframe->display_picture_number < out->avframe->display_picture_number) {
2095  out = s->delay_frames[i];
2096  out_idx = i;
2097  }
2098 
2099  for (i = out_idx; s->delay_frames[i]; i++)
2100  s->delay_frames[i] = s->delay_frames[i+1];
2101 
2102  if (out) {
2103  out->reference ^= DELAYED_PIC_REF;
2104  if((ret = av_frame_ref(picture, out->avframe)) < 0)
2105  return ret;
2106  *got_frame = 1;
2107  }
2108 
2109  return 0;
2110 }
2111 
2112 /**
2113  * Dirac Specification ->
2114  * 9.6 Parse Info Header Syntax. parse_info()
2115  * 4 byte start code + byte parse code + 4 byte size + 4 byte previous size
2116  */
2117 #define DATA_UNIT_HEADER_SIZE 13
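 /* in other words: bytes 0-3 carry the 'BBCD' prefix, byte 4 the parse code,
  * bytes 5-8 the data unit size and bytes 9-12 the previous unit's size (both
  * big-endian), which is how buf[] is indexed below */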
2118 
2119 /* [DIRAC_STD] dirac_decode_data_unit makes reference to the while defined in 9.3
2120  inside the function parse_sequence() */
2121 static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size)
2122 {
2123  DiracContext *s = avctx->priv_data;
2124  DiracFrame *pic = NULL;
2125  AVDiracSeqHeader *dsh;
2126  int ret, i;
2127  uint8_t parse_code;
2128  unsigned tmp;
2129 
2130  if (size < DATA_UNIT_HEADER_SIZE)
2131  return AVERROR_INVALIDDATA;
2132 
2133  parse_code = buf[4];
2134 
2135  init_get_bits(&s->gb, &buf[13], 8*(size - DATA_UNIT_HEADER_SIZE));
2136 
2137  if (parse_code == DIRAC_PCODE_SEQ_HEADER) {
2138  if (s->seen_sequence_header)
2139  return 0;
2140 
2141  /* [DIRAC_STD] 10. Sequence header */
2142  ret = av_dirac_parse_sequence_header(&dsh, buf + DATA_UNIT_HEADER_SIZE, size - DATA_UNIT_HEADER_SIZE, avctx);
2143  if (ret < 0) {
2144  av_log(avctx, AV_LOG_ERROR, "error parsing sequence header");
2145  return ret;
2146  }
2147 
2148  if (CALC_PADDING((int64_t)dsh->width, MAX_DWT_LEVELS) * CALC_PADDING((int64_t)dsh->height, MAX_DWT_LEVELS) * 5LL > avctx->max_pixels)
2149  ret = AVERROR(ERANGE);
2150  if (ret >= 0)
2151  ret = ff_set_dimensions(avctx, dsh->width, dsh->height);
2152  if (ret < 0) {
2153  av_freep(&dsh);
2154  return ret;
2155  }
2156 
2157  ff_set_sar(avctx, dsh->sample_aspect_ratio);
2158  avctx->pix_fmt = dsh->pix_fmt;
2159  avctx->color_range = dsh->color_range;
2160  avctx->color_trc = dsh->color_trc;
2161  avctx->color_primaries = dsh->color_primaries;
2162  avctx->colorspace = dsh->colorspace;
2163  avctx->profile = dsh->profile;
2164  avctx->level = dsh->level;
2165  avctx->framerate = dsh->framerate;
2166  s->bit_depth = dsh->bit_depth;
2167  s->version.major = dsh->version.major;
2168  s->version.minor = dsh->version.minor;
2169  s->seq = *dsh;
2170  av_freep(&dsh);
2171 
2172  s->pshift = s->bit_depth > 8;
2173 
2174  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
2175  &s->chroma_x_shift,
2176  &s->chroma_y_shift);
2177  if (ret < 0)
2178  return ret;
2179 
2180  ret = alloc_sequence_buffers(s);
2181  if (ret < 0)
2182  return ret;
2183 
2184  s->seen_sequence_header = 1;
2185  } else if (parse_code == DIRAC_PCODE_END_SEQ) { /* [DIRAC_STD] End of Sequence */
2186  free_sequence_buffers(s);
2187  s->seen_sequence_header = 0;
2188  } else if (parse_code == DIRAC_PCODE_AUX) {
2189  if (buf[13] == 1) { /* encoder implementation/version */
2190  int ver[3];
2191  /* versions older than 1.0.8 don't store quant delta for
2192  subbands with only one codeblock */
2193  if (sscanf(buf+14, "Schroedinger %d.%d.%d", ver, ver+1, ver+2) == 3)
2194  if (ver[0] == 1 && ver[1] == 0 && ver[2] <= 7)
2195  s->old_delta_quant = 1;
2196  }
2197  } else if (parse_code & 0x8) { /* picture data unit */
2198  if (!s->seen_sequence_header) {
2199  av_log(avctx, AV_LOG_DEBUG, "Dropping frame without sequence header\n");
2200  return AVERROR_INVALIDDATA;
2201  }
2202 
2203  /* find an unused frame */
2204  for (i = 0; i < MAX_FRAMES; i++)
2205  if (s->all_frames[i].avframe->data[0] == NULL)
2206  pic = &s->all_frames[i];
2207  if (!pic) {
2208  av_log(avctx, AV_LOG_ERROR, "framelist full\n");
2209  return AVERROR_INVALIDDATA;
2210  }
2211 
2212  av_frame_unref(pic->avframe);
2213 
2214  /* [DIRAC_STD] Defined in 9.6.1 ... */
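 /* decoding of the picture parse code: bit 3 (0x08) is what made this a picture unit,
  * the low two bits give the number of references, bit 2 flags a reference picture,
  * and the upper bits select arithmetic vs. VLC coding and the core / low-delay / HQ
  * syntaxes tested below */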
2215  tmp = parse_code & 0x03; /* [DIRAC_STD] num_refs() */
2216  if (tmp > 2) {
2217  av_log(avctx, AV_LOG_ERROR, "num_refs of 3\n");
2218  return AVERROR_INVALIDDATA;
2219  }
2220  s->num_refs = tmp;
2221  s->is_arith = (parse_code & 0x48) == 0x08; /* [DIRAC_STD] using_ac() */
2222  s->low_delay = (parse_code & 0x88) == 0x88; /* [DIRAC_STD] is_low_delay() */
2223  s->core_syntax = (parse_code & 0x88) == 0x08; /* [DIRAC_STD] is_core_syntax() */
2224  s->ld_picture = (parse_code & 0xF8) == 0xC8; /* [DIRAC_STD] is_ld_picture() */
2225  s->hq_picture = (parse_code & 0xF8) == 0xE8; /* [DIRAC_STD] is_hq_picture() */
2226  s->dc_prediction = (parse_code & 0x28) == 0x08; /* [DIRAC_STD] using_dc_prediction() */
2227  pic->reference = (parse_code & 0x0C) == 0x0C; /* [DIRAC_STD] is_reference() */
2228  pic->avframe->key_frame = s->num_refs == 0; /* [DIRAC_STD] is_intra() */
2229  pic->avframe->pict_type = s->num_refs + 1; /* Definition of AVPictureType in avutil.h */
2230 
2231  /* VC-2 Low Delay has a different parse code than the Dirac Low Delay */
2232  if (s->version.minor == 2 && parse_code == 0x88)
2233  s->ld_picture = 1;
2234 
2235  if (s->low_delay && !(s->ld_picture || s->hq_picture) ) {
2236  av_log(avctx, AV_LOG_ERROR, "Invalid low delay flag\n");
2237  return AVERROR_INVALIDDATA;
2238  }
2239 
2240  if ((ret = get_buffer_with_edge(avctx, pic->avframe, (parse_code & 0x0C) == 0x0C ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
2241  return ret;
2242  s->current_picture = pic;
2243  s->plane[0].stride = pic->avframe->linesize[0];
2244  s->plane[1].stride = pic->avframe->linesize[1];
2245  s->plane[2].stride = pic->avframe->linesize[2];
2246 
2247  if (alloc_buffers(s, FFMAX3(FFABS(s->plane[0].stride), FFABS(s->plane[1].stride), FFABS(s->plane[2].stride))) < 0)
2248  return AVERROR(ENOMEM);
2249 
2250  /* [DIRAC_STD] 11.1 Picture parse. picture_parse() */
2251  ret = dirac_decode_picture_header(s);
2252  if (ret < 0)
2253  return ret;
2254 
2255  /* [DIRAC_STD] 13.0 Transform data syntax. transform_data() */
2256  ret = dirac_decode_frame_internal(s);
2257  if (ret < 0)
2258  return ret;
2259  }
2260  return 0;
2261 }
2262 
2263 static int dirac_decode_frame(AVCodecContext *avctx, AVFrame *picture,
2264  int *got_frame, AVPacket *pkt)
2265 {
2266  DiracContext *s = avctx->priv_data;
2267  const uint8_t *buf = pkt->data;
2268  int buf_size = pkt->size;
2269  int i, buf_idx = 0;
2270  int ret;
2271  unsigned data_unit_size;
2272 
2273  /* release unused frames */
2274  for (i = 0; i < MAX_FRAMES; i++)
2275  if (s->all_frames[i].avframe->data[0] && !s->all_frames[i].reference) {
2276  av_frame_unref(s->all_frames[i].avframe);
2277  memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
2278  }
2279 
2280  s->current_picture = NULL;
2281  *got_frame = 0;
2282 
2283  /* end of stream, so flush delayed pics */
2284  if (buf_size == 0)
2285  return get_delayed_pic(s, picture, got_frame);
2286 
2287  for (;;) {
2288  /*[DIRAC_STD] Here starts the code from parse_info() defined in 9.6
2289  [DIRAC_STD] PARSE_INFO_PREFIX = "BBCD" as defined in ISO/IEC 646
2290  BBCD start code search */
2291  for (; buf_idx + DATA_UNIT_HEADER_SIZE < buf_size; buf_idx++) {
2292  if (buf[buf_idx ] == 'B' && buf[buf_idx+1] == 'B' &&
2293  buf[buf_idx+2] == 'C' && buf[buf_idx+3] == 'D')
2294  break;
2295  }
2296  /* BBCD found or end of data */
2297  if (buf_idx + DATA_UNIT_HEADER_SIZE >= buf_size)
2298  break;
2299 
2300  data_unit_size = AV_RB32(buf+buf_idx+5);
2301  if (data_unit_size > buf_size - buf_idx || !data_unit_size) {
2302  if(data_unit_size > buf_size - buf_idx)
2303  av_log(s->avctx, AV_LOG_ERROR,
2304  "Data unit with size %d is larger than input buffer, discarding\n",
2305  data_unit_size);
2306  buf_idx += 4;
2307  continue;
2308  }
2309  /* [DIRAC_STD] dirac_decode_data_unit makes reference to the while defined in 9.3 inside the function parse_sequence() */
2310  ret = dirac_decode_data_unit(avctx, buf+buf_idx, data_unit_size);
2311  if (ret < 0)
2312  {
2313  av_log(s->avctx, AV_LOG_ERROR,"Error in dirac_decode_data_unit\n");
2314  return ret;
2315  }
2316  buf_idx += data_unit_size;
2317  }
2318 
2319  if (!s->current_picture)
2320  return buf_size;
2321 
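 /* output reordering: a picture arriving ahead of the expected display number is
  * parked in delay_frames, and the already-decoded frame matching the expected
  * number (if any) is returned instead; an exact match is returned directly */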
2322  if (s->current_picture->avframe->display_picture_number > s->frame_number) {
2323  DiracFrame *delayed_frame = remove_frame(s->delay_frames, s->frame_number);
2324 
2325  s->current_picture->reference |= DELAYED_PIC_REF;
2326 
2327  if (add_frame(s->delay_frames, MAX_DELAY, s->current_picture)) {
2328  int min_num = s->delay_frames[0]->avframe->display_picture_number;
2329  /* Too many delayed frames, so we display the frame with the lowest pts */
2330  av_log(avctx, AV_LOG_ERROR, "Delay frame overflow\n");
2331 
2332  for (i = 1; s->delay_frames[i]; i++)
2333  if (s->delay_frames[i]->avframe->display_picture_number < min_num)
2334  min_num = s->delay_frames[i]->avframe->display_picture_number;
2335 
2336  delayed_frame = remove_frame(s->delay_frames, min_num);
2337  add_frame(s->delay_frames, MAX_DELAY, s->current_picture);
2338  }
2339 
2340  if (delayed_frame) {
2341  delayed_frame->reference ^= DELAYED_PIC_REF;
2342  if((ret = av_frame_ref(picture, delayed_frame->avframe)) < 0)
2343  return ret;
2344  *got_frame = 1;
2345  }
2346  } else if (s->current_picture->avframe->display_picture_number == s->frame_number) {
2347  /* The right frame at the right time :-) */
2348  if((ret = av_frame_ref(picture, s->current_picture->avframe)) < 0)
2349  return ret;
2350  *got_frame = 1;
2351  }
2352 
2353  if (*got_frame)
2354  s->frame_number = picture->display_picture_number + 1LL;
2355 
2356  return buf_idx;
2357 }
2358 
2359 const FFCodec ff_dirac_decoder = {
2360  .p.name = "dirac",
2361  .p.long_name = NULL_IF_CONFIG_SMALL("BBC Dirac VC-2"),
2362  .p.type = AVMEDIA_TYPE_VIDEO,
2363  .p.id = AV_CODEC_ID_DIRAC,
2364  .priv_data_size = sizeof(DiracContext),
2365  .init = dirac_decode_init,
2366  .close = dirac_decode_end,
2367  FF_CODEC_DECODE_CB(dirac_decode_frame),
2368  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2369  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
2370  .flush = dirac_decode_flush,
2371 };
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
DWTPlane::buf
uint8_t * buf
Definition: dirac_dwt.h:41
DATA_UNIT_HEADER_SIZE
#define DATA_UNIT_HEADER_SIZE
Dirac Specification -> 9.6 Parse Info Header Syntax.
Definition: diracdec.c:2117
DiracContext::put_pixels_tab
void(* put_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
Definition: diracdec.c:229
DiracContext::blmotion
DiracBlock * blmotion
Definition: diracdec.c:218
av_dirac_parse_sequence_header
int av_dirac_parse_sequence_header(AVDiracSeqHeader **pdsh, const uint8_t *buf, size_t buf_size, void *log_ctx)
Parse a Dirac sequence header.
Definition: dirac.c:400
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
DiracContext::num_y
unsigned num_y
Definition: diracdec.c:175
level
uint8_t level
Definition: svq3.c:206
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:839
DiracContext::blwidth
int blwidth
Definition: diracdec.c:212
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DiracVersionInfo
Definition: dirac.h:76
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:966
SliceCoeffs::left
int left
Definition: diracdec.c:815
mem_internal.h
out
FILE * out
Definition: movenc.c:54
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
comp
static void comp(unsigned char *dst, ptrdiff_t dst_stride, unsigned char *src, ptrdiff_t src_stride, int add)
Definition: eamad.c:86
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
thread.h
DiracBlock::ref
uint8_t ref
Definition: diracdec.c:89
subband_hh
@ subband_hh
Definition: diracdec.c:247
CTX_MV_DATA
#define CTX_MV_DATA
Definition: dirac_arith.h:71
MAX_DWT_LEVELS
#define MAX_DWT_LEVELS
The spec limits the number of wavelet decompositions to 4 for both level 1 (VC-2) and 128 (long-gop d...
Definition: dirac.h:45
free_sequence_buffers
static void free_sequence_buffers(DiracContext *s)
Definition: diracdec.c:353
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
epel_weights
static const uint8_t epel_weights[4][4][4]
Definition: diracdec.c:1620
AV_CODEC_ID_DIRAC
@ AV_CODEC_ID_DIRAC
Definition: codec_id.h:166
dirac_decode_picture_header
static int dirac_decode_picture_header(DiracContext *s)
Dirac Specification -> 11.1.1 Picture Header.
Definition: diracdec.c:1998
SliceCoeffs::tot
int tot
Definition: diracdec.c:819
mv
static const int8_t mv[256][2]
Definition: 4xm.c:80
DiracContext::wavelet_idx
unsigned wavelet_idx
Definition: diracdec.c:165
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
dirac_unpack_prediction_parameters
static int dirac_unpack_prediction_parameters(DiracContext *s)
Unpack the motion compensation parameters Dirac Specification -> 11.2 Picture prediction data.
Definition: diracdec.c:1104
DIRAC_REF_MASK_REF1
#define DIRAC_REF_MASK_REF1
DiracBlock->ref flags, if set then the block does MC from the given ref.
Definition: diracdec.c:61
DiracContext::avctx
AVCodecContext * avctx
Definition: diracdec.c:137
get_interleaved_ue_golomb
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:143
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:111
AVDiracSeqHeader::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: dirac.h:109
DiracVersionInfo::major
int major
Definition: dirac.h:77
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
SubBand::stride
int stride
Definition: diracdec.c:95
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
DWTPlane
Definition: dirac_dwt.h:37
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:959
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:374
SubBand::width
int width
Definition: cfhd.h:112
SubBand::level
int level
Definition: diracdec.c:93
DiracContext::biweight_func
dirac_biweight_func biweight_func
Definition: diracdec.c:233
CTX_SB_F1
#define CTX_SB_F1
Definition: dirac_arith.h:65
CTX_ZERO_BLOCK
@ CTX_ZERO_BLOCK
Definition: dirac_arith.h:54
b
#define b
Definition: input.c:34
DiracContext::perspective_exp
unsigned perspective_exp
Definition: diracdec.c:204
DiracContext::bit_depth
int bit_depth
Definition: diracdec.c:150
decode_lowdelay_slice
static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
Dirac Specification -> 13.5.2 Slices.
Definition: diracdec.c:776
FFCodec
Definition: codec_internal.h:112
DiracContext::mpvencdsp
MpegvideoEncDSPContext mpvencdsp
Definition: diracdec.c:138
dirac_biweight_func
void(* dirac_biweight_func)(uint8_t *dst, const uint8_t *src, int stride, int log2_denom, int weightd, int weights, int h)
Definition: diracdsp.h:28
init_planes
static void init_planes(DiracContext *s)
Definition: diracdec.c:1050
dirac_dwt.h
DIRAC_REF_MASK_GLOBAL
#define DIRAC_REF_MASK_GLOBAL
Definition: diracdec.c:63
dirac_arith_init
static AVOnce dirac_arith_init
Definition: diracdec.c:385
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
DiracContext::delay_frames
DiracFrame * delay_frames[MAX_DELAY+1]
Definition: diracdec.c:239
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
ff_dirac_qscale_tab
const int32_t ff_dirac_qscale_tab[116]
Definition: diractab.c:34
AVDiracSeqHeader::color_range
enum AVColorRange color_range
Definition: dirac.h:107
MAX_DELAY
#define MAX_DELAY
Definition: diracdec.c:53
DiracArith
Definition: dirac_arith.h:75
dirac_get_arith_int
static int dirac_get_arith_int(DiracArith *c, int follow_ctx, int data_ctx)
Definition: dirac_arith.h:190
codeblock
static int codeblock(DiracContext *s, SubBand *b, GetBitContext *gb, DiracArith *c, int left, int right, int top, int bottom, int blockcnt_one, int is_arith)
Decode the coeffs in the rectangle defined by left, right, top, bottom [DIRAC_STD] 13....
Definition: diracdec.c:489
CHECKEDREAD
#define CHECKEDREAD(dst, cond, errmsg)
DiracContext::mcscratch
uint8_t * mcscratch
Definition: diracdec.c:224
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:649
AVDiracSeqHeader::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: dirac.h:104
DiracContext::current_picture
DiracFrame * current_picture
Definition: diracdec.c:235
alloc_buffers
static int alloc_buffers(DiracContext *s, int stride)
Definition: diracdec.c:324
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
DiracContext::zrs
int zrs[2][2]
Definition: diracdec.c:201
init
static int init
Definition: av_tx.c:47
diractab.h
ff_dirac_default_qmat
const uint8_t ff_dirac_default_qmat[7][4][4]
Definition: diractab.c:24
A
#define A(x)
Definition: vp56_arith.h:28
DiracFrame
Definition: diracdec.c:76
CTX_DC_F1
#define CTX_DC_F1
Definition: dirac_arith.h:72
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:232
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1732
golomb.h
exp golomb vlc stuff
decode_subband_arith
static int decode_subband_arith(AVCodecContext *avctx, void *b)
Definition: diracdec.c:641
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
AVDiracSeqHeader::level
int level
Definition: dirac.h:101
SubBand::parent
struct SubBand * parent
Definition: diracdec.c:101
subband_lh
@ subband_lh
Definition: diracdec.c:246
DiracContext::ld_picture
int ld_picture
Definition: diracdec.c:158
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:116
b1
static double b1(void *priv, double x, double y)
Definition: vf_xfade.c:1771
DiracContext::edge_emu_buffer
uint8_t * edge_emu_buffer[4]
Definition: diracdec.c:220
AVDiracSeqHeader::version
DiracVersionInfo version
Definition: dirac.h:112
DiracContext::num_refs
int num_refs
Definition: diracdec.c:161
ff_spatial_idwt_init
int ff_spatial_idwt_init(DWTContext *d, DWTPlane *p, enum dwt_type type, int decomposition_count, int bit_depth)
Definition: dirac_dwt.c:35
U
#define U(x)
Definition: vp56_arith.h:37
DiracContext::sbheight
int sbheight
Definition: diracdec.c:215
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1463
FFSIGN
#define FFSIGN(a)
Definition: common.h:65
GetBitContext
Definition: get_bits.h:61
DiracContext::blheight
int blheight
Definition: diracdec.c:213
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:417
val
static double val(void *priv, double ch)
Definition: aeval.c:77
dirac_unpack_block_motion_data
static int dirac_unpack_block_motion_data(DiracContext *s)
Dirac Specification ->
Definition: diracdec.c:1502
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2690
decode_component
static int decode_component(DiracContext *s, int comp)
Dirac Specification -> [DIRAC_STD] 13.4.1 core_transform_data()
Definition: diracdec.c:658
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
DiracSlice::gb
GetBitContext gb
Definition: diracdec.c:130
pred_sbsplit
static int pred_sbsplit(uint8_t *sbsplit, int stride, int x, int y)
Definition: diracdec.c:1328
pred_block_dc
static void pred_block_dc(DiracBlock *block, int stride, int x, int y)
Definition: diracdec.c:1358
quant
static int quant(float coef, const float Q, const float rounding)
Quantize one coefficient.
Definition: aacenc_utils.h:59
dirac.h
select_dsp_funcs
static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen, int yblen)
Definition: diracdec.c:1810
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
diracdsp.h
DiracContext::hq_picture
int hq_picture
Definition: diracdec.c:157
DiracSlice::bytes
int bytes
Definition: diracdec.c:133
dirac_weight_func
void(* dirac_weight_func)(uint8_t *block, int stride, int log2_denom, int weight, int h)
Definition: diracdsp.h:27
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:952
DiracContext::perspective
int perspective[2]
Definition: diracdec.c:202
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:179
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVDiracSeqHeader::bit_depth
int bit_depth
Definition: dirac.h:113
av_cold
#define av_cold
Definition: attributes.h:90
DiracContext::sbwidth
int sbwidth
Definition: diracdec.c:214
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
coeff_unpack_golomb
static int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
Definition: diracdec.c:441
DiracContext::chroma_y_shift
int chroma_y_shift
Definition: diracdec.c:148
mask
static const uint16_t mask[17]
Definition: lzw.c:38
ff_dirac_decoder
const FFCodec ff_dirac_decoder
Definition: diracdec.c:2359
ROLLOFF
#define ROLLOFF(i)
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:254
s
#define s(width, name)
Definition: cbs_vp9.c:256
DiracSlice::slice_x
int slice_x
Definition: diracdec.c:131
DiracContext::zero_res
int zero_res
Definition: diracdec.c:153
DiracContext::mctmp
uint16_t * mctmp
Definition: diracdec.c:223
Plane::xbsep
uint8_t xbsep
Definition: diracdec.c:119
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:367
MAX_REFERENCE_FRAMES
#define MAX_REFERENCE_FRAMES
The spec limits this to 3 for frame coding, but in practice can be as high as 6.
Definition: diracdec.c:52
ff_dirac_init_arith_decoder
void ff_dirac_init_arith_decoder(DiracArith *c, GetBitContext *gb, int length)
Definition: dirac_arith.c:96
decode_subband_internal
static av_always_inline int decode_subband_internal(DiracContext *s, SubBand *b, int is_arith)
Dirac Specification -> 13.4.2 Non-skipped subbands.
Definition: diracdec.c:599
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVDiracSeqHeader::profile
int profile
Definition: dirac.h:100
CTX_DELTA_Q_F
@ CTX_DELTA_Q_F
Definition: dirac_arith.h:55
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
DiracContext::version
DiracVersionInfo version
Definition: diracdec.c:141
get_bits.h
DiracContext::size_scaler
uint64_t size_scaler
Definition: diracdec.c:196
DWTPlane::stride
int stride
Definition: dirac_dwt.h:40
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:1908
Plane::idwt
DWTPlane idwt
Definition: diracdec.c:109
bands
static const float bands[]
Definition: af_superequalizer.c:56
DiracContext::seen_sequence_header
int seen_sequence_header
Definition: diracdec.c:144
DiracContext::diracdsp
DiracDSPContext diracdsp
Definition: diracdec.c:140
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:64
DiracContext::globalmc
struct DiracContext::@55 globalmc[2]
if
if(ret)
Definition: filter_design.txt:179
DiracContext::weight
int16_t weight[2]
Definition: diracdec.c:209
AVDiracSeqHeader::framerate
AVRational framerate
Definition: dirac.h:103
av_realloc_f
#define av_realloc_f(p, o, n)
Definition: tableprint_vlc.h:32
DiracContext::slice_params_buf
DiracSlice * slice_params_buf
Definition: diracdec.c:181
DiracContext::edge_emu_buffer_base
uint8_t * edge_emu_buffer_base
Definition: diracdec.c:221
dirac_get_arith_bit
static int dirac_get_arith_bit(DiracArith *c, int ctx)
Definition: dirac_arith.h:133
mc_row
static void mc_row(DiracContext *s, DiracBlock *block, uint16_t *mctmp, int plane, int dsty)
Definition: diracdec.c:1794
ff_dirac_qoffset_inter_tab
const int ff_dirac_qoffset_inter_tab[122]
Definition: diractab.c:72
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:177
DIVRNDUP
#define DIVRNDUP(a, b)
Definition: diracdec.c:74
decode_hq_slice_row
static int decode_hq_slice_row(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
Definition: diracdec.c:917
DiracContext::weight_func
dirac_weight_func weight_func
Definition: diracdec.c:232
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
dirac_decode_frame_internal
static int dirac_decode_frame_internal(DiracContext *s)
Dirac Specification -> 13.0 Transform data syntax.
Definition: diracdec.c:1871
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:973
SliceCoeffs::tot_v
int tot_v
Definition: diracdec.c:818
decode_lowdelay
static int decode_lowdelay(DiracContext *s)
Dirac Specification -> 13.5.1 low_delay_transform_data()
Definition: diracdec.c:932
EDGE_WIDTH
#define EDGE_WIDTH
Definition: mpegpicture.h:34
DiracContext::dc_prediction
int dc_prediction
Definition: diracdec.c:159
DiracContext::wavelet_depth
unsigned wavelet_depth
Definition: diracdec.c:164
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVDiracSeqHeader::colorspace
enum AVColorSpace colorspace
Definition: dirac.h:110
DiracSlice::slice_y
int slice_y
Definition: diracdec.c:132
DiracContext::ref_frames
DiracFrame * ref_frames[MAX_REFERENCE_FRAMES+1]
Definition: diracdec.c:238
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
DiracContext::old_delta_quant
unsigned old_delta_quant
schroedinger older than 1.0.8 doesn't store quant delta if only one codebook exists in a band
Definition: diracdec.c:171
dirac_get_arith_uint
static int dirac_get_arith_uint(DiracArith *c, int follow_ctx, int data_ctx)
Definition: dirac_arith.h:174
CTX_MV_F1
#define CTX_MV_F1
Definition: dirac_arith.h:70
DIRAC_MAX_QUANT_INDEX
#define DIRAC_MAX_QUANT_INDEX
Definition: diractab.h:41
DIRAC_PCODE_AUX
@ DIRAC_PCODE_AUX
Definition: dirac.h:60
AVCodecContext::level
int level
level
Definition: avcodec.h:1673
DiracContext::thread_buf_size
int thread_buf_size
Definition: diracdec.c:179
subband_ll
@ subband_ll
Definition: diracdec.c:244
AVOnce
#define AVOnce
Definition: thread.h:176
DiracContext::is_arith
int is_arith
Definition: diracdec.c:154
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
DiracContext::width
unsigned width
Definition: diracdec.c:185
weight
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1562
ff_spatial_idwt_slice2
void ff_spatial_idwt_slice2(DWTContext *d, int y)
Definition: dirac_dwt.c:68
dirac_subband
dirac_subband
Definition: diracdec.c:243
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:69
add_frame
static int add_frame(DiracFrame *framelist[], int maxframes, DiracFrame *frame)
Definition: diracdec.c:275
INTRA_DC_PRED
#define INTRA_DC_PRED(n, type)
Dirac Specification -> 13.3 intra_dc_prediction(band)
Definition: diracdec.c:571
f
f
Definition: af_crystalizer.c:122
dirac_decode_end
static av_cold int dirac_decode_end(AVCodecContext *avctx)
Definition: diracdec.c:426
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:422
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1403
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
DiracContext::highquality
struct DiracContext::@54 highquality
Plane::yoffset
uint8_t yoffset
Definition: diracdec.c:123
Plane::yblen
uint8_t yblen
Definition: diracdec.c:117
AVPacket::size
int size
Definition: packet.h:375
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:343
codec_internal.h
Plane::height
int height
Definition: cfhd.h:120
AV_WN32
#define AV_WN32(p, v)
Definition: intreadwrite.h:376
DiracContext::gb
GetBitContext gb
Definition: diracdec.c:142
init_obmc_weight_row
static void init_obmc_weight_row(Plane *p, uint8_t *obmc_weight, int stride, int left, int right, int wy)
Definition: diracdec.c:1574
DiracContext::codeblock_mode
unsigned codeblock_mode
Definition: diracdec.c:172
size
int size
Definition: twinvq_data.h:10344
DiracContext::chroma_x_shift
int chroma_x_shift
Definition: diracdec.c:147
SubBand::length
unsigned length
Definition: diracdec.c:104
DiracContext::seq
AVDiracSeqHeader seq
Definition: diracdec.c:143
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
DiracContext::bytes
AVRational bytes
Definition: diracdec.c:190
dirac_vlc.h
DiracContext::weight_log2denom
unsigned weight_log2denom
Definition: diracdec.c:210
SubBand
Definition: cfhd.h:109
DiracContext::thread_buf
uint8_t * thread_buf
Definition: diracdec.c:177
split
static char * split(char *message, char delim)
Definition: af_channelmap.c:81
height
#define height
b2
static double b2(void *priv, double x, double y)
Definition: vf_xfade.c:1772
Plane::width
int width
Definition: cfhd.h:119
mpegpicture.h
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:117
dirac_get_se_golomb
static int dirac_get_se_golomb(GetBitContext *gb)
Definition: golomb.h:359
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
add_dc
static void add_dc(uint16_t *dst, int dc, int stride, uint8_t *obmc_weight, int xblen, int yblen)
Definition: diracdec.c:1742
DIRAC_PCODE_SEQ_HEADER
@ DIRAC_PCODE_SEQ_HEADER
Definition: dirac.h:58
get_buffer_with_edge
static int get_buffer_with_edge(AVCodecContext *avctx, AVFrame *f, int flags)
Definition: diracdec.c:1968
pred_block_mode
static int pred_block_mode(DiracBlock *block, int stride, int x, int y, int refmask)
Definition: diracdec.c:1342
decode_hq_slice
static int decode_hq_slice(DiracContext *s, DiracSlice *slice, uint8_t *tmp_buf)
VC-2 Specification -> 13.5.3 hq_slice(sx,sy)
Definition: diracdec.c:843
subband_coeffs
static int subband_coeffs(DiracContext *s, int x, int y, int p, SliceCoeffs c[MAX_DWT_LEVELS])
Definition: diracdec.c:822
subband_nb
@ subband_nb
Definition: diracdec.c:248
init_obmc_weights
static void init_obmc_weights(DiracContext *s, Plane *p, int by)
Definition: diracdec.c:1607
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:116
Plane::stride
ptrdiff_t stride
Definition: cfhd.h:121
DiracContext::num_x
unsigned num_x
Definition: diracdec.c:174
DiracContext::prefix_bytes
unsigned prefix_bytes
Definition: diracdec.c:195
MpegvideoEncDSPContext
Definition: mpegvideoencdsp.h:32
DiracBlock
Definition: diracdec.c:84
dirac_decode_frame
static int dirac_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_frame, AVPacket *pkt)
Definition: diracdec.c:2263
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
DiracContext::low_delay
int low_delay
Definition: diracdec.c:156
pred_mv
static void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
Definition: diracdec.c:1391
UNPACK_ARITH
#define UNPACK_ARITH(n, type)
Definition: diracdec.c:452
alloc_sequence_buffers
static int alloc_sequence_buffers(DiracContext *s)
Definition: diracdec.c:286
DiracDSPContext
Definition: diracdsp.h:30
DIRAC_REF_MASK_REF2
#define DIRAC_REF_MASK_REF2
Definition: diracdec.c:62
PARSE_VALUES
#define PARSE_VALUES(type, x, gb, ebits, buf1, buf2)
Definition: diracdec.c:712
decode_block_params
static void decode_block_params(DiracContext *s, DiracArith arith[8], DiracBlock *block, int stride, int x, int y)
Definition: diracdec.c:1443
ff_dirac_golomb_read_16bit
int ff_dirac_golomb_read_16bit(const uint8_t *buf, int bytes, uint8_t *_dst, int coeffs)
Definition: dirac_vlc.c:1095
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
SubBand::ibuf
uint8_t * ibuf
Definition: diracdec.c:100
DiracContext::globalmc_flag
int globalmc_flag
Definition: diracdec.c:160
CTX_PMODE_REF2
#define CTX_PMODE_REF2
Definition: dirac_arith.h:68
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
DiracFrame::avframe
AVFrame * avframe
Definition: diracdec.c:77
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: codec_internal.h:31
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:477
dirac_arith.h
ff_dirac_init_arith_tables
av_cold void ff_dirac_init_arith_tables(void)
Definition: dirac_arith.c:86
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:203
MAX_BLOCKSIZE
#define MAX_BLOCKSIZE
Definition: diracdec.c:56
mc_subpel
static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5], int x, int y, int ref, int plane)
For block x,y, determine which of the hpel planes to do bilinear interpolation from and set src[] to ...
Definition: diracdec.c:1647
AVCodecContext::height
int height
Definition: avcodec.h:562
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:599
DiracSlice
Definition: diracdec.c:129
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:272
interpolate_refplane
static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
Definition: diracdec.c:1830
DWTContext
Definition: dirac_dwt.h:54
SliceCoeffs::tot_h
int tot_h
Definition: diracdec.c:817
DiracContext::core_syntax
int core_syntax
Definition: diracdec.c:155
DiracVersionInfo::minor
int minor
Definition: dirac.h:78
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
DiracContext::mv_precision
uint8_t mv_precision
Definition: diracdec.c:208
DiracContext::height
unsigned height
Definition: diracdec.c:186
SubBand::coeff_data
const uint8_t * coeff_data
Definition: diracdec.c:105
AVDiracSeqHeader
Definition: dirac.h:81
mid_pred
#define mid_pred
Definition: mathops.h:97
SliceCoeffs::top
int top
Definition: diracdec.c:816
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
Plane::xoffset
uint8_t xoffset
Definition: diracdec.c:122
SliceCoeffs
Definition: diracdec.c:814
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
SubBand::pshift
int pshift
Definition: diracdec.c:98
DiracContext::pan_tilt
int pan_tilt[2]
Definition: diracdec.c:200
SubBand::quant
int quant
Definition: diracdec.c:99
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:683
remove_frame
static DiracFrame * remove_frame(DiracFrame *framelist[], int picnum)
Definition: diracdec.c:257
left
The left neighbouring block used in motion-vector prediction; set to the Null block when it falls outside the image (see the Snow codec notes in snow.txt for the full neighbouring-block and prediction rules).
Definition: snow.txt:386
mpeg12data.h
ff_dirac_golomb_read_32bit
int ff_dirac_golomb_read_32bit(const uint8_t *buf, int bytes, uint8_t *_dst, int coeffs)
Definition: dirac_vlc.c:1115
DIRAC_PCODE_END_SEQ
@ DIRAC_PCODE_END_SEQ
Definition: dirac.h:59
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:105
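A hedged usage sketch, not code taken from this file: after parsing an aspect ratio from a sequence header, a decoder would typically validate and apply it through ff_set_sar(); the AVRational named sar below is a hypothetical parsed value.

    /* Illustrative only: ff_set_sar() rejects invalid ratios and returns a
     * negative error code, otherwise it stores sar on the codec context. */
    int ret = ff_set_sar(avctx, sar);
    if (ret < 0)
        return ret;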
AVDiracSeqHeader::pix_fmt
enum AVPixelFormat pix_fmt
Definition: dirac.h:106
decode_subband_golomb
static int decode_subband_golomb(AVCodecContext *avctx, void *arg)
Definition: diracdec.c:647
DiracContext
Definition: diracdec.c:136
ff_dirac_qoffset_intra_tab
const int32_t ff_dirac_qoffset_intra_tab[120]
Definition: diractab.c:53
CTX_SB_DATA
#define CTX_SB_DATA
Definition: dirac_arith.h:66
DiracContext::add_obmc
void(* add_obmc)(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen)
Definition: diracdec.c:231
AVCodecContext
main external API structure.
Definition: avcodec.h:389
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1514
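A minimal sketch of the execute() contract under stated assumptions: the caller hands over an array of count jobs, each size bytes wide, and func is invoked once per job, possibly on worker threads. MyJob, process_job, do_work, jobs and num_jobs below are hypothetical names, not symbols from this file.

    /* Hypothetical per-job callback matching the func signature above. */
    static int process_job(AVCodecContext *avctx, void *arg)
    {
        MyJob *job = arg;            /* one element of the job array */
        return do_work(avctx, job);  /* hypothetical worker, 0 on success */
    }

    /* Somewhere in the decoder: run all jobs, ignoring per-job return codes. */
    avctx->execute(avctx, process_job, jobs, NULL, num_jobs, sizeof(*jobs));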
subband_hl
@ subband_hl
Definition: diracdec.c:245
DiracContext::all_frames
DiracFrame all_frames[MAX_FRAMES]
Definition: diracdec.c:240
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1547
mpegvideoencdsp.h
CALC_PADDING
#define CALC_PADDING(size, depth)
Definition: diracdec.c:71
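Assuming CALC_PADDING(size, depth) pads size up to the next multiple of 1 << depth (its role when sizing planes for a wavelet decomposition of the given depth), two quick checks:

    /* CALC_PADDING(100, 3) -> 104 (next multiple of 8)      */
    /* CALC_PADDING(96, 3)  -> 96  (already a multiple of 8) */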
ref
static int ref[MAX_W * MAX_W]
Definition: jpeg2000dwt.c:112
DiracContext::threads_num_buf
int threads_num_buf
Definition: diracdec.c:178
DiracContext::vdsp
VideoDSPContext vdsp
Definition: diracdec.c:139
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:82
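A hedged sketch from the caller's side, using the public send/receive API rather than anything in this file: a decoder with AV_CODEC_CAP_DELAY is drained by sending a NULL packet and then pulling frames until none remain.

    /* Enter drain mode, then collect the delayed frames. */
    avcodec_send_packet(avctx, NULL);
    while (avcodec_receive_frame(avctx, frame) == 0) {
        /* consume the delayed frame */
    }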
CTX_GLOBAL_BLOCK
#define CTX_GLOBAL_BLOCK
Definition: dirac_arith.h:69
Plane
Definition: cfhd.h:118
DiracFrame::reference
int reference
Definition: diracdec.c:81
divide3
static int divide3(int x)
Definition: diracdec.c:252
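The name suggests a rounded division by 3; a common way to do that without an actual division, assuming that is all the helper needs, is a fixed-point multiply (the real implementation may differ in rounding and input range):

    /* Sketch only: 21845 is roughly 65536 / 3, so this approximates a
     * rounded x / 3 for small non-negative x. */
    static int divide3_sketch(int x)
    {
        return (x * 21845 + 32768) >> 16;
    }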
VideoDSPContext
Definition: videodsp.h:40
Plane::xblen
uint8_t xblen
Definition: diracdec.c:116
DiracContext::quant
uint8_t quant[MAX_DWT_LEVELS][4]
Definition: diracdec.c:191
DiracContext::plane
Plane plane[3]
Definition: diracdec.c:146
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:90
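As with ff_set_sar() above, a hedged sketch of a typical call site: dimensions parsed from the bitstream are validated before any buffers are allocated; parsed_width and parsed_height are hypothetical locals.

    /* Illustrative only: returns a negative error code on invalid sizes. */
    ret = ff_set_dimensions(avctx, parsed_width, parsed_height);
    if (ret < 0)
        return ret;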
Plane::band
SubBand band[DWT_LEVELS_3D][4]
Definition: cfhd.h:131
propagate_block_data
static void propagate_block_data(DiracBlock *block, int stride, int size)
Copies the current block to the other blocks covered by the current superblock split mode.
Definition: diracdec.c:1483
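A hedged sketch of what such propagation could look like, going by the description above rather than the file's exact code: the decoded block in the top-left corner of a superblock is replicated over every block position the split mode covers.

    /* Sketch: copy *block over a size x size group of blocks laid out with
     * `stride` blocks per row. */
    static void propagate_block_data_sketch(DiracBlock *block, int stride, int size)
    {
        DiracBlock *dst = block;
        for (int y = 0; y < size; y++) {
            for (int x = 0; x < size; x++)
                dst[x] = *block;
            dst += stride;
        }
    }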
DiracContext::buffer_stride
int buffer_stride
Definition: diracdec.c:225
DiracContext::slice_params_num_buf
int slice_params_num_buf
Definition: diracdec.c:182
Plane::ybsep
uint8_t ybsep
Definition: diracdec.c:120
AVDiracSeqHeader::width
unsigned width
Definition: dirac.h:82
CTX_PMODE_REF1
#define CTX_PMODE_REF1
Definition: dirac_arith.h:67
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
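FFALIGN(x, a) rounds x up to the next multiple of a, with a expected to be a power of two; for example:

    /* FFALIGN(17, 16) -> 32, FFALIGN(32, 16) -> 32 */
    int padded = FFALIGN(width, 16);  /* illustrative use for a padded stride */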
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:416
global_mv
static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref)
Definition: diracdec.c:1427
decode_subband
static void decode_subband(DiracContext *s, GetBitContext *gb, int quant, int slice_x, int slice_y, int bits_end, SubBand *b1, SubBand *b2)
Definition: diracdec.c:724
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
DiracContext::avg_pixels_tab
void(* avg_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
Definition: diracdec.c:230
videodsp.h
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
DiracContext::ref_pics
DiracFrame * ref_pics[2]
Definition: diracdec.c:236
d
d
Definition: ffmpeg_filter.c:153
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:562
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
DiracContext::sbsplit
uint8_t * sbsplit
Definition: diracdec.c:217
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
Definition: frame.h:370
AVDiracSeqHeader::height
unsigned height
Definition: dirac.h:83
block
The exact code depends on how similar the blocks are and how related they are to the block...
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:78
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
CTX_DELTA_Q_DATA
@ CTX_DELTA_Q_DATA
Definition: dirac_arith.h:56
dirac_decode_data_unit
static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size)
Definition: diracdec.c:2121
DiracContext::lowdelay
struct DiracContext::@53 lowdelay
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
dirac_decode_init
static av_cold int dirac_decode_init(AVCodecContext *avctx)
Definition: diracdec.c:387
h
h
Definition: vp9dsp_template.c:2038
ff_diracdsp_init
av_cold void ff_diracdsp_init(DiracDSPContext *c)
Definition: diracdsp.c:221
dirac_unpack_idwt_params
static int dirac_unpack_idwt_params(DiracContext *s)
Dirac Specification -> 11.3 Wavelet transform data.
Definition: diracdec.c:1232
DiracContext::zrs_exp
unsigned zrs_exp
Definition: diracdec.c:203
DiracContext::obmc_weight
uint8_t obmc_weight[3][MAX_BLOCKSIZE * MAX_BLOCKSIZE]
Definition: diracdec.c:227
DWTPlane::width
int width
Definition: dirac_dwt.h:38
dirac_decode_flush
static void dirac_decode_flush(AVCodecContext *avctx)
Definition: diracdec.c:418
int
int
Definition: ffmpeg_filter.c:153
AVFrame::display_picture_number
int display_picture_number
picture number in display order
Definition: frame.h:456
DiracContext::frame_number
int64_t frame_number
Definition: diracdec.c:145
DiracArith::error
int error
Definition: dirac_arith.h:84
AVDiracSeqHeader::color_primaries
enum AVColorPrimaries color_primaries
Definition: dirac.h:108
AVCodecContext::execute2
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:1533
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
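av_log2() returns the index of the highest set bit, i.e. floor(log2(v)) for v > 0; for example:

    /* av_log2(1) == 0, av_log2(255) == 7, av_log2(256) == 8 */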
CTX_DC_DATA
#define CTX_DC_DATA
Definition: dirac_arith.h:73
DiracContext::codeblock
struct DiracContext::@52 codeblock[MAX_DWT_LEVELS+1]
get_delayed_pic
static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
Definition: diracdec.c:2086
SubBand::height
int height
Definition: cfhd.h:114
DiracContext::pshift
int pshift
Definition: diracdec.c:151
MAX_FRAMES
#define MAX_FRAMES
Definition: diracdec.c:54
SubBand::orientation
int orientation
Definition: diracdec.c:94
init_obmc_weight
static void init_obmc_weight(Plane *p, uint8_t *obmc_weight, int stride, int left, int right, int top, int bottom)
Definition: diracdec.c:1588
block_mc
static void block_mc(DiracContext *s, DiracBlock *block, uint16_t *mctmp, uint8_t *obmc_weight, int plane, int dstx, int dsty)
Definition: diracdec.c:1758
DWTPlane::height
int height
Definition: dirac_dwt.h:39
AV_WN16
#define AV_WN16(p, v)
Definition: intreadwrite.h:372
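AV_WN16(p, v) writes the 16-bit value v at p in native byte order without requiring p to be aligned; for example:

    uint8_t buf[2];
    AV_WN16(buf, 0x1234);  /* stores 0x1234 in native endianness */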