FFmpeg
diracdec.c
1 /*
2  * Copyright (C) 2007 Marco Gerards <marco@gnu.org>
3  * Copyright (C) 2009 David Conrad
4  * Copyright (C) 2011 Jordi Ortiz
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * Dirac Decoder
26  * @author Marco Gerards <marco@gnu.org>, David Conrad, Jordi Ortiz <nenjordi@gmail.com>
27  */
28 
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/thread.h"
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "bytestream.h"
34 #include "internal.h"
35 #include "golomb.h"
36 #include "dirac_arith.h"
37 #include "dirac_vlc.h"
38 #include "mpeg12data.h"
39 #include "libavcodec/mpegvideo.h"
40 #include "mpegvideoencdsp.h"
41 #include "dirac_dwt.h"
42 #include "dirac.h"
43 #include "diractab.h"
44 #include "diracdsp.h"
45 #include "videodsp.h"
46 
47 /**
48  * The spec limits this to 3 for frame coding, but in practice it can be as high as 6
49  */
50 #define MAX_REFERENCE_FRAMES 8
51 #define MAX_DELAY 5 /* limit for main profile for frame coding (TODO: field coding) */
52 #define MAX_FRAMES (MAX_REFERENCE_FRAMES + MAX_DELAY + 1)
53 #define MAX_QUANT 255 /* max quant for VC-2 */
54 #define MAX_BLOCKSIZE 32 /* maximum xblen/yblen we support */
55 
56 /**
57  * DiracBlock->ref flags, if set then the block does MC from the given ref
58  */
59 #define DIRAC_REF_MASK_REF1 1
60 #define DIRAC_REF_MASK_REF2 2
61 #define DIRAC_REF_MASK_GLOBAL 4
62 
63 /**
64  * Value of Picture.reference when Picture is not a reference picture, but
65  * is held for delayed output.
66  */
67 #define DELAYED_PIC_REF 4
68 
69 #define CALC_PADDING(size, depth) \
70  (((size + (1 << depth) - 1) >> depth) << depth)
71 
72 #define DIVRNDUP(a, b) (((a) + (b) - 1) / (b))
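/* CALC_PADDING() rounds size up to the next multiple of 2^depth
 * (e.g. CALC_PADDING(100, 3) == 104), and DIVRNDUP() is plain ceiling
 * division (e.g. DIVRNDUP(7, 4) == 2). */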
73 
74 typedef struct {
75  AVFrame *avframe;
76  int interpolated[3]; /* 1 if hpel[] is valid */
77  uint8_t *hpel[3][4];
78  uint8_t *hpel_base[3][4];
79  int reference;
80 } DiracFrame;
81 
82 typedef struct {
83  union {
84  int16_t mv[2][2];
85  int16_t dc[3];
86  } u; /* anonymous unions aren't in C99 :( */
87  uint8_t ref;
88 } DiracBlock;
89 
90 typedef struct SubBand {
91  int level;
92  int orientation;
93  int stride; /* in bytes */
94  int width;
95  int height;
96  int pshift;
97  int quant;
98  uint8_t *ibuf;
99  struct SubBand *parent;
100 
101  /* for low delay */
102  unsigned length;
103  const uint8_t *coeff_data;
104 } SubBand;
105 
106 typedef struct Plane {
107  DWTPlane idwt;
108 
109  int width;
110  int height;
111  ptrdiff_t stride;
112 
113  /* block length */
114  uint8_t xblen;
115  uint8_t yblen;
116  /* block separation (block n+1 starts after this many pixels in block n) */
117  uint8_t xbsep;
118  uint8_t ybsep;
119  /* amount of overspill on each edge (half of the overlap between blocks) */
120  uint8_t xoffset;
121  uint8_t yoffset;
122 
123  SubBand band[MAX_DWT_LEVELS][4];
124 } Plane;
125 
126 /* Used by Low Delay and High Quality profiles */
127 typedef struct DiracSlice {
128  GetBitContext gb;
129  int slice_x;
130  int slice_y;
131  int bytes;
132 } DiracSlice;
133 
134 typedef struct DiracContext {
144  int64_t frame_number; /* number of the next frame to display */
145  Plane plane[3];
146  int chroma_x_shift;
147  int chroma_y_shift;
148 
149  int bit_depth; /* bit depth */
150  int pshift; /* pixel shift = bit_depth > 8 */
151 
152  int zero_res; /* zero residue flag */
153  int is_arith; /* whether coeffs use arith or golomb coding */
154  int core_syntax; /* use core syntax only */
155  int low_delay; /* use the low delay syntax */
156  int hq_picture; /* high quality picture, enables low_delay */
157  int ld_picture; /* use low delay picture, turns on low_delay */
158  int dc_prediction; /* has dc prediction */
159  int globalmc_flag; /* use global motion compensation */
160  int num_refs; /* number of reference pictures */
161 
162  /* wavelet decoding */
163  unsigned wavelet_depth; /* depth of the IDWT */
164  unsigned wavelet_idx;
165 
166  /**
167  * schroedinger older than 1.0.8 doesn't store
168  * quant delta if only one codebook exists in a band
169  */
170  unsigned old_delta_quant;
171  unsigned codeblock_mode;
172 
173  unsigned num_x; /* number of horizontal slices */
174  unsigned num_y; /* number of vertical slices */
175 
176  uint8_t *thread_buf; /* Per-thread buffer for coefficient storage */
177  int threads_num_buf; /* Current # of buffers allocated */
178  int thread_buf_size; /* Each thread has a buffer this size */
179 
180  DiracSlice *slice_params_buf;
181  int slice_params_num_buf;
182 
183  struct {
184  unsigned width;
185  unsigned height;
186  } codeblock[MAX_DWT_LEVELS+1];
187 
188  struct {
189  AVRational bytes; /* average bytes per slice */
190  uint8_t quant[MAX_DWT_LEVELS][4]; /* [DIRAC_STD] E.1 */
191  } lowdelay;
192 
193  struct {
194  unsigned prefix_bytes;
195  uint64_t size_scaler;
196  } highquality;
197 
198  struct {
199  int pan_tilt[2]; /* pan/tilt vector */
200  int zrs[2][2]; /* zoom/rotate/shear matrix */
201  int perspective[2]; /* perspective vector */
202  unsigned zrs_exp;
203  unsigned perspective_exp;
204  } globalmc[2];
205 
206  /* motion compensation */
207  uint8_t mv_precision; /* [DIRAC_STD] REFS_WT_PRECISION */
208  int16_t weight[2]; /* [DIRAC_STD] REF1_WT and REF2_WT */
209  unsigned weight_log2denom; /* [DIRAC_STD] REFS_WT_PRECISION */
210 
211  int blwidth; /* number of blocks (horizontally) */
212  int blheight; /* number of blocks (vertically) */
213  int sbwidth; /* number of superblocks (horizontally) */
214  int sbheight; /* number of superblocks (vertically) */
215 
218 
219  uint8_t *edge_emu_buffer[4];
220  uint8_t *edge_emu_buffer_base;
221 
222  uint16_t *mctmp; /* buffer holding the MC data multiplied by OBMC weights */
223  uint8_t *mcscratch;
224  int buffer_stride;
225 
227 
228  void (*put_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
229  void (*avg_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
230  void (*add_obmc)(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen);
231  dirac_weight_func weight_func;
232  dirac_biweight_func biweight_func;
233 
234  DiracFrame *current_picture;
235  DiracFrame *ref_pics[2];
236 
237  DiracFrame *ref_frames[MAX_REFERENCE_FRAMES+1];
238  DiracFrame *delay_frames[MAX_DELAY+1];
239  DiracFrame all_frames[MAX_FRAMES];
240 } DiracContext;
241 
242 enum dirac_subband {
243  subband_ll = 0,
244  subband_lh = 1,
245  subband_hl = 2,
246  subband_hh = 3,
247  subband_nb,
248 };
249 
250 /* magic number division by 3 from schroedinger */
251 static inline int divide3(int x)
252 {
253  return (int)((x+1U)*21845 + 10922) >> 16;
254 }
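/* 21845 is the 16-bit reciprocal of 3 (3 * 21845 = 65535), so (x * 21845) >> 16 ~= x/3;
 * the +1 and +10922 terms make this a rounded division for the small sums it is used on
 * (DC and motion-vector predictions averaging three neighbours), without a real divide. */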
255 
256 static DiracFrame *remove_frame(DiracFrame *framelist[], int picnum)
257 {
258  DiracFrame *remove_pic = NULL;
259  int i, remove_idx = -1;
260 
261  for (i = 0; framelist[i]; i++)
262  if (framelist[i]->avframe->display_picture_number == picnum) {
263  remove_pic = framelist[i];
264  remove_idx = i;
265  }
266 
267  if (remove_pic)
268  for (i = remove_idx; framelist[i]; i++)
269  framelist[i] = framelist[i+1];
270 
271  return remove_pic;
272 }
273 
274 static int add_frame(DiracFrame *framelist[], int maxframes, DiracFrame *frame)
275 {
276  int i;
277  for (i = 0; i < maxframes; i++)
278  if (!framelist[i]) {
279  framelist[i] = frame;
280  return 0;
281  }
282  return -1;
283 }
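/* The frame lists (e.g. ref_frames[] and delay_frames[]) are kept NULL-terminated:
 * add_frame() fills the first free slot and remove_frame() compacts the list after
 * removing a picture, so loops of the form "for (i = 0; framelist[i]; i++)" terminate. */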
284 
285 static int alloc_sequence_buffers(DiracContext *s)
286 {
287  int sbwidth = DIVRNDUP(s->seq.width, 4);
288  int sbheight = DIVRNDUP(s->seq.height, 4);
289  int i, w, h, top_padding;
290 
291  /* todo: think more about this / use or set Plane here */
292  for (i = 0; i < 3; i++) {
293  int max_xblen = MAX_BLOCKSIZE >> (i ? s->chroma_x_shift : 0);
294  int max_yblen = MAX_BLOCKSIZE >> (i ? s->chroma_y_shift : 0);
295  w = s->seq.width >> (i ? s->chroma_x_shift : 0);
296  h = s->seq.height >> (i ? s->chroma_y_shift : 0);
297 
298  /* we allocate the max we support here since num decompositions can
299  * change from frame to frame. Stride is aligned to 16 for SIMD, and
300  * 1<<MAX_DWT_LEVELS top padding to avoid if(y>0) in arith decoding
301  * MAX_BLOCKSIZE padding for MC: blocks can spill up to half of that
302  * on each side */
303  top_padding = FFMAX(1<<MAX_DWT_LEVELS, max_yblen/2);
304  w = FFALIGN(CALC_PADDING(w, MAX_DWT_LEVELS), 8); /* FIXME: Should this be 16 for SSE??? */
305  h = top_padding + CALC_PADDING(h, MAX_DWT_LEVELS) + max_yblen/2;
306 
307  s->plane[i].idwt.buf_base = av_mallocz_array((w+max_xblen), h * (2 << s->pshift));
308  s->plane[i].idwt.tmp = av_malloc_array((w+16), 2 << s->pshift);
309  s->plane[i].idwt.buf = s->plane[i].idwt.buf_base + (top_padding*w)*(2 << s->pshift);
310  if (!s->plane[i].idwt.buf_base || !s->plane[i].idwt.tmp)
311  return AVERROR(ENOMEM);
312  }
313 
314  /* fixme: allocate using real stride here */
315  s->sbsplit = av_malloc_array(sbwidth, sbheight);
316  s->blmotion = av_malloc_array(sbwidth, sbheight * 16 * sizeof(*s->blmotion));
317 
318  if (!s->sbsplit || !s->blmotion)
319  return AVERROR(ENOMEM);
320  return 0;
321 }
322 
323 static int alloc_buffers(DiracContext *s, int stride)
324 {
325  int w = s->seq.width;
326  int h = s->seq.height;
327 
328  av_assert0(stride >= w);
329  stride += 64;
330 
331  if (s->buffer_stride >= stride)
332  return 0;
333  s->buffer_stride = 0;
334 
335  av_freep(&s->edge_emu_buffer_base);
336  memset(s->edge_emu_buffer, 0, sizeof(s->edge_emu_buffer));
337  av_freep(&s->mctmp);
338  av_freep(&s->mcscratch);
339 
340  s->edge_emu_buffer_base = av_malloc_array(stride, MAX_BLOCKSIZE);
341 
342  s->mctmp = av_malloc_array((stride+MAX_BLOCKSIZE), (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
343  s->mcscratch = av_malloc_array(stride, MAX_BLOCKSIZE);
344 
345  if (!s->edge_emu_buffer_base || !s->mctmp || !s->mcscratch)
346  return AVERROR(ENOMEM);
347 
348  s->buffer_stride = stride;
349  return 0;
350 }
351 
352 static void free_sequence_buffers(DiracContext *s)
353 {
354  int i, j, k;
355 
356  for (i = 0; i < MAX_FRAMES; i++) {
357  if (s->all_frames[i].avframe->data[0]) {
358  av_frame_unref(s->all_frames[i].avframe);
359  memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
360  }
361 
362  for (j = 0; j < 3; j++)
363  for (k = 1; k < 4; k++)
364  av_freep(&s->all_frames[i].hpel_base[j][k]);
365  }
366 
367  memset(s->ref_frames, 0, sizeof(s->ref_frames));
368  memset(s->delay_frames, 0, sizeof(s->delay_frames));
369 
370  for (i = 0; i < 3; i++) {
371  av_freep(&s->plane[i].idwt.buf_base);
372  av_freep(&s->plane[i].idwt.tmp);
373  }
374 
375  s->buffer_stride = 0;
376  av_freep(&s->sbsplit);
377  av_freep(&s->blmotion);
378  av_freep(&s->edge_emu_buffer_base);
379 
380  av_freep(&s->mctmp);
381  av_freep(&s->mcscratch);
382 }
383 
385 
386 static av_cold int dirac_decode_init(AVCodecContext *avctx)
387 {
388  DiracContext *s = avctx->priv_data;
389  int i, ret;
390 
391  s->avctx = avctx;
392  s->frame_number = -1;
393 
394  s->thread_buf = NULL;
395  s->threads_num_buf = -1;
396  s->thread_buf_size = -1;
397 
398  ff_dirac_golomb_reader_init(&s->reader_ctx);
399  ff_diracdsp_init(&s->diracdsp);
400  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
401  ff_videodsp_init(&s->vdsp, 8);
402 
403  for (i = 0; i < MAX_FRAMES; i++) {
404  s->all_frames[i].avframe = av_frame_alloc();
405  if (!s->all_frames[i].avframe) {
406  while (i > 0)
407  av_frame_free(&s->all_frames[--i].avframe);
408  return AVERROR(ENOMEM);
409  }
410  }
412  if (ret != 0)
413  return AVERROR_UNKNOWN;
414 
415  return 0;
416 }
417 
418 static void dirac_decode_flush(AVCodecContext *avctx)
419 {
420  DiracContext *s = avctx->priv_data;
421  free_sequence_buffers(s);
422  s->seen_sequence_header = 0;
423  s->frame_number = -1;
424 }
425 
426 static av_cold int dirac_decode_end(AVCodecContext *avctx)
427 {
428  DiracContext *s = avctx->priv_data;
429  int i;
430 
431  ff_dirac_golomb_reader_end(&s->reader_ctx);
432 
433  dirac_decode_flush(avctx);
434  for (i = 0; i < MAX_FRAMES; i++)
435  av_frame_free(&s->all_frames[i].avframe);
436 
437  av_freep(&s->thread_buf);
438  av_freep(&s->slice_params_buf);
439 
440  return 0;
441 }
442 
443 static inline int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
444 {
445  int coeff = dirac_get_se_golomb(gb);
446  const unsigned sign = FFSIGN(coeff);
447  if (coeff)
448  coeff = sign*((sign * coeff * qfactor + qoffset) >> 2);
449  return coeff;
450 }
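/* Inverse quantization: the reconstructed coefficient is
 * sign(q) * ((|q| * qfactor + qoffset) >> 2). The qoffset passed in already
 * includes the +2 rounding term added by the callers below
 * (see the ff_dirac_qoffset_*_tab[...] + 2 expressions). */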
451 
452 #define SIGN_CTX(x) (CTX_SIGN_ZERO + ((x) > 0) - ((x) < 0))
453 
454 #define UNPACK_ARITH(n, type) \
455  static inline void coeff_unpack_arith_##n(DiracArith *c, int qfactor, int qoffset, \
456  SubBand *b, type *buf, int x, int y) \
457  { \
458  int sign, sign_pred = 0, pred_ctx = CTX_ZPZN_F1; \
459  unsigned coeff; \
460  const int mstride = -(b->stride >> (1+b->pshift)); \
461  if (b->parent) { \
462  const type *pbuf = (type *)b->parent->ibuf; \
463  const int stride = b->parent->stride >> (1+b->parent->pshift); \
464  pred_ctx += !!pbuf[stride * (y>>1) + (x>>1)] << 1; \
465  } \
466  if (b->orientation == subband_hl) \
467  sign_pred = buf[mstride]; \
468  if (x) { \
469  pred_ctx += !(buf[-1] | buf[mstride] | buf[-1 + mstride]); \
470  if (b->orientation == subband_lh) \
471  sign_pred = buf[-1]; \
472  } else { \
473  pred_ctx += !buf[mstride]; \
474  } \
475  coeff = dirac_get_arith_uint(c, pred_ctx, CTX_COEFF_DATA); \
476  if (coeff) { \
477  coeff = (coeff * qfactor + qoffset) >> 2; \
478  sign = dirac_get_arith_bit(c, SIGN_CTX(sign_pred)); \
479  coeff = (coeff ^ -sign) + sign; \
480  } \
481  *buf = coeff; \
482  } \
483 
484 UNPACK_ARITH(8, int16_t)
485 UNPACK_ARITH(10, int32_t)
486 
487 /**
488  * Decode the coeffs in the rectangle defined by left, right, top, bottom
489  * [DIRAC_STD] 13.4.3.2 Codeblock unpacking loop. codeblock()
490  */
491 static inline int codeblock(DiracContext *s, SubBand *b,
492  GetBitContext *gb, DiracArith *c,
493  int left, int right, int top, int bottom,
494  int blockcnt_one, int is_arith)
495 {
496  int x, y, zero_block;
497  int qoffset, qfactor;
498  uint8_t *buf;
499 
500  /* check for any coded coefficients in this codeblock */
501  if (!blockcnt_one) {
502  if (is_arith)
503  zero_block = dirac_get_arith_bit(c, CTX_ZERO_BLOCK);
504  else
505  zero_block = get_bits1(gb);
506 
507  if (zero_block)
508  return 0;
509  }
510 
511  if (s->codeblock_mode && !(s->old_delta_quant && blockcnt_one)) {
512  int quant;
513  if (is_arith)
514  quant = dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
515  else
516  quant = dirac_get_se_golomb(gb);
517  if (quant > INT_MAX - b->quant || b->quant + quant < 0) {
518  av_log(s->avctx, AV_LOG_ERROR, "Invalid quant\n");
519  return AVERROR_INVALIDDATA;
520  }
521  b->quant += quant;
522  }
523 
524  if (b->quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
525  av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", b->quant);
526  b->quant = 0;
527  return AVERROR_INVALIDDATA;
528  }
529 
530  qfactor = ff_dirac_qscale_tab[b->quant];
531  /* TODO: context pointer? */
532  if (!s->num_refs)
533  qoffset = ff_dirac_qoffset_intra_tab[b->quant] + 2;
534  else
535  qoffset = ff_dirac_qoffset_inter_tab[b->quant] + 2;
536 
537  buf = b->ibuf + top * b->stride;
538  if (is_arith) {
539  for (y = top; y < bottom; y++) {
540  if (c->error)
541  return c->error;
542  for (x = left; x < right; x++) {
543  if (b->pshift) {
544  coeff_unpack_arith_10(c, qfactor, qoffset, b, (int32_t*)(buf)+x, x, y);
545  } else {
546  coeff_unpack_arith_8(c, qfactor, qoffset, b, (int16_t*)(buf)+x, x, y);
547  }
548  }
549  buf += b->stride;
550  }
551  } else {
552  for (y = top; y < bottom; y++) {
553  if (get_bits_left(gb) < 1)
554  return AVERROR_INVALIDDATA;
555  for (x = left; x < right; x++) {
556  int val = coeff_unpack_golomb(gb, qfactor, qoffset);
557  if (b->pshift) {
558  AV_WN32(&buf[4*x], val);
559  } else {
560  AV_WN16(&buf[2*x], val);
561  }
562  }
563  buf += b->stride;
564  }
565  }
566  return 0;
567 }
568 
569 /**
570  * Dirac Specification ->
571  * 13.3 intra_dc_prediction(band)
572  */
573 #define INTRA_DC_PRED(n, type) \
574  static inline void intra_dc_prediction_##n(SubBand *b) \
575  { \
576  type *buf = (type*)b->ibuf; \
577  int x, y; \
578  \
579  for (x = 1; x < b->width; x++) \
580  buf[x] += buf[x-1]; \
581  buf += (b->stride >> (1+b->pshift)); \
582  \
583  for (y = 1; y < b->height; y++) { \
584  buf[0] += buf[-(b->stride >> (1+b->pshift))]; \
585  \
586  for (x = 1; x < b->width; x++) { \
587  int pred = buf[x - 1] + buf[x - (b->stride >> (1+b->pshift))] + buf[x - (b->stride >> (1+b->pshift))-1]; \
588  buf[x] += divide3(pred); \
589  } \
590  buf += (b->stride >> (1+b->pshift)); \
591  } \
592  } \
593 
594 INTRA_DC_PRED(8, int16_t)
595 INTRA_DC_PRED(10, uint32_t)
596 
597 /**
598  * Dirac Specification ->
599  * 13.4.2 Non-skipped subbands. subband_coeffs()
600  */
601 static av_always_inline int decode_subband_internal(DiracContext *s, SubBand *b, int is_arith)
602 {
603  int cb_x, cb_y, left, right, top, bottom;
604  DiracArith c;
605  GetBitContext gb;
606  int cb_width = s->codeblock[b->level + (b->orientation != subband_ll)].width;
607  int cb_height = s->codeblock[b->level + (b->orientation != subband_ll)].height;
608  int blockcnt_one = (cb_width + cb_height) == 2;
609  int ret;
610 
611  if (!b->length)
612  return 0;
613 
614  init_get_bits8(&gb, b->coeff_data, b->length);
615 
616  if (is_arith)
617  ff_dirac_init_arith_decoder(&c, &gb, b->length);
618 
619  top = 0;
620  for (cb_y = 0; cb_y < cb_height; cb_y++) {
621  bottom = (b->height * (cb_y+1LL)) / cb_height;
622  left = 0;
623  for (cb_x = 0; cb_x < cb_width; cb_x++) {
624  right = (b->width * (cb_x+1LL)) / cb_width;
625  ret = codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
626  if (ret < 0)
627  return ret;
628  left = right;
629  }
630  top = bottom;
631  }
632 
633  if (b->orientation == subband_ll && s->num_refs == 0) {
634  if (s->pshift) {
635  intra_dc_prediction_10(b);
636  } else {
637  intra_dc_prediction_8(b);
638  }
639  }
640  return 0;
641 }
642 
643 static int decode_subband_arith(AVCodecContext *avctx, void *b)
644 {
645  DiracContext *s = avctx->priv_data;
646  return decode_subband_internal(s, b, 1);
647 }
648 
649 static int decode_subband_golomb(AVCodecContext *avctx, void *arg)
650 {
651  DiracContext *s = avctx->priv_data;
652  SubBand **b = arg;
653  return decode_subband_internal(s, *b, 0);
654 }
655 
656 /**
657  * Dirac Specification ->
658  * [DIRAC_STD] 13.4.1 core_transform_data()
659  */
660 static int decode_component(DiracContext *s, int comp)
661 {
662  AVCodecContext *avctx = s->avctx;
663  SubBand *bands[3*MAX_DWT_LEVELS+1];
664  enum dirac_subband orientation;
665  int level, num_bands = 0;
666  int ret[3*MAX_DWT_LEVELS+1];
667  int i;
668  int damaged_count = 0;
669 
670  /* Unpack all subbands at all levels. */
671  for (level = 0; level < s->wavelet_depth; level++) {
672  for (orientation = !!level; orientation < 4; orientation++) {
673  SubBand *b = &s->plane[comp].band[level][orientation];
674  bands[num_bands++] = b;
675 
676  align_get_bits(&s->gb);
677  /* [DIRAC_STD] 13.4.2 subband() */
678  b->length = get_interleaved_ue_golomb(&s->gb);
679  if (b->length) {
680  b->quant = get_interleaved_ue_golomb(&s->gb);
681  if (b->quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
682  av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", b->quant);
683  b->quant = 0;
684  return AVERROR_INVALIDDATA;
685  }
686  align_get_bits(&s->gb);
687  b->coeff_data = s->gb.buffer + get_bits_count(&s->gb)/8;
688  if (b->length > FFMAX(get_bits_left(&s->gb)/8, 0)) {
689  b->length = FFMAX(get_bits_left(&s->gb)/8, 0);
690  damaged_count ++;
691  }
692  skip_bits_long(&s->gb, b->length*8);
693  }
694  }
695  /* arithmetic coding has inter-level dependencies, so we can only execute one level at a time */
696  if (s->is_arith)
697  avctx->execute(avctx, decode_subband_arith, &s->plane[comp].band[level][!!level],
698  ret + 3*level + !!level, 4-!!level, sizeof(SubBand));
699  }
700  /* golomb coding has no inter-level dependencies, so we can execute all subbands in parallel */
701  if (!s->is_arith)
702  avctx->execute(avctx, decode_subband_golomb, bands, ret, num_bands, sizeof(SubBand*));
703 
704  for (i = 0; i < s->wavelet_depth * 3 + 1; i++) {
705  if (ret[i] < 0)
706  damaged_count++;
707  }
708  if (damaged_count > (s->wavelet_depth * 3 + 1) /2)
709  return AVERROR_INVALIDDATA;
710 
711  return 0;
712 }
713 
714 #define PARSE_VALUES(type, x, gb, ebits, buf1, buf2) \
715  type *buf = (type *)buf1; \
716  buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset); \
717  if (get_bits_count(gb) >= ebits) \
718  return; \
719  if (buf2) { \
720  buf = (type *)buf2; \
721  buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset); \
722  if (get_bits_count(gb) >= ebits) \
723  return; \
724  } \
725 
726 static void decode_subband(DiracContext *s, GetBitContext *gb, int quant,
727  int slice_x, int slice_y, int bits_end,
728  SubBand *b1, SubBand *b2)
729 {
730  int left = b1->width * slice_x / s->num_x;
731  int right = b1->width *(slice_x+1) / s->num_x;
732  int top = b1->height * slice_y / s->num_y;
733  int bottom = b1->height *(slice_y+1) / s->num_y;
734 
735  int qfactor, qoffset;
736 
737  uint8_t *buf1 = b1->ibuf + top * b1->stride;
738  uint8_t *buf2 = b2 ? b2->ibuf + top * b2->stride: NULL;
739  int x, y;
740 
741  if (quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
742  av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", quant);
743  return;
744  }
745  qfactor = ff_dirac_qscale_tab[quant];
746  qoffset = ff_dirac_qoffset_intra_tab[quant] + 2;
747  /* we have to constantly check for overread since the spec explicitly
748  requires this, with the meaning that all remaining coeffs are set to 0 */
749  if (get_bits_count(gb) >= bits_end)
750  return;
751 
752  if (s->pshift) {
753  for (y = top; y < bottom; y++) {
754  for (x = left; x < right; x++) {
755  PARSE_VALUES(int32_t, x, gb, bits_end, buf1, buf2);
756  }
757  buf1 += b1->stride;
758  if (buf2)
759  buf2 += b2->stride;
760  }
761  }
762  else {
763  for (y = top; y < bottom; y++) {
764  for (x = left; x < right; x++) {
765  PARSE_VALUES(int16_t, x, gb, bits_end, buf1, buf2);
766  }
767  buf1 += b1->stride;
768  if (buf2)
769  buf2 += b2->stride;
770  }
771  }
772 }
773 
774 /**
775  * Dirac Specification ->
776  * 13.5.2 Slices. slice(sx,sy)
777  */
778 static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
779 {
780  DiracContext *s = avctx->priv_data;
781  DiracSlice *slice = arg;
782  GetBitContext *gb = &slice->gb;
783  enum dirac_subband orientation;
784  int level, quant, chroma_bits, chroma_end;
785 
786  int quant_base = get_bits(gb, 7); /*[DIRAC_STD] qindex */
787  int length_bits = av_log2(8 * slice->bytes)+1;
788  int luma_bits = get_bits_long(gb, length_bits);
789  int luma_end = get_bits_count(gb) + FFMIN(luma_bits, get_bits_left(gb));
790 
791  /* [DIRAC_STD] 13.5.5.2 luma_slice_band */
792  for (level = 0; level < s->wavelet_depth; level++)
793  for (orientation = !!level; orientation < 4; orientation++) {
794  quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
795  decode_subband(s, gb, quant, slice->slice_x, slice->slice_y, luma_end,
796  &s->plane[0].band[level][orientation], NULL);
797  }
798 
799  /* consume any unused bits from luma */
800  skip_bits_long(gb, get_bits_count(gb) - luma_end);
801 
802  chroma_bits = 8*slice->bytes - 7 - length_bits - luma_bits;
803  chroma_end = get_bits_count(gb) + FFMIN(chroma_bits, get_bits_left(gb));
804  /* [DIRAC_STD] 13.5.5.3 chroma_slice_band */
805  for (level = 0; level < s->wavelet_depth; level++)
806  for (orientation = !!level; orientation < 4; orientation++) {
807  quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
808  decode_subband(s, gb, quant, slice->slice_x, slice->slice_y, chroma_end,
809  &s->plane[1].band[level][orientation],
810  &s->plane[2].band[level][orientation]);
811  }
812 
813  return 0;
814 }
815 
816 typedef struct SliceCoeffs {
817  int left;
818  int top;
819  int tot_h;
820  int tot_v;
821  int tot;
822 } SliceCoeffs;
823 
824 static int subband_coeffs(DiracContext *s, int x, int y, int p,
825  SliceCoeffs c[MAX_DWT_LEVELS])
826 {
827  int level, coef = 0;
828  for (level = 0; level < s->wavelet_depth; level++) {
829  SliceCoeffs *o = &c[level];
830  SubBand *b = &s->plane[p].band[level][3]; /* orientation doesn't matter */
831  o->top = b->height * y / s->num_y;
832  o->left = b->width * x / s->num_x;
833  o->tot_h = ((b->width * (x + 1)) / s->num_x) - o->left;
834  o->tot_v = ((b->height * (y + 1)) / s->num_y) - o->top;
835  o->tot = o->tot_h*o->tot_v;
836  coef += o->tot * (4 - !!level);
837  }
838  return coef;
839 }
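/* Each decomposition level contributes tot * (4 - !!level) coefficients per slice:
 * level 0 covers 4 subbands (orientations 0..3) while every higher level covers only
 * the 3 detail subbands, matching the "orientation = !!level" loops used in this file. */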
840 
841 /**
842  * VC-2 Specification ->
843  * 13.5.3 hq_slice(sx,sy)
844  */
845 static int decode_hq_slice(DiracContext *s, DiracSlice *slice, uint8_t *tmp_buf)
846 {
847  int i, level, orientation, quant_idx;
848  int qfactor[MAX_DWT_LEVELS][4], qoffset[MAX_DWT_LEVELS][4];
849  GetBitContext *gb = &slice->gb;
850  SliceCoeffs coeffs_num[MAX_DWT_LEVELS];
851 
852  skip_bits_long(gb, 8*s->highquality.prefix_bytes);
853  quant_idx = get_bits(gb, 8);
854 
855  if (quant_idx > DIRAC_MAX_QUANT_INDEX - 1) {
856  av_log(s->avctx, AV_LOG_ERROR, "Invalid quantization index - %i\n", quant_idx);
857  return AVERROR_INVALIDDATA;
858  }
859 
860  /* Slice quantization (slice_quantizers() in the specs) */
861  for (level = 0; level < s->wavelet_depth; level++) {
862  for (orientation = !!level; orientation < 4; orientation++) {
863  const int quant = FFMAX(quant_idx - s->lowdelay.quant[level][orientation], 0);
864  qfactor[level][orientation] = ff_dirac_qscale_tab[quant];
865  qoffset[level][orientation] = ff_dirac_qoffset_intra_tab[quant] + 2;
866  }
867  }
868 
869  /* Luma + 2 Chroma planes */
870  for (i = 0; i < 3; i++) {
871  int coef_num, coef_par, off = 0;
872  int64_t length = s->highquality.size_scaler*get_bits(gb, 8);
873  int64_t bits_end = get_bits_count(gb) + 8*length;
874  const uint8_t *addr = align_get_bits(gb);
875 
876  if (length*8 > get_bits_left(gb)) {
877  av_log(s->avctx, AV_LOG_ERROR, "end too far away\n");
878  return AVERROR_INVALIDDATA;
879  }
880 
881  coef_num = subband_coeffs(s, slice->slice_x, slice->slice_y, i, coeffs_num);
882 
883  if (s->pshift)
884  coef_par = ff_dirac_golomb_read_32bit(s->reader_ctx, addr,
885  length, tmp_buf, coef_num);
886  else
887  coef_par = ff_dirac_golomb_read_16bit(s->reader_ctx, addr,
888  length, tmp_buf, coef_num);
889 
890  if (coef_num > coef_par) {
891  const int start_b = coef_par * (1 << (s->pshift + 1));
892  const int end_b = coef_num * (1 << (s->pshift + 1));
893  memset(&tmp_buf[start_b], 0, end_b - start_b);
894  }
895 
896  for (level = 0; level < s->wavelet_depth; level++) {
897  const SliceCoeffs *c = &coeffs_num[level];
898  for (orientation = !!level; orientation < 4; orientation++) {
899  const SubBand *b1 = &s->plane[i].band[level][orientation];
900  uint8_t *buf = b1->ibuf + c->top * b1->stride + (c->left << (s->pshift + 1));
901 
902  /* Change to c->tot_h <= 4 for AVX2 dequantization */
903  const int qfunc = s->pshift + 2*(c->tot_h <= 2);
904  s->diracdsp.dequant_subband[qfunc](&tmp_buf[off], buf, b1->stride,
905  qfactor[level][orientation],
906  qoffset[level][orientation],
907  c->tot_v, c->tot_h);
908 
909  off += c->tot << (s->pshift + 1);
910  }
911  }
912 
913  skip_bits_long(gb, bits_end - get_bits_count(gb));
914  }
915 
916  return 0;
917 }
918 
919 static int decode_hq_slice_row(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
920 {
921  int i;
922  DiracContext *s = avctx->priv_data;
923  DiracSlice *slices = ((DiracSlice *)arg) + s->num_x*jobnr;
924  uint8_t *thread_buf = &s->thread_buf[s->thread_buf_size*threadnr];
925  for (i = 0; i < s->num_x; i++)
926  decode_hq_slice(s, &slices[i], thread_buf);
927  return 0;
928 }
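/* HQ slices are independent of each other, so whole rows of slices are dispatched through
 * avctx->execute2() (see decode_lowdelay() below); each job unpacks coefficients into its
 * own per-thread scratch buffer (s->thread_buf) before dequantizing them into the subbands. */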
929 
930 /**
931  * Dirac Specification ->
932  * 13.5.1 low_delay_transform_data()
933  */
934 static int decode_lowdelay(DiracContext *s)
935 {
936  AVCodecContext *avctx = s->avctx;
937  int slice_x, slice_y, bufsize;
938  int64_t coef_buf_size, bytes = 0;
939  const uint8_t *buf;
940  DiracSlice *slices;
941  SliceCoeffs tmp[MAX_DWT_LEVELS];
942  int slice_num = 0;
943 
944  if (s->slice_params_num_buf != (s->num_x * s->num_y)) {
945  s->slice_params_buf = av_realloc_f(s->slice_params_buf, s->num_x * s->num_y, sizeof(DiracSlice));
946  if (!s->slice_params_buf) {
947  av_log(s->avctx, AV_LOG_ERROR, "slice params buffer allocation failure\n");
948  s->slice_params_num_buf = 0;
949  return AVERROR(ENOMEM);
950  }
951  s->slice_params_num_buf = s->num_x * s->num_y;
952  }
953  slices = s->slice_params_buf;
954 
955  /* 8 because that's how much the golomb reader could overread junk data
956  * from another plane/slice at most, and 512 because SIMD */
957  coef_buf_size = subband_coeffs(s, s->num_x - 1, s->num_y - 1, 0, tmp) + 8;
958  coef_buf_size = (coef_buf_size << (1 + s->pshift)) + 512;
959 
960  if (s->threads_num_buf != avctx->thread_count ||
961  s->thread_buf_size != coef_buf_size) {
962  s->threads_num_buf = avctx->thread_count;
963  s->thread_buf_size = coef_buf_size;
964  s->thread_buf = av_realloc_f(s->thread_buf, avctx->thread_count, s->thread_buf_size);
965  if (!s->thread_buf) {
966  av_log(s->avctx, AV_LOG_ERROR, "thread buffer allocation failure\n");
967  return AVERROR(ENOMEM);
968  }
969  }
970 
971  align_get_bits(&s->gb);
972  /*[DIRAC_STD] 13.5.2 Slices. slice(sx,sy) */
973  buf = s->gb.buffer + get_bits_count(&s->gb)/8;
974  bufsize = get_bits_left(&s->gb);
975 
976  if (s->hq_picture) {
977  int i;
978 
979  for (slice_y = 0; bufsize > 0 && slice_y < s->num_y; slice_y++) {
980  for (slice_x = 0; bufsize > 0 && slice_x < s->num_x; slice_x++) {
981  bytes = s->highquality.prefix_bytes + 1;
982  for (i = 0; i < 3; i++) {
983  if (bytes <= bufsize/8)
984  bytes += buf[bytes] * s->highquality.size_scaler + 1;
985  }
986  if (bytes >= INT_MAX || bytes*8 > bufsize) {
987  av_log(s->avctx, AV_LOG_ERROR, "too many bytes\n");
988  return AVERROR_INVALIDDATA;
989  }
990 
991  slices[slice_num].bytes = bytes;
992  slices[slice_num].slice_x = slice_x;
993  slices[slice_num].slice_y = slice_y;
994  init_get_bits(&slices[slice_num].gb, buf, bufsize);
995  slice_num++;
996 
997  buf += bytes;
998  if (bufsize/8 >= bytes)
999  bufsize -= bytes*8;
1000  else
1001  bufsize = 0;
1002  }
1003  }
1004 
1005  if (s->num_x*s->num_y != slice_num) {
1006  av_log(s->avctx, AV_LOG_ERROR, "too few slices\n");
1007  return AVERROR_INVALIDDATA;
1008  }
1009 
1010  avctx->execute2(avctx, decode_hq_slice_row, slices, NULL, s->num_y);
1011  } else {
1012  for (slice_y = 0; bufsize > 0 && slice_y < s->num_y; slice_y++) {
1013  for (slice_x = 0; bufsize > 0 && slice_x < s->num_x; slice_x++) {
1014  bytes = (slice_num+1) * (int64_t)s->lowdelay.bytes.num / s->lowdelay.bytes.den
1015  - slice_num * (int64_t)s->lowdelay.bytes.num / s->lowdelay.bytes.den;
1016  if (bytes >= INT_MAX || bytes*8 > bufsize) {
1017  av_log(s->avctx, AV_LOG_ERROR, "too many bytes\n");
1018  return AVERROR_INVALIDDATA;
1019  }
1020  slices[slice_num].bytes = bytes;
1021  slices[slice_num].slice_x = slice_x;
1022  slices[slice_num].slice_y = slice_y;
1023  init_get_bits(&slices[slice_num].gb, buf, bufsize);
1024  slice_num++;
1025 
1026  buf += bytes;
1027  if (bufsize/8 >= bytes)
1028  bufsize -= bytes*8;
1029  else
1030  bufsize = 0;
1031  }
1032  }
1033  avctx->execute(avctx, decode_lowdelay_slice, slices, NULL, slice_num,
1034  sizeof(DiracSlice)); /* [DIRAC_STD] 13.5.2 Slices */
1035  }
1036 
1037  if (s->dc_prediction) {
1038  if (s->pshift) {
1039  intra_dc_prediction_10(&s->plane[0].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
1040  intra_dc_prediction_10(&s->plane[1].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
1041  intra_dc_prediction_10(&s->plane[2].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
1042  } else {
1043  intra_dc_prediction_8(&s->plane[0].band[0][0]);
1044  intra_dc_prediction_8(&s->plane[1].band[0][0]);
1045  intra_dc_prediction_8(&s->plane[2].band[0][0]);
1046  }
1047  }
1048 
1049  return 0;
1050 }
1051 
1052 static void init_planes(DiracContext *s)
1053 {
1054  int i, w, h, level, orientation;
1055 
1056  for (i = 0; i < 3; i++) {
1057  Plane *p = &s->plane[i];
1058 
1059  p->width = s->seq.width >> (i ? s->chroma_x_shift : 0);
1060  p->height = s->seq.height >> (i ? s->chroma_y_shift : 0);
1061  p->idwt.width = w = CALC_PADDING(p->width , s->wavelet_depth);
1062  p->idwt.height = h = CALC_PADDING(p->height, s->wavelet_depth);
1063  p->idwt.stride = FFALIGN(p->idwt.width, 8) << (1 + s->pshift);
1064 
1065  for (level = s->wavelet_depth-1; level >= 0; level--) {
1066  w = w>>1;
1067  h = h>>1;
1068  for (orientation = !!level; orientation < 4; orientation++) {
1069  SubBand *b = &p->band[level][orientation];
1070 
1071  b->pshift = s->pshift;
1072  b->ibuf = p->idwt.buf;
1073  b->level = level;
1074  b->stride = p->idwt.stride << (s->wavelet_depth - level);
1075  b->width = w;
1076  b->height = h;
1077  b->orientation = orientation;
1078 
1079  if (orientation & 1)
1080  b->ibuf += w << (1+b->pshift);
1081  if (orientation > 1)
1082  b->ibuf += (b->stride>>1);
1083 
1084  if (level)
1085  b->parent = &p->band[level-1][orientation];
1086  }
1087  }
1088 
1089  if (i > 0) {
1090  p->xblen = s->plane[0].xblen >> s->chroma_x_shift;
1091  p->yblen = s->plane[0].yblen >> s->chroma_y_shift;
1092  p->xbsep = s->plane[0].xbsep >> s->chroma_x_shift;
1093  p->ybsep = s->plane[0].ybsep >> s->chroma_y_shift;
1094  }
1095 
1096  p->xoffset = (p->xblen - p->xbsep)/2;
1097  p->yoffset = (p->yblen - p->ybsep)/2;
1098  }
1099 }
1100 
1101 /**
1102  * Unpack the motion compensation parameters
1103  * Dirac Specification ->
1104  * 11.2 Picture prediction data. picture_prediction()
1105  */
1106 static int dirac_unpack_prediction_parameters(DiracContext *s)
1107 {
1108  static const uint8_t default_blen[] = { 4, 12, 16, 24 };
1109 
1110  GetBitContext *gb = &s->gb;
1111  unsigned idx, ref;
1112 
1113  align_get_bits(gb);
1114  /* [DIRAC_STD] 11.2.2 Block parameters. block_parameters() */
1115  /* Luma and Chroma are equal. 11.2.3 */
1116  idx = get_interleaved_ue_golomb(gb); /* [DIRAC_STD] index */
1117 
1118  if (idx > 4) {
1119  av_log(s->avctx, AV_LOG_ERROR, "Block prediction index too high\n");
1120  return AVERROR_INVALIDDATA;
1121  }
1122 
1123  if (idx == 0) {
1124  s->plane[0].xblen = get_interleaved_ue_golomb(gb);
1125  s->plane[0].yblen = get_interleaved_ue_golomb(gb);
1126  s->plane[0].xbsep = get_interleaved_ue_golomb(gb);
1127  s->plane[0].ybsep = get_interleaved_ue_golomb(gb);
1128  } else {
1129  /*[DIRAC_STD] preset_block_params(index). Table 11.1 */
1130  s->plane[0].xblen = default_blen[idx-1];
1131  s->plane[0].yblen = default_blen[idx-1];
1132  s->plane[0].xbsep = 4 * idx;
1133  s->plane[0].ybsep = 4 * idx;
1134  }
1135  /*[DIRAC_STD] 11.2.4 motion_data_dimensions()
1136  Calculated in function dirac_unpack_block_motion_data */
1137 
1138  if (s->plane[0].xblen % (1 << s->chroma_x_shift) != 0 ||
1139  s->plane[0].yblen % (1 << s->chroma_y_shift) != 0 ||
1140  !s->plane[0].xblen || !s->plane[0].yblen) {
1141  av_log(s->avctx, AV_LOG_ERROR,
1142  "invalid x/y block length (%d/%d) for x/y chroma shift (%d/%d)\n",
1143  s->plane[0].xblen, s->plane[0].yblen, s->chroma_x_shift, s->chroma_y_shift);
1144  return AVERROR_INVALIDDATA;
1145  }
1146  if (!s->plane[0].xbsep || !s->plane[0].ybsep || s->plane[0].xbsep < s->plane[0].xblen/2 || s->plane[0].ybsep < s->plane[0].yblen/2) {
1147  av_log(s->avctx, AV_LOG_ERROR, "Block separation too small\n");
1148  return AVERROR_INVALIDDATA;
1149  }
1150  if (s->plane[0].xbsep > s->plane[0].xblen || s->plane[0].ybsep > s->plane[0].yblen) {
1151  av_log(s->avctx, AV_LOG_ERROR, "Block separation greater than size\n");
1152  return AVERROR_INVALIDDATA;
1153  }
1154  if (FFMAX(s->plane[0].xblen, s->plane[0].yblen) > MAX_BLOCKSIZE) {
1155  av_log(s->avctx, AV_LOG_ERROR, "Unsupported large block size\n");
1156  return AVERROR_PATCHWELCOME;
1157  }
1158 
1159  /*[DIRAC_STD] 11.2.5 Motion vector precision. motion_vector_precision()
1160  Read motion vector precision */
1161  s->mv_precision = get_interleaved_ue_golomb(gb);
1162  if (s->mv_precision > 3) {
1163  av_log(s->avctx, AV_LOG_ERROR, "MV precision finer than eighth-pel\n");
1164  return AVERROR_INVALIDDATA;
1165  }
1166 
1167  /*[DIRAC_STD] 11.2.6 Global motion. global_motion()
1168  Read the global motion compensation parameters */
1169  s->globalmc_flag = get_bits1(gb);
1170  if (s->globalmc_flag) {
1171  memset(s->globalmc, 0, sizeof(s->globalmc));
1172  /* [DIRAC_STD] pan_tilt(gparams) */
1173  for (ref = 0; ref < s->num_refs; ref++) {
1174  if (get_bits1(gb)) {
1175  s->globalmc[ref].pan_tilt[0] = dirac_get_se_golomb(gb);
1176  s->globalmc[ref].pan_tilt[1] = dirac_get_se_golomb(gb);
1177  }
1178  /* [DIRAC_STD] zoom_rotate_shear(gparams)
1179  zoom/rotation/shear parameters */
1180  if (get_bits1(gb)) {
1181  s->globalmc[ref].zrs_exp = get_interleaved_ue_golomb(gb);
1182  s->globalmc[ref].zrs[0][0] = dirac_get_se_golomb(gb);
1183  s->globalmc[ref].zrs[0][1] = dirac_get_se_golomb(gb);
1184  s->globalmc[ref].zrs[1][0] = dirac_get_se_golomb(gb);
1185  s->globalmc[ref].zrs[1][1] = dirac_get_se_golomb(gb);
1186  } else {
1187  s->globalmc[ref].zrs[0][0] = 1;
1188  s->globalmc[ref].zrs[1][1] = 1;
1189  }
1190  /* [DIRAC_STD] perspective(gparams) */
1191  if (get_bits1(gb)) {
1192  s->globalmc[ref].perspective_exp = get_interleaved_ue_golomb(gb);
1193  s->globalmc[ref].perspective[0] = dirac_get_se_golomb(gb);
1194  s->globalmc[ref].perspective[1] = dirac_get_se_golomb(gb);
1195  }
1196  if (s->globalmc[ref].perspective_exp + (uint64_t)s->globalmc[ref].zrs_exp > 30) {
1197  return AVERROR_INVALIDDATA;
1198  }
1199 
1200  }
1201  }
1202 
1203  /*[DIRAC_STD] 11.2.7 Picture prediction mode. prediction_mode()
1204  Picture prediction mode, not currently used. */
1205  if (get_interleaved_ue_golomb(gb)) {
1206  av_log(s->avctx, AV_LOG_ERROR, "Unknown picture prediction mode\n");
1207  return AVERROR_INVALIDDATA;
1208  }
1209 
1210  /* [DIRAC_STD] 11.2.8 Reference picture weight. reference_picture_weights()
1211  just data read, weight calculation will be done later on. */
1212  s->weight_log2denom = 1;
1213  s->weight[0] = 1;
1214  s->weight[1] = 1;
1215 
1216  if (get_bits1(gb)) {
1217  s->weight_log2denom = get_interleaved_ue_golomb(gb);
1218  if (s->weight_log2denom < 1 || s->weight_log2denom > 8) {
1219  av_log(s->avctx, AV_LOG_ERROR, "weight_log2denom unsupported or invalid\n");
1220  s->weight_log2denom = 1;
1221  return AVERROR_INVALIDDATA;
1222  }
1223  s->weight[0] = dirac_get_se_golomb(gb);
1224  if (s->num_refs == 2)
1225  s->weight[1] = dirac_get_se_golomb(gb);
1226  }
1227  return 0;
1228 }
1229 
1230 /**
1231  * Dirac Specification ->
1232  * 11.3 Wavelet transform data. wavelet_transform()
1233  */
1234 static int dirac_unpack_idwt_params(DiracContext *s)
1235 {
1236  GetBitContext *gb = &s->gb;
1237  int i, level;
1238  unsigned tmp;
1239 
1240 #define CHECKEDREAD(dst, cond, errmsg) \
1241  tmp = get_interleaved_ue_golomb(gb); \
1242  if (cond) { \
1243  av_log(s->avctx, AV_LOG_ERROR, errmsg); \
1244  return AVERROR_INVALIDDATA; \
1245  }\
1246  dst = tmp;
1247 
1248  align_get_bits(gb);
1249 
1250  s->zero_res = s->num_refs ? get_bits1(gb) : 0;
1251  if (s->zero_res)
1252  return 0;
1253 
1254  /*[DIRAC_STD] 11.3.1 Transform parameters. transform_parameters() */
1255  CHECKEDREAD(s->wavelet_idx, tmp > 6, "wavelet_idx is too big\n")
1256 
1257  CHECKEDREAD(s->wavelet_depth, tmp > MAX_DWT_LEVELS || tmp < 1, "invalid number of DWT decompositions\n")
1258 
1259  if (!s->low_delay) {
1260  /* Codeblock parameters (core syntax only) */
1261  if (get_bits1(gb)) {
1262  for (i = 0; i <= s->wavelet_depth; i++) {
1263  CHECKEDREAD(s->codeblock[i].width , tmp < 1 || tmp > (s->avctx->width >>s->wavelet_depth-i), "codeblock width invalid\n")
1264  CHECKEDREAD(s->codeblock[i].height, tmp < 1 || tmp > (s->avctx->height>>s->wavelet_depth-i), "codeblock height invalid\n")
1265  }
1266 
1267  CHECKEDREAD(s->codeblock_mode, tmp > 1, "unknown codeblock mode\n")
1268  }
1269  else {
1270  for (i = 0; i <= s->wavelet_depth; i++)
1271  s->codeblock[i].width = s->codeblock[i].height = 1;
1272  }
1273  }
1274  else {
1275  s->num_x = get_interleaved_ue_golomb(gb);
1276  s->num_y = get_interleaved_ue_golomb(gb);
1277  if (s->num_x * s->num_y == 0 || s->num_x * (uint64_t)s->num_y > INT_MAX ||
1278  s->num_x * (uint64_t)s->avctx->width > INT_MAX ||
1279  s->num_y * (uint64_t)s->avctx->height > INT_MAX ||
1280  s->num_x > s->avctx->width ||
1281  s->num_y > s->avctx->height
1282  ) {
1283  av_log(s->avctx,AV_LOG_ERROR,"Invalid numx/y\n");
1284  s->num_x = s->num_y = 0;
1285  return AVERROR_INVALIDDATA;
1286  }
1287  if (s->ld_picture) {
1288  s->lowdelay.bytes.num = get_interleaved_ue_golomb(gb);
1289  s->lowdelay.bytes.den = get_interleaved_ue_golomb(gb);
1290  if (s->lowdelay.bytes.den <= 0) {
1291  av_log(s->avctx,AV_LOG_ERROR,"Invalid lowdelay.bytes.den\n");
1292  return AVERROR_INVALIDDATA;
1293  }
1294  } else if (s->hq_picture) {
1295  s->highquality.prefix_bytes = get_interleaved_ue_golomb(gb);
1296  s->highquality.size_scaler = get_interleaved_ue_golomb(gb);
1297  if (s->highquality.prefix_bytes >= INT_MAX / 8) {
1298  av_log(s->avctx,AV_LOG_ERROR,"too many prefix bytes\n");
1299  return AVERROR_INVALIDDATA;
1300  }
1301  }
1302 
1303  /* [DIRAC_STD] 11.3.5 Quantisation matrices (low-delay syntax). quant_matrix() */
1304  if (get_bits1(gb)) {
1305  av_log(s->avctx,AV_LOG_DEBUG,"Low Delay: Has Custom Quantization Matrix!\n");
1306  /* custom quantization matrix */
1307  for (level = 0; level < s->wavelet_depth; level++) {
1308  for (i = !!level; i < 4; i++) {
1309  s->lowdelay.quant[level][i] = get_interleaved_ue_golomb(gb);
1310  }
1311  }
1312  } else {
1313  if (s->wavelet_depth > 4) {
1314  av_log(s->avctx,AV_LOG_ERROR,"Mandatory custom low delay matrix missing for depth %d\n", s->wavelet_depth);
1315  return AVERROR_INVALIDDATA;
1316  }
1317  /* default quantization matrix */
1318  for (level = 0; level < s->wavelet_depth; level++)
1319  for (i = 0; i < 4; i++) {
1320  s->lowdelay.quant[level][i] = ff_dirac_default_qmat[s->wavelet_idx][level][i];
1321  /* haar with no shift differs for different depths */
1322  if (s->wavelet_idx == 3)
1323  s->lowdelay.quant[level][i] += 4*(s->wavelet_depth-1 - level);
1324  }
1325  }
1326  }
1327  return 0;
1328 }
1329 
1330 static inline int pred_sbsplit(uint8_t *sbsplit, int stride, int x, int y)
1331 {
1332  static const uint8_t avgsplit[7] = { 0, 0, 1, 1, 1, 2, 2 };
1333 
1334  if (!(x|y))
1335  return 0;
1336  else if (!y)
1337  return sbsplit[-1];
1338  else if (!x)
1339  return sbsplit[-stride];
1340 
1341  return avgsplit[sbsplit[-1] + sbsplit[-stride] + sbsplit[-stride-1]];
1342 }
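/* avgsplit[] maps the sum of the three neighbouring split levels (0..6) to its rounded
 * average, i.e. the predicted split level is round((left + top + topleft) / 3). */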
1343 
1344 static inline int pred_block_mode(DiracBlock *block, int stride, int x, int y, int refmask)
1345 {
1346  int pred;
1347 
1348  if (!(x|y))
1349  return 0;
1350  else if (!y)
1351  return block[-1].ref & refmask;
1352  else if (!x)
1353  return block[-stride].ref & refmask;
1354 
1355  /* return the majority */
1356  pred = (block[-1].ref & refmask) + (block[-stride].ref & refmask) + (block[-stride-1].ref & refmask);
1357  return (pred >> 1) & refmask;
1358 }
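/* With refmask equal to 1 or 2, each masked neighbour contributes either 0 or refmask,
 * so the sum is a multiple of refmask between 0 and 3*refmask; (pred >> 1) & refmask is
 * non-zero exactly when at least two of the three neighbours have the flag set, i.e. the
 * majority vote. */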
1359 
1360 static inline void pred_block_dc(DiracBlock *block, int stride, int x, int y)
1361 {
1362  int i, n = 0;
1363 
1364  memset(block->u.dc, 0, sizeof(block->u.dc));
1365 
1366  if (x && !(block[-1].ref & 3)) {
1367  for (i = 0; i < 3; i++)
1368  block->u.dc[i] += block[-1].u.dc[i];
1369  n++;
1370  }
1371 
1372  if (y && !(block[-stride].ref & 3)) {
1373  for (i = 0; i < 3; i++)
1374  block->u.dc[i] += block[-stride].u.dc[i];
1375  n++;
1376  }
1377 
1378  if (x && y && !(block[-1-stride].ref & 3)) {
1379  for (i = 0; i < 3; i++)
1380  block->u.dc[i] += block[-1-stride].u.dc[i];
1381  n++;
1382  }
1383 
1384  if (n == 2) {
1385  for (i = 0; i < 3; i++)
1386  block->u.dc[i] = (block->u.dc[i]+1)>>1;
1387  } else if (n == 3) {
1388  for (i = 0; i < 3; i++)
1389  block->u.dc[i] = divide3(block->u.dc[i]);
1390  }
1391 }
1392 
1393 static inline void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
1394 {
1395  int16_t *pred[3];
1396  int refmask = ref+1;
1397  int mask = refmask | DIRAC_REF_MASK_GLOBAL; /* exclude gmc blocks */
1398  int n = 0;
1399 
1400  if (x && (block[-1].ref & mask) == refmask)
1401  pred[n++] = block[-1].u.mv[ref];
1402 
1403  if (y && (block[-stride].ref & mask) == refmask)
1404  pred[n++] = block[-stride].u.mv[ref];
1405 
1406  if (x && y && (block[-stride-1].ref & mask) == refmask)
1407  pred[n++] = block[-stride-1].u.mv[ref];
1408 
1409  switch (n) {
1410  case 0:
1411  block->u.mv[ref][0] = 0;
1412  block->u.mv[ref][1] = 0;
1413  break;
1414  case 1:
1415  block->u.mv[ref][0] = pred[0][0];
1416  block->u.mv[ref][1] = pred[0][1];
1417  break;
1418  case 2:
1419  block->u.mv[ref][0] = (pred[0][0] + pred[1][0] + 1) >> 1;
1420  block->u.mv[ref][1] = (pred[0][1] + pred[1][1] + 1) >> 1;
1421  break;
1422  case 3:
1423  block->u.mv[ref][0] = mid_pred(pred[0][0], pred[1][0], pred[2][0]);
1424  block->u.mv[ref][1] = mid_pred(pred[0][1], pred[1][1], pred[2][1]);
1425  break;
1426  }
1427 }
1428 
1429 static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref)
1430 {
1431  int ez = s->globalmc[ref].zrs_exp;
1432  int ep = s->globalmc[ref].perspective_exp;
1433  int (*A)[2] = s->globalmc[ref].zrs;
1434  int *b = s->globalmc[ref].pan_tilt;
1435  int *c = s->globalmc[ref].perspective;
1436 
1437  int64_t m = (1<<ep) - (c[0]*(int64_t)x + c[1]*(int64_t)y);
1438  int64_t mx = m * (uint64_t)((A[0][0] * (int64_t)x + A[0][1]*(int64_t)y) + (1LL<<ez) * b[0]);
1439  int64_t my = m * (uint64_t)((A[1][0] * (int64_t)x + A[1][1]*(int64_t)y) + (1LL<<ez) * b[1]);
1440 
1441  block->u.mv[ref][0] = (mx + (1<<(ez+ep))) >> (ez+ep);
1442  block->u.mv[ref][1] = (my + (1<<(ez+ep))) >> (ez+ep);
1443 }
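/* Global motion model: with m = 2^ep - c.(x,y) (perspective term), the vector is
 * mv = (m * (A*(x,y) + 2^ez * b) + 2^(ez+ep)) >> (ez + ep), i.e. an affine
 * zoom/rotate/shear matrix A plus pan/tilt vector b, with ez and ep acting as
 * fixed-point exponents for the zrs and perspective parameters read above. */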
1444 
1445 static void decode_block_params(DiracContext *s, DiracArith arith[8], DiracBlock *block,
1446  int stride, int x, int y)
1447 {
1448  int i;
1449 
1450  block->ref = pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF1);
1451  block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF1);
1452 
1453  if (s->num_refs == 2) {
1454  block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF2);
1455  block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF2) << 1;
1456  }
1457 
1458  if (!block->ref) {
1459  pred_block_dc(block, stride, x, y);
1460  for (i = 0; i < 3; i++)
1461  block->u.dc[i] += (unsigned)dirac_get_arith_int(arith+1+i, CTX_DC_F1, CTX_DC_DATA);
1462  return;
1463  }
1464 
1465  if (s->globalmc_flag) {
1466  block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_GLOBAL);
1467  block->ref ^= dirac_get_arith_bit(arith, CTX_GLOBAL_BLOCK) << 2;
1468  }
1469 
1470  for (i = 0; i < s->num_refs; i++)
1471  if (block->ref & (i+1)) {
1472  if (block->ref & DIRAC_REF_MASK_GLOBAL) {
1473  global_mv(s, block, x, y, i);
1474  } else {
1475  pred_mv(block, stride, x, y, i);
1476  block->u.mv[i][0] += (unsigned)dirac_get_arith_int(arith + 4 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
1477  block->u.mv[i][1] += (unsigned)dirac_get_arith_int(arith + 5 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
1478  }
1479  }
1480 }
1481 
1482 /**
1483  * Copies the current block to the other blocks covered by the current superblock split mode
1484  */
1485 static void propagate_block_data(DiracBlock *block, int stride, int size)
1486 {
1487  int x, y;
1488  DiracBlock *dst = block;
1489 
1490  for (x = 1; x < size; x++)
1491  dst[x] = *block;
1492 
1493  for (y = 1; y < size; y++) {
1494  dst += stride;
1495  for (x = 0; x < size; x++)
1496  dst[x] = *block;
1497  }
1498 }
1499 
1500 /**
1501  * Dirac Specification ->
1502  * 12. Block motion data syntax
1503  */
1504 static int dirac_unpack_block_motion_data(DiracContext *s)
1505 {
1506  GetBitContext *gb = &s->gb;
1507  uint8_t *sbsplit = s->sbsplit;
1508  int i, x, y, q, p;
1509  DiracArith arith[8];
1510 
1511  align_get_bits(gb);
1512 
1513  /* [DIRAC_STD] 11.2.4 and 12.2.1 Number of blocks and superblocks */
1514  s->sbwidth = DIVRNDUP(s->seq.width, 4*s->plane[0].xbsep);
1515  s->sbheight = DIVRNDUP(s->seq.height, 4*s->plane[0].ybsep);
1516  s->blwidth = 4 * s->sbwidth;
1517  s->blheight = 4 * s->sbheight;
1518 
1519  /* [DIRAC_STD] 12.3.1 Superblock splitting modes. superblock_split_modes()
1520  decode superblock split modes */
1521  ff_dirac_init_arith_decoder(arith, gb, get_interleaved_ue_golomb(gb)); /* get_interleaved_ue_golomb(gb) is the length */
1522  for (y = 0; y < s->sbheight; y++) {
1523  for (x = 0; x < s->sbwidth; x++) {
1524  unsigned int split = dirac_get_arith_uint(arith, CTX_SB_F1, CTX_SB_DATA);
1525  if (split > 2)
1526  return AVERROR_INVALIDDATA;
1527  sbsplit[x] = (split + pred_sbsplit(sbsplit+x, s->sbwidth, x, y)) % 3;
1528  }
1529  sbsplit += s->sbwidth;
1530  }
1531 
1532  /* setup arith decoding */
1533  ff_dirac_init_arith_decoder(arith, gb, get_interleaved_ue_golomb(gb));
1534  for (i = 0; i < s->num_refs; i++) {
1535  ff_dirac_init_arith_decoder(arith + 4 + 2 * i, gb, get_interleaved_ue_golomb(gb));
1536  ff_dirac_init_arith_decoder(arith + 5 + 2 * i, gb, get_interleaved_ue_golomb(gb));
1537  }
1538  for (i = 0; i < 3; i++)
1539  ff_dirac_init_arith_decoder(arith + 1 + i, gb, get_interleaved_ue_golomb(gb));
1540 
1541  for (y = 0; y < s->sbheight; y++)
1542  for (x = 0; x < s->sbwidth; x++) {
1543  int blkcnt = 1 << s->sbsplit[y * s->sbwidth + x];
1544  int step = 4 >> s->sbsplit[y * s->sbwidth + x];
1545 
1546  for (q = 0; q < blkcnt; q++)
1547  for (p = 0; p < blkcnt; p++) {
1548  int bx = 4 * x + p*step;
1549  int by = 4 * y + q*step;
1550  DiracBlock *block = &s->blmotion[by*s->blwidth + bx];
1551  decode_block_params(s, arith, block, s->blwidth, bx, by);
1552  propagate_block_data(block, s->blwidth, step);
1553  }
1554  }
1555 
1556  for (i = 0; i < 4 + 2*s->num_refs; i++) {
1557  if (arith[i].error)
1558  return arith[i].error;
1559  }
1560 
1561  return 0;
1562 }
1563 
1564 static int weight(int i, int blen, int offset)
1565 {
1566 #define ROLLOFF(i) offset == 1 ? ((i) ? 5 : 3) : \
1567  (1 + (6*(i) + offset - 1) / (2*offset - 1))
1568 
1569  if (i < 2*offset)
1570  return ROLLOFF(i);
1571  else if (i > blen-1 - 2*offset)
1572  return ROLLOFF(blen-1 - i);
1573  return 8;
1574 }
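/* OBMC weight ramp: within the 2*offset overlap region at each block edge the weight
 * ramps up (ROLLOFF), and in the non-overlapped interior it is a constant 8. The
 * per-pixel OBMC weight built by init_obmc_weight_row() below is the product wy * wx,
 * so it fits in a byte (maximum 8*8 = 64). */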
1575 
1576 static void init_obmc_weight_row(Plane *p, uint8_t *obmc_weight, int stride,
1577  int left, int right, int wy)
1578 {
1579  int x;
1580  for (x = 0; left && x < p->xblen >> 1; x++)
1581  obmc_weight[x] = wy*8;
1582  for (; x < p->xblen >> right; x++)
1583  obmc_weight[x] = wy*weight(x, p->xblen, p->xoffset);
1584  for (; x < p->xblen; x++)
1585  obmc_weight[x] = wy*8;
1586  for (; x < stride; x++)
1587  obmc_weight[x] = 0;
1588 }
1589 
1590 static void init_obmc_weight(Plane *p, uint8_t *obmc_weight, int stride,
1591  int left, int right, int top, int bottom)
1592 {
1593  int y;
1594  for (y = 0; top && y < p->yblen >> 1; y++) {
1595  init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
1596  obmc_weight += stride;
1597  }
1598  for (; y < p->yblen >> bottom; y++) {
1599  int wy = weight(y, p->yblen, p->yoffset);
1600  init_obmc_weight_row(p, obmc_weight, stride, left, right, wy);
1601  obmc_weight += stride;
1602  }
1603  for (; y < p->yblen; y++) {
1604  init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
1605  obmc_weight += stride;
1606  }
1607 }
1608 
1609 static void init_obmc_weights(DiracContext *s, Plane *p, int by)
1610 {
1611  int top = !by;
1612  int bottom = by == s->blheight-1;
1613 
1614  /* don't bother re-initing for rows 2 to blheight-2, the weights don't change */
1615  if (top || bottom || by == 1) {
1616  init_obmc_weight(p, s->obmc_weight[0], MAX_BLOCKSIZE, 1, 0, top, bottom);
1617  init_obmc_weight(p, s->obmc_weight[1], MAX_BLOCKSIZE, 0, 0, top, bottom);
1618  init_obmc_weight(p, s->obmc_weight[2], MAX_BLOCKSIZE, 0, 1, top, bottom);
1619  }
1620 }
1621 
1622 static const uint8_t epel_weights[4][4][4] = {
1623  {{ 16, 0, 0, 0 },
1624  { 12, 4, 0, 0 },
1625  { 8, 8, 0, 0 },
1626  { 4, 12, 0, 0 }},
1627  {{ 12, 0, 4, 0 },
1628  { 9, 3, 3, 1 },
1629  { 6, 6, 2, 2 },
1630  { 3, 9, 1, 3 }},
1631  {{ 8, 0, 8, 0 },
1632  { 6, 2, 6, 2 },
1633  { 4, 4, 4, 4 },
1634  { 2, 6, 2, 6 }},
1635  {{ 4, 0, 12, 0 },
1636  { 3, 1, 9, 3 },
1637  { 2, 2, 6, 6 },
1638  { 1, 3, 3, 9 }}
1639 };
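/* Bilinear blending weights for eighth-pel positions: epel_weights[my&3][mx&3] gives the
 * contribution of each of the up-to-four half-pel planes selected by the sub-pel MC
 * function below; every set of four weights sums to 16. */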
1640 
1641 /**
1642  * For block x,y, determine which of the hpel planes to do bilinear
1643  * interpolation from and set src[] to the location in each hpel plane
1644  * to MC from.
1645  *
1646  * @return the index of the put_dirac_pixels_tab function to use
1647  * 0 for 1 plane (fpel,hpel), 1 for 2 planes (qpel), 2 for 4 planes (qpel), and 3 for epel
1648  */
1649 static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t **src,
1650  int x, int y, int ref, int plane)
1651 {
1652  Plane *p = &s->plane[plane];
1653  uint8_t **ref_hpel = s->ref_pics[ref]->hpel[plane];
1654  int motion_x = block->u.mv[ref][0];
1655  int motion_y = block->u.mv[ref][1];
1656  int mx, my, i, epel, nplanes = 0;
1657 
1658  if (plane) {
1659  motion_x >>= s->chroma_x_shift;
1660  motion_y >>= s->chroma_y_shift;
1661  }
1662 
1663  mx = motion_x & ~(-1U << s->mv_precision);
1664  my = motion_y & ~(-1U << s->mv_precision);
1665  motion_x >>= s->mv_precision;
1666  motion_y >>= s->mv_precision;
1667  /* normalize subpel coordinates to epel */
1668  /* TODO: template this function? */
1669  mx <<= 3 - s->mv_precision;
1670  my <<= 3 - s->mv_precision;
1671 
1672  x += motion_x;
1673  y += motion_y;
1674  epel = (mx|my)&1;
1675 
1676  /* hpel position */
1677  if (!((mx|my)&3)) {
1678  nplanes = 1;
1679  src[0] = ref_hpel[(my>>1)+(mx>>2)] + y*p->stride + x;
1680  } else {
1681  /* qpel or epel */
1682  nplanes = 4;
1683  for (i = 0; i < 4; i++)
1684  src[i] = ref_hpel[i] + y*p->stride + x;
1685 
1686  /* if we're interpolating in the right/bottom halves, adjust the planes as needed
1687  we increment x/y because the edge changes for half of the pixels */
1688  if (mx > 4) {
1689  src[0] += 1;
1690  src[2] += 1;
1691  x++;
1692  }
1693  if (my > 4) {
1694  src[0] += p->stride;
1695  src[1] += p->stride;
1696  y++;
1697  }
1698 
1699  /* hpel planes are:
1700  [0]: F [1]: H
1701  [2]: V [3]: C */
1702  if (!epel) {
1703  /* check if we really only need 2 planes since either mx or my is
1704  a hpel position. (epel weights of 0 handle this there) */
1705  if (!(mx&3)) {
1706  /* mx == 0: average [0] and [2]
1707  mx == 4: average [1] and [3] */
1708  src[!mx] = src[2 + !!mx];
1709  nplanes = 2;
1710  } else if (!(my&3)) {
1711  src[0] = src[(my>>1) ];
1712  src[1] = src[(my>>1)+1];
1713  nplanes = 2;
1714  }
1715  } else {
1716  /* adjust the ordering if needed so the weights work */
1717  if (mx > 4) {
1718  FFSWAP(const uint8_t *, src[0], src[1]);
1719  FFSWAP(const uint8_t *, src[2], src[3]);
1720  }
1721  if (my > 4) {
1722  FFSWAP(const uint8_t *, src[0], src[2]);
1723  FFSWAP(const uint8_t *, src[1], src[3]);
1724  }
1725  src[4] = epel_weights[my&3][mx&3];
1726  }
1727  }
1728 
1729  /* fixme: v/h _edge_pos */
1730  if (x + p->xblen > p->width +EDGE_WIDTH/2 ||
1731  y + p->yblen > p->height+EDGE_WIDTH/2 ||
1732  x < 0 || y < 0) {
1733  for (i = 0; i < nplanes; i++) {
1734  s->vdsp.emulated_edge_mc(s->edge_emu_buffer[i], src[i],
1735  p->stride, p->stride,
1736  p->xblen, p->yblen, x, y,
1737  p->width+EDGE_WIDTH/2, p->height+EDGE_WIDTH/2);
1738  src[i] = s->edge_emu_buffer[i];
1739  }
1740  }
1741  return (nplanes>>1) + epel;
1742 }
1743 
1744 static void add_dc(uint16_t *dst, int dc, int stride,
1745  uint8_t *obmc_weight, int xblen, int yblen)
1746 {
1747  int x, y;
1748  dc += 128;
1749 
1750  for (y = 0; y < yblen; y++) {
1751  for (x = 0; x < xblen; x += 2) {
1752  dst[x ] += dc * obmc_weight[x ];
1753  dst[x+1] += dc * obmc_weight[x+1];
1754  }
1755  dst += stride;
1756  obmc_weight += MAX_BLOCKSIZE;
1757  }
1758 }
1759 
1760 static void block_mc(DiracContext *s, DiracBlock *block,
1761  uint16_t *mctmp, uint8_t *obmc_weight,
1762  int plane, int dstx, int dsty)
1763 {
1764  Plane *p = &s->plane[plane];
1765  const uint8_t *src[5];
1766  int idx;
1767 
1768  switch (block->ref&3) {
1769  case 0: /* DC */
1770  add_dc(mctmp, block->u.dc[plane], p->stride, obmc_weight, p->xblen, p->yblen);
1771  return;
1772  case 1:
1773  case 2:
1774  idx = mc_subpel(s, block, src, dstx, dsty, (block->ref&3)-1, plane);
1775  s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1776  if (s->weight_func)
1777  s->weight_func(s->mcscratch, p->stride, s->weight_log2denom,
1778  s->weight[0] + s->weight[1], p->yblen);
1779  break;
1780  case 3:
1781  idx = mc_subpel(s, block, src, dstx, dsty, 0, plane);
1782  s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1783  idx = mc_subpel(s, block, src, dstx, dsty, 1, plane);
1784  if (s->biweight_func) {
1785  /* fixme: +32 is a quick hack */
1786  s->put_pixels_tab[idx](s->mcscratch + 32, src, p->stride, p->yblen);
1787  s->biweight_func(s->mcscratch, s->mcscratch+32, p->stride, s->weight_log2denom,
1788  s->weight[0], s->weight[1], p->yblen);
1789  } else
1790  s->avg_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1791  break;
1792  }
1793  s->add_obmc(mctmp, s->mcscratch, p->stride, obmc_weight, p->yblen);
1794 }
1795 
1796 static void mc_row(DiracContext *s, DiracBlock *block, uint16_t *mctmp, int plane, int dsty)
1797 {
1798  Plane *p = &s->plane[plane];
1799  int x, dstx = p->xbsep - p->xoffset;
1800 
1801  block_mc(s, block, mctmp, s->obmc_weight[0], plane, -p->xoffset, dsty);
1802  mctmp += p->xbsep;
1803 
1804  for (x = 1; x < s->blwidth-1; x++) {
1805  block_mc(s, block+x, mctmp, s->obmc_weight[1], plane, dstx, dsty);
1806  dstx += p->xbsep;
1807  mctmp += p->xbsep;
1808  }
1809  block_mc(s, block+x, mctmp, s->obmc_weight[2], plane, dstx, dsty);
1810 }
1811 
1812 static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen, int yblen)
1813 {
1814  int idx = 0;
1815  if (xblen > 8)
1816  idx = 1;
1817  if (xblen > 16)
1818  idx = 2;
1819 
1820  memcpy(s->put_pixels_tab, s->diracdsp.put_dirac_pixels_tab[idx], sizeof(s->put_pixels_tab));
1821  memcpy(s->avg_pixels_tab, s->diracdsp.avg_dirac_pixels_tab[idx], sizeof(s->avg_pixels_tab));
1822  s->add_obmc = s->diracdsp.add_dirac_obmc[idx];
1823  if (s->weight_log2denom > 1 || s->weight[0] != 1 || s->weight[1] != 1) {
1824  s->weight_func = s->diracdsp.weight_dirac_pixels_tab[idx];
1825  s->biweight_func = s->diracdsp.biweight_dirac_pixels_tab[idx];
1826  } else {
1827  s->weight_func = NULL;
1828  s->biweight_func = NULL;
1829  }
1830 }
1831 
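/* Lazily build the half-pel planes for one component of a reference frame:
 * hpel[plane][0] aliases the frame data itself, planes 1-3 are allocated on
 * first use and filled by the hpel filter, and all four get their edges drawn
 * so that sub-pel motion compensation can read slightly outside the picture. */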
1832 static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
1833 {
1834  /* chroma allocates an edge of 8 when subsampled
1835  which for 4:2:2 means an h edge of 16 and v edge of 8
1836  just use 8 for everything for the moment */
1837  int i, edge = EDGE_WIDTH/2;
1838 
1839  ref->hpel[plane][0] = ref->avframe->data[plane];
1840  s->mpvencdsp.draw_edges(ref->hpel[plane][0], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM); /* EDGE_TOP | EDGE_BOTTOM values just copied to make it build, this needs to be ensured */
1841 
1842  /* no need for hpel if we only have fpel vectors */
1843  if (!s->mv_precision)
1844  return 0;
1845 
1846  for (i = 1; i < 4; i++) {
1847  if (!ref->hpel_base[plane][i])
1848  ref->hpel_base[plane][i] = av_malloc((height+2*edge) * ref->avframe->linesize[plane] + 32);
1849  if (!ref->hpel_base[plane][i]) {
1850  return AVERROR(ENOMEM);
1851  }
1852  /* we need to be 16-byte aligned even for chroma */
1853  ref->hpel[plane][i] = ref->hpel_base[plane][i] + edge*ref->avframe->linesize[plane] + 16;
1854  }
1855 
1856  if (!ref->interpolated[plane]) {
1857  s->diracdsp.dirac_hpel_filter(ref->hpel[plane][1], ref->hpel[plane][2],
1858  ref->hpel[plane][3], ref->hpel[plane][0],
1859  ref->avframe->linesize[plane], width, height);
1860  s->mpvencdsp.draw_edges(ref->hpel[plane][1], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1861  s->mpvencdsp.draw_edges(ref->hpel[plane][2], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1862  s->mpvencdsp.draw_edges(ref->hpel[plane][3], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1863  }
1864  ref->interpolated[plane] = 1;
1865 
1866  return 0;
1867 }
1868 
1869 /**
1870  * Dirac Specification ->
1871  * 13.0 Transform data syntax. transform_data()
1872  */
1873 static int dirac_decode_frame_internal(DiracContext *s)
1874 {
1875  DWTContext d;
1876  int y, i, comp, dsty;
1877  int ret;
1878 
1879  if (s->low_delay) {
1880  /* [DIRAC_STD] 13.5.1 low_delay_transform_data() */
1881  if (!s->hq_picture) {
1882  for (comp = 0; comp < 3; comp++) {
1883  Plane *p = &s->plane[comp];
1884  memset(p->idwt.buf, 0, p->idwt.stride * p->idwt.height);
1885  }
1886  }
1887  if (!s->zero_res) {
1888  if ((ret = decode_lowdelay(s)) < 0)
1889  return ret;
1890  }
1891  }
1892 
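 /* For each component: decode and inverse-transform the residual, then either
  * clamp it straight into the output frame (intra) or motion-compensate one
  * row of blocks at a time and add the sliced IDWT output to it (inter). */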
1893  for (comp = 0; comp < 3; comp++) {
1894  Plane *p = &s->plane[comp];
1895  uint8_t *frame = s->current_picture->avframe->data[comp];
1896 
1897  /* FIXME: small resolutions */
1898  for (i = 0; i < 4; i++)
1899  s->edge_emu_buffer[i] = s->edge_emu_buffer_base + i*FFALIGN(p->width, 16);
1900 
1901  if (!s->zero_res && !s->low_delay)
1902  {
1903  memset(p->idwt.buf, 0, p->idwt.stride * p->idwt.height);
1904  ret = decode_component(s, comp); /* [DIRAC_STD] 13.4.1 core_transform_data() */
1905  if (ret < 0)
1906  return ret;
1907  }
1908  ret = ff_spatial_idwt_init(&d, &p->idwt, s->wavelet_idx+2,
1909  s->wavelet_depth, s->bit_depth);
1910  if (ret < 0)
1911  return ret;
1912 
1913  if (!s->num_refs) { /* intra */
1914  for (y = 0; y < p->height; y += 16) {
1915  int idx = (s->bit_depth - 8) >> 1;
1916  ff_spatial_idwt_slice2(&d, y+16); /* decode */
1917  s->diracdsp.put_signed_rect_clamped[idx](frame + y*p->stride,
1918  p->stride,
1919  p->idwt.buf + y*p->idwt.stride,
1920  p->idwt.stride, p->width, 16);
1921  }
1922  } else { /* inter */
1923  int rowheight = p->ybsep*p->stride;
1924 
1925  select_dsp_funcs(s, p->width, p->height, p->xblen, p->yblen);
1926 
1927  for (i = 0; i < s->num_refs; i++) {
1928  int ret = interpolate_refplane(s, s->ref_pics[i], comp, p->width, p->height);
1929  if (ret < 0)
1930  return ret;
1931  }
1932 
1933  memset(s->mctmp, 0, 4*p->yoffset*p->stride);
1934 
1935  dsty = -p->yoffset;
1936  for (y = 0; y < s->blheight; y++) {
1937  int h = 0,
1938  start = FFMAX(dsty, 0);
1939  uint16_t *mctmp = s->mctmp + y*rowheight;
1940  DiracBlock *blocks = s->blmotion + y*s->blwidth;
1941 
1942  init_obmc_weights(s, p, y);
1943 
1944  if (y == s->blheight-1 || start+p->ybsep > p->height)
1945  h = p->height - start;
1946  else
1947  h = p->ybsep - (start - dsty);
1948  if (h < 0)
1949  break;
1950 
1951  memset(mctmp+2*p->yoffset*p->stride, 0, 2*rowheight);
1952  mc_row(s, blocks, mctmp, comp, dsty);
1953 
1954  mctmp += (start - dsty)*p->stride + p->xoffset;
1955  ff_spatial_idwt_slice2(&d, start + h); /* decode */
1956  /* NOTE: add_rect_clamped hasn't been templated hence the shifts.
1957  * idwt.stride is passed as pixels, not in bytes as in the rest of the decoder */
1958  s->diracdsp.add_rect_clamped(frame + start*p->stride, mctmp, p->stride,
1959  (int16_t*)(p->idwt.buf) + start*(p->idwt.stride >> 1), (p->idwt.stride >> 1), p->width, h);
1960 
1961  dsty += p->ybsep;
1962  }
1963  }
1964  }
1965 
1966 
1967  return 0;
1968 }
1969 
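/* Allocate a frame with an EDGE_WIDTH border on every side and advance the
 * data pointers past the top-left corner; the padding leaves room for the
 * edge extension drawn later for motion compensation. */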
1970 static int get_buffer_with_edge(AVCodecContext *avctx, AVFrame *f, int flags)
1971 {
1972  int ret, i;
1973  int chroma_x_shift, chroma_y_shift;
1974  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_x_shift,
1975  &chroma_y_shift);
1976  if (ret < 0)
1977  return ret;
1978 
1979  f->width = avctx->width + 2 * EDGE_WIDTH;
1980  f->height = avctx->height + 2 * EDGE_WIDTH + 2;
1981  ret = ff_get_buffer(avctx, f, flags);
1982  if (ret < 0)
1983  return ret;
1984 
1985  for (i = 0; f->data[i]; i++) {
1986  int offset = (EDGE_WIDTH >> (i && i<3 ? chroma_y_shift : 0)) *
1987  f->linesize[i] + 32;
1988  f->data[i] += offset;
1989  }
1990  f->width = avctx->width;
1991  f->height = avctx->height;
1992 
1993  return 0;
1994 }
1995 
1996 /**
1997  * Dirac Specification ->
1998  * 11.1.1 Picture Header. picture_header()
1999  */
2000 static int dirac_decode_picture_header(DiracContext *s)
2001 {
2002  unsigned retire, picnum;
2003  int i, j, ret;
2004  int64_t refdist, refnum;
2005  GetBitContext *gb = &s->gb;
2006 
2007  /* [DIRAC_STD] 11.1.1 Picture Header. picture_header() PICTURE_NUM */
2008  picnum = s->current_picture->avframe->display_picture_number = get_bits_long(gb, 32);
2009 
2010 
2011  av_log(s->avctx,AV_LOG_DEBUG,"PICTURE_NUM: %d\n",picnum);
2012 
2013  /* if this is the first keyframe after a sequence header, start our
2014  reordering from here */
2015  if (s->frame_number < 0)
2016  s->frame_number = picnum;
2017 
2018  s->ref_pics[0] = s->ref_pics[1] = NULL;
2019  for (i = 0; i < s->num_refs; i++) {
2020  refnum = (picnum + dirac_get_se_golomb(gb)) & 0xFFFFFFFF;
2021  refdist = INT64_MAX;
2022 
2023  /* find the closest reference to the one we want */
2024  /* Jordi: this is needed if the referenced picture hasn't yet arrived */
2025  for (j = 0; j < MAX_REFERENCE_FRAMES && refdist; j++)
2026  if (s->ref_frames[j]
2027  && FFABS(s->ref_frames[j]->avframe->display_picture_number - refnum) < refdist) {
2028  s->ref_pics[i] = s->ref_frames[j];
2029  refdist = FFABS(s->ref_frames[j]->avframe->display_picture_number - refnum);
2030  }
2031 
2032  if (!s->ref_pics[i] || refdist)
2033  av_log(s->avctx, AV_LOG_DEBUG, "Reference not found\n");
2034 
2035  /* if there were no references at all, allocate one */
2036  if (!s->ref_pics[i])
2037  for (j = 0; j < MAX_FRAMES; j++)
2038  if (!s->all_frames[j].avframe->data[0]) {
2039  s->ref_pics[i] = &s->all_frames[j];
2040  ret = get_buffer_with_edge(s->avctx, s->ref_pics[i]->avframe, AV_GET_BUFFER_FLAG_REF);
2041  if (ret < 0)
2042  return ret;
2043  break;
2044  }
2045 
2046  if (!s->ref_pics[i]) {
2047  av_log(s->avctx, AV_LOG_ERROR, "Reference could not be allocated\n");
2048  return AVERROR_INVALIDDATA;
2049  }
2050 
2051  }
2052 
2053  /* retire the reference frames that are not used anymore */
2054  if (s->current_picture->reference) {
2055  retire = (picnum + dirac_get_se_golomb(gb)) & 0xFFFFFFFF;
2056  if (retire != picnum) {
2057  DiracFrame *retire_pic = remove_frame(s->ref_frames, retire);
2058 
2059  if (retire_pic)
2060  retire_pic->reference &= DELAYED_PIC_REF;
2061  else
2062  av_log(s->avctx, AV_LOG_DEBUG, "Frame to retire not found\n");
2063  }
2064 
2065  /* if reference array is full, remove the oldest as per the spec */
2066  while (add_frame(s->ref_frames, MAX_REFERENCE_FRAMES, s->current_picture)) {
2067  av_log(s->avctx, AV_LOG_ERROR, "Reference frame overflow\n");
2068  remove_frame(s->ref_frames, s->ref_frames[0]->avframe->display_picture_number)->reference &= DELAYED_PIC_REF;
2069  }
2070  }
2071 
2072  if (s->num_refs) {
2073  ret = dirac_unpack_prediction_parameters(s); /* [DIRAC_STD] 11.2 Picture Prediction Data. picture_prediction() */
2074  if (ret < 0)
2075  return ret;
2076  ret = dirac_unpack_block_motion_data(s); /* [DIRAC_STD] 12. Block motion data syntax */
2077  if (ret < 0)
2078  return ret;
2079  }
2080  ret = dirac_unpack_idwt_params(s); /* [DIRAC_STD] 11.3 Wavelet transform data */
2081  if (ret < 0)
2082  return ret;
2083 
2084  init_planes(s);
2085  return 0;
2086 }
2087 
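/* Flush helper: hand out the pending delayed frame with the lowest picture
 * number, compact the delay list, and clear its DELAYED_PIC_REF flag. */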
2088 static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
2089 {
2090  DiracFrame *out = s->delay_frames[0];
2091  int i, out_idx = 0;
2092  int ret;
2093 
2094  /* find frame with lowest picture number */
2095  for (i = 1; s->delay_frames[i]; i++)
2096  if (s->delay_frames[i]->avframe->display_picture_number < out->avframe->display_picture_number) {
2097  out = s->delay_frames[i];
2098  out_idx = i;
2099  }
2100 
2101  for (i = out_idx; s->delay_frames[i]; i++)
2102  s->delay_frames[i] = s->delay_frames[i+1];
2103 
2104  if (out) {
2105  out->reference ^= DELAYED_PIC_REF;
2106  if((ret = av_frame_ref(picture, out->avframe)) < 0)
2107  return ret;
2108  *got_frame = 1;
2109  }
2110 
2111  return 0;
2112 }
2113 
2114 /**
2115  * Dirac Specification ->
2116  * 9.6 Parse Info Header Syntax. parse_info()
2117  * 4 byte start code + byte parse code + 4 byte size + 4 byte previous size
2118  */
2119 #define DATA_UNIT_HEADER_SIZE 13
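/* i.e. the 13 bytes skipped before payload parsing:
 *   bytes 0-3   'B','B','C','D'  parse info prefix (searched for in dirac_decode_frame)
 *   byte  4     parse code       (read below as parse_code = buf[4])
 *   bytes 5-8   data unit size   (read with AV_RB32 at offset 5 in dirac_decode_frame)
 *   bytes 9-12  size of the previous data unit
 */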
2120 
2121 /* [DIRAC_STD] dirac_decode_data_unit makes reference to the while defined in 9.3
2122  inside the function parse_sequence() */
2123 static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size)
2124 {
2125  DiracContext *s = avctx->priv_data;
2126  DiracFrame *pic = NULL;
2127  AVDiracSeqHeader *dsh;
2128  int ret, i;
2129  uint8_t parse_code;
2130  unsigned tmp;
2131 
2132  if (size < DATA_UNIT_HEADER_SIZE)
2133  return AVERROR_INVALIDDATA;
2134 
2135  parse_code = buf[4];
2136 
2137  init_get_bits(&s->gb, &buf[13], 8*(size - DATA_UNIT_HEADER_SIZE));
2138 
2139  if (parse_code == DIRAC_PCODE_SEQ_HEADER) {
2140  if (s->seen_sequence_header)
2141  return 0;
2142 
2143  /* [DIRAC_STD] 10. Sequence header */
2144  ret = av_dirac_parse_sequence_header(&dsh, buf + DATA_UNIT_HEADER_SIZE, size - DATA_UNIT_HEADER_SIZE, avctx);
2145  if (ret < 0) {
2146  av_log(avctx, AV_LOG_ERROR, "error parsing sequence header");
2147  return ret;
2148  }
2149 
2150  if (CALC_PADDING((int64_t)dsh->width, MAX_DWT_LEVELS) * CALC_PADDING((int64_t)dsh->height, MAX_DWT_LEVELS) * 5LL > avctx->max_pixels)
2151  ret = AVERROR(ERANGE);
2152  if (ret >= 0)
2153  ret = ff_set_dimensions(avctx, dsh->width, dsh->height);
2154  if (ret < 0) {
2155  av_freep(&dsh);
2156  return ret;
2157  }
2158 
2159  ff_set_sar(avctx, dsh->sample_aspect_ratio);
2160  avctx->pix_fmt = dsh->pix_fmt;
2161  avctx->color_range = dsh->color_range;
2162  avctx->color_trc = dsh->color_trc;
2163  avctx->color_primaries = dsh->color_primaries;
2164  avctx->colorspace = dsh->colorspace;
2165  avctx->profile = dsh->profile;
2166  avctx->level = dsh->level;
2167  avctx->framerate = dsh->framerate;
2168  s->bit_depth = dsh->bit_depth;
2169  s->version.major = dsh->version.major;
2170  s->version.minor = dsh->version.minor;
2171  s->seq = *dsh;
2172  av_freep(&dsh);
2173 
2174  s->pshift = s->bit_depth > 8;
2175 
2176  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
2177  &s->chroma_x_shift,
2178  &s->chroma_y_shift);
2179  if (ret < 0)
2180  return ret;
2181 
2182  ret = alloc_sequence_buffers(s);
2183  if (ret < 0)
2184  return ret;
2185 
2186  s->seen_sequence_header = 1;
2187  } else if (parse_code == DIRAC_PCODE_END_SEQ) { /* [DIRAC_STD] End of Sequence */
2188  free_sequence_buffers(s);
2189  s->seen_sequence_header = 0;
2190  } else if (parse_code == DIRAC_PCODE_AUX) {
2191  if (buf[13] == 1) { /* encoder implementation/version */
2192  int ver[3];
2193  /* versions older than 1.0.8 don't store quant delta for
2194  subbands with only one codeblock */
2195  if (sscanf(buf+14, "Schroedinger %d.%d.%d", ver, ver+1, ver+2) == 3)
2196  if (ver[0] == 1 && ver[1] == 0 && ver[2] <= 7)
2197  s->old_delta_quant = 1;
2198  }
2199  } else if (parse_code & 0x8) { /* picture data unit */
2200  if (!s->seen_sequence_header) {
2201  av_log(avctx, AV_LOG_DEBUG, "Dropping frame without sequence header\n");
2202  return AVERROR_INVALIDDATA;
2203  }
2204 
2205  /* find an unused frame */
2206  for (i = 0; i < MAX_FRAMES; i++)
2207  if (s->all_frames[i].avframe->data[0] == NULL)
2208  pic = &s->all_frames[i];
2209  if (!pic) {
2210  av_log(avctx, AV_LOG_ERROR, "framelist full\n");
2211  return AVERROR_INVALIDDATA;
2212  }
2213 
2214  av_frame_unref(pic->avframe);
2215 
2216  /* [DIRAC_STD] Defined in 9.6.1 ... */
2217  tmp = parse_code & 0x03; /* [DIRAC_STD] num_refs() */
2218  if (tmp > 2) {
2219  av_log(avctx, AV_LOG_ERROR, "num_refs of 3\n");
2220  return AVERROR_INVALIDDATA;
2221  }
2222  s->num_refs = tmp;
2223  s->is_arith = (parse_code & 0x48) == 0x08; /* [DIRAC_STD] using_ac() */
2224  s->low_delay = (parse_code & 0x88) == 0x88; /* [DIRAC_STD] is_low_delay() */
2225  s->core_syntax = (parse_code & 0x88) == 0x08; /* [DIRAC_STD] is_core_syntax() */
2226  s->ld_picture = (parse_code & 0xF8) == 0xC8; /* [DIRAC_STD] is_ld_picture() */
2227  s->hq_picture = (parse_code & 0xF8) == 0xE8; /* [DIRAC_STD] is_hq_picture() */
2228  s->dc_prediction = (parse_code & 0x28) == 0x08; /* [DIRAC_STD] using_dc_prediction() */
2229  pic->reference = (parse_code & 0x0C) == 0x0C; /* [DIRAC_STD] is_reference() */
2230  pic->avframe->key_frame = s->num_refs == 0; /* [DIRAC_STD] is_intra() */
2231  pic->avframe->pict_type = s->num_refs + 1; /* Definition of AVPictureType in avutil.h */
2232 
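 /* Recap of the parse_code bits tested above: bits 0-1 give the number of
  * references, bits 2-3 both set mark a reference picture, bit 3 marks
  * picture data (checked before entering this branch), bit 6 clear selects
  * arithmetic coefficient coding, and bit 7 selects the low-delay/HQ
  * syntax family instead of the core syntax. */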
2233  /* VC-2 Low Delay has a different parse code than the Dirac Low Delay */
2234  if (s->version.minor == 2 && parse_code == 0x88)
2235  s->ld_picture = 1;
2236 
2237  if (s->low_delay && !(s->ld_picture || s->hq_picture) ) {
2238  av_log(avctx, AV_LOG_ERROR, "Invalid low delay flag\n");
2239  return AVERROR_INVALIDDATA;
2240  }
2241 
2242  if ((ret = get_buffer_with_edge(avctx, pic->avframe, (parse_code & 0x0C) == 0x0C ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
2243  return ret;
2244  s->current_picture = pic;
2245  s->plane[0].stride = pic->avframe->linesize[0];
2246  s->plane[1].stride = pic->avframe->linesize[1];
2247  s->plane[2].stride = pic->avframe->linesize[2];
2248 
2249  if (alloc_buffers(s, FFMAX3(FFABS(s->plane[0].stride), FFABS(s->plane[1].stride), FFABS(s->plane[2].stride))) < 0)
2250  return AVERROR(ENOMEM);
2251 
2252  /* [DIRAC_STD] 11.1 Picture parse. picture_parse() */
2253  ret = dirac_decode_picture_header(s);
2254  if (ret < 0)
2255  return ret;
2256 
2257  /* [DIRAC_STD] 13.0 Transform data syntax. transform_data() */
2258  ret = dirac_decode_frame_internal(s);
2259  if (ret < 0)
2260  return ret;
2261  }
2262  return 0;
2263 }
2264 
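/* Top-level decode callback: scan the packet for "BBCD"-delimited data units,
 * decode each one, then reorder the decoded picture against frame_number,
 * queueing it as a delayed frame or returning it when its turn comes. */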
2265 static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
2266 {
2267  DiracContext *s = avctx->priv_data;
2268  AVFrame *picture = data;
2269  uint8_t *buf = pkt->data;
2270  int buf_size = pkt->size;
2271  int i, buf_idx = 0;
2272  int ret;
2273  unsigned data_unit_size;
2274 
2275  /* release unused frames */
2276  for (i = 0; i < MAX_FRAMES; i++)
2277  if (s->all_frames[i].avframe->data[0] && !s->all_frames[i].reference) {
2278  av_frame_unref(s->all_frames[i].avframe);
2279  memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
2280  }
2281 
2282  s->current_picture = NULL;
2283  *got_frame = 0;
2284 
2285  /* end of stream, so flush delayed pics */
2286  if (buf_size == 0)
2287  return get_delayed_pic(s, (AVFrame *)data, got_frame);
2288 
2289  for (;;) {
2290  /*[DIRAC_STD] Here starts the code from parse_info() defined in 9.6
2291  [DIRAC_STD] PARSE_INFO_PREFIX = "BBCD" as defined in ISO/IEC 646
2292  BBCD start code search */
2293  for (; buf_idx + DATA_UNIT_HEADER_SIZE < buf_size; buf_idx++) {
2294  if (buf[buf_idx ] == 'B' && buf[buf_idx+1] == 'B' &&
2295  buf[buf_idx+2] == 'C' && buf[buf_idx+3] == 'D')
2296  break;
2297  }
2298  /* BBCD found or end of data */
2299  if (buf_idx + DATA_UNIT_HEADER_SIZE >= buf_size)
2300  break;
2301 
2302  data_unit_size = AV_RB32(buf+buf_idx+5);
2303  if (data_unit_size > buf_size - buf_idx || !data_unit_size) {
2304  if(data_unit_size > buf_size - buf_idx)
2305  av_log(s->avctx, AV_LOG_ERROR,
2306  "Data unit with size %d is larger than input buffer, discarding\n",
2307  data_unit_size);
2308  buf_idx += 4;
2309  continue;
2310  }
2311  /* [DIRAC_STD] dirac_decode_data_unit makes reference to the while defined in 9.3 inside the function parse_sequence() */
2312  ret = dirac_decode_data_unit(avctx, buf+buf_idx, data_unit_size);
2313  if (ret < 0)
2314  {
2315  av_log(s->avctx, AV_LOG_ERROR,"Error in dirac_decode_data_unit\n");
2316  return ret;
2317  }
2318  buf_idx += data_unit_size;
2319  }
2320 
2321  if (!s->current_picture)
2322  return buf_size;
2323 
2324  if (s->current_picture->avframe->display_picture_number > s->frame_number) {
2325  DiracFrame *delayed_frame = remove_frame(s->delay_frames, s->frame_number);
2326 
2327  s->current_picture->reference |= DELAYED_PIC_REF;
2328 
2329  if (add_frame(s->delay_frames, MAX_DELAY, s->current_picture)) {
2330  int min_num = s->delay_frames[0]->avframe->display_picture_number;
2331  /* Too many delayed frames, so we display the frame with the lowest pts */
2332  av_log(avctx, AV_LOG_ERROR, "Delay frame overflow\n");
2333 
2334  for (i = 1; s->delay_frames[i]; i++)
2335  if (s->delay_frames[i]->avframe->display_picture_number < min_num)
2336  min_num = s->delay_frames[i]->avframe->display_picture_number;
2337 
2338  delayed_frame = remove_frame(s->delay_frames, min_num);
2339  add_frame(s->delay_frames, MAX_DELAY, s->current_picture);
2340  }
2341 
2342  if (delayed_frame) {
2343  delayed_frame->reference ^= DELAYED_PIC_REF;
2344  if((ret=av_frame_ref(data, delayed_frame->avframe)) < 0)
2345  return ret;
2346  *got_frame = 1;
2347  }
2348  } else if (s->current_picture->avframe->display_picture_number == s->frame_number) {
2349  /* The right frame at the right time :-) */
2350  if((ret=av_frame_ref(data, s->current_picture->avframe)) < 0)
2351  return ret;
2352  *got_frame = 1;
2353  }
2354 
2355  if (*got_frame)
2356  s->frame_number = picture->display_picture_number + 1LL;
2357 
2358  return buf_idx;
2359 }
2360 
2361 AVCodec ff_dirac_decoder = {
2362  .name = "dirac",
2363  .long_name = NULL_IF_CONFIG_SMALL("BBC Dirac VC-2"),
2364  .type = AVMEDIA_TYPE_VIDEO,
2365  .id = AV_CODEC_ID_DIRAC,
2366  .priv_data_size = sizeof(DiracContext),
2367  .init = dirac_decode_init,
2368  .close = dirac_decode_end,
2369  .decode = dirac_decode_frame,
2370  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2371  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
2372  .flush = dirac_decode_flush,
2373 };
DWTPlane::buf
uint8_t * buf
Definition: dirac_dwt.h:41
DATA_UNIT_HEADER_SIZE
#define DATA_UNIT_HEADER_SIZE
Dirac Specification -> 9.6 Parse Info Header Syntax.
Definition: diracdec.c:2119
AVCodec
AVCodec.
Definition: avcodec.h:3481
DiracContext::put_pixels_tab
void(* put_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
Definition: diracdec.c:228
DiracContext::blmotion
DiracBlock * blmotion
Definition: diracdec.c:217
av_dirac_parse_sequence_header
int av_dirac_parse_sequence_header(AVDiracSeqHeader **pdsh, const uint8_t *buf, size_t buf_size, void *log_ctx)
Parse a Dirac sequence header.
Definition: dirac.c:402
stride
int stride
Definition: mace.c:144
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
DiracContext::num_y
unsigned num_y
Definition: diracdec.c:174
ff_dirac_golomb_read_32bit
int ff_dirac_golomb_read_32bit(DiracGolombLUT *lut_ctx, const uint8_t *buf, int bytes, uint8_t *_dst, int coeffs)
Definition: dirac_vlc.c:42
level
uint8_t level
Definition: svq3.c:207
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
DiracContext::globalmc
struct DiracContext::@78 globalmc[2]
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
DiracContext::blwidth
int blwidth
Definition: diracdec.c:211
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DiracVersionInfo
Definition: dirac.h:76
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2193
SliceCoeffs::left
int left
Definition: diracdec.c:817
out
FILE * out
Definition: movenc.c:54
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
comp
static void comp(unsigned char *dst, ptrdiff_t dst_stride, unsigned char *src, ptrdiff_t src_stride, int add)
Definition: eamad.c:83
n
int n
Definition: avisynth_c.h:760
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:252
thread.h
DiracBlock::ref
uint8_t ref
Definition: diracdec.c:87
subband_hh
@ subband_hh
Definition: diracdec.c:246
CTX_MV_DATA
#define CTX_MV_DATA
Definition: dirac_arith.h:71
MAX_DWT_LEVELS
#define MAX_DWT_LEVELS
The spec limits the number of wavelet decompositions to 4 for both level 1 (VC-2) and 128 (long-gop d...
Definition: dirac.h:45
free_sequence_buffers
static void free_sequence_buffers(DiracContext *s)
Definition: diracdec.c:352
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
epel_weights
static const uint8_t epel_weights[4][4][4]
Definition: diracdec.c:1622
AV_CODEC_ID_DIRAC
@ AV_CODEC_ID_DIRAC
Definition: avcodec.h:334
dirac_decode_picture_header
static int dirac_decode_picture_header(DiracContext *s)
Dirac Specification -> 11.1.1 Picture Header.
Definition: diracdec.c:2000
SliceCoeffs::tot
int tot
Definition: diracdec.c:821
mv
static const int8_t mv[256][2]
Definition: 4xm.c:77
DiracContext::wavelet_idx
unsigned wavelet_idx
Definition: diracdec.c:164
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
dirac_unpack_prediction_parameters
static int dirac_unpack_prediction_parameters(DiracContext *s)
Unpack the motion compensation parameters Dirac Specification -> 11.2 Picture prediction data.
Definition: diracdec.c:1106
DIRAC_REF_MASK_REF1
#define DIRAC_REF_MASK_REF1
DiracBlock->ref flags, if set then the block does MC from the given ref.
Definition: diracdec.c:59
DiracContext::avctx
AVCodecContext * avctx
Definition: diracdec.c:135
get_interleaved_ue_golomb
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:143
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
AVDiracSeqHeader::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: dirac.h:109
DiracVersionInfo::major
int major
Definition: dirac.h:77
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
pixdesc.h
SubBand::stride
int stride
Definition: diracdec.c:93
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
DWTPlane
Definition: dirac_dwt.h:37
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:2186
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
AVPacket::data
uint8_t * data
Definition: avcodec.h:1477
SubBand::width
int width
Definition: cfhd.h:49
SubBand::level
int level
Definition: cfhd.h:45
DiracContext::biweight_func
dirac_biweight_func biweight_func
Definition: diracdec.c:232
CTX_SB_F1
#define CTX_SB_F1
Definition: dirac_arith.h:65
CTX_ZERO_BLOCK
@ CTX_ZERO_BLOCK
Definition: dirac_arith.h:54
b
#define b
Definition: input.c:41
data
const char data[16]
Definition: mxf.c:91
DiracContext::perspective_exp
unsigned perspective_exp
Definition: diracdec.c:203
DiracContext::bit_depth
int bit_depth
Definition: diracdec.c:149
ff_dirac_decoder
AVCodec ff_dirac_decoder
Definition: diracdec.c:2361
decode_lowdelay_slice
static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
Dirac Specification -> 13.5.2 Slices.
Definition: diracdec.c:778
DiracContext::mpvencdsp
MpegvideoEncDSPContext mpvencdsp
Definition: diracdec.c:136
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:191
dirac_biweight_func
void(* dirac_biweight_func)(uint8_t *dst, const uint8_t *src, int stride, int log2_denom, int weightd, int weights, int h)
Definition: diracdsp.h:28
init_planes
static void init_planes(DiracContext *s)
Definition: diracdec.c:1052
dirac_dwt.h
mpegvideo.h
DIRAC_REF_MASK_GLOBAL
#define DIRAC_REF_MASK_GLOBAL
Definition: diracdec.c:61
dirac_arith_init
static AVOnce dirac_arith_init
Definition: diracdec.c:384
DiracContext::delay_frames
DiracFrame * delay_frames[MAX_DELAY+1]
Definition: diracdec.c:238
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
ff_dirac_qscale_tab
const int32_t ff_dirac_qscale_tab[116]
Definition: diractab.c:34
DiracContext::lowdelay
struct DiracContext::@76 lowdelay
AVDiracSeqHeader::color_range
enum AVColorRange color_range
Definition: dirac.h:107
MAX_DELAY
#define MAX_DELAY
Definition: diracdec.c:51
DiracArith
Definition: dirac_arith.h:75
dirac_get_arith_int
static int dirac_get_arith_int(DiracArith *c, int follow_ctx, int data_ctx)
Definition: dirac_arith.h:191
codeblock
static int codeblock(DiracContext *s, SubBand *b, GetBitContext *gb, DiracArith *c, int left, int right, int top, int bottom, int blockcnt_one, int is_arith)
Decode the coeffs in the rectangle defined by left, right, top, bottom [DIRAC_STD] 13....
Definition: diracdec.c:491
CHECKEDREAD
#define CHECKEDREAD(dst, cond, errmsg)
DiracContext::mcscratch
uint8_t * mcscratch
Definition: diracdec.c:223
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
AVDiracSeqHeader::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: dirac.h:104
DiracContext::current_picture
DiracFrame * current_picture
Definition: diracdec.c:234
alloc_buffers
static int alloc_buffers(DiracContext *s, int stride)
Definition: diracdec.c:323
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
ff_dirac_golomb_reader_init
av_cold int ff_dirac_golomb_reader_init(DiracGolombLUT **lut_ctx)
Definition: dirac_vlc.c:232
DiracContext::zrs
int zrs[2][2]
Definition: diracdec.c:200
diractab.h
ff_dirac_default_qmat
const uint8_t ff_dirac_default_qmat[7][4][4]
Definition: diractab.c:24
A
#define A(x)
Definition: vp56_arith.h:28
DiracFrame
Definition: diracdec.c:74
CTX_DC_F1
#define CTX_DC_F1
Definition: dirac_arith.h:72
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:232
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:3105
golomb.h
exp golomb vlc stuff
decode_subband_arith
static int decode_subband_arith(AVCodecContext *avctx, void *b)
Definition: diracdec.c:643
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
AVDiracSeqHeader::level
int level
Definition: dirac.h:101
SubBand::parent
struct SubBand * parent
Definition: diracdec.c:99
subband_lh
@ subband_lh
Definition: diracdec.c:245
DiracContext::ld_picture
int ld_picture
Definition: diracdec.c:157
DiracContext::edge_emu_buffer
uint8_t * edge_emu_buffer[4]
Definition: diracdec.c:219
AVDiracSeqHeader::version
DiracVersionInfo version
Definition: dirac.h:112
DiracContext::num_refs
int num_refs
Definition: diracdec.c:160
ff_spatial_idwt_init
int ff_spatial_idwt_init(DWTContext *d, DWTPlane *p, enum dwt_type type, int decomposition_count, int bit_depth)
Definition: dirac_dwt.c:36
U
#define U(x)
Definition: vp56_arith.h:37
DiracContext::sbheight
int sbheight
Definition: diracdec.c:214
start
void INT64 start
Definition: avisynth_c.h:767
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:2824
FFSIGN
#define FFSIGN(a)
Definition: common.h:73
plane
int plane
Definition: avisynth_c.h:384
GetBitContext
Definition: get_bits.h:61
DiracContext::blheight
int blheight
Definition: diracdec.c:212
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:373
Plane::band
SubBand band[DWT_LEVELS][4]
Definition: cfhd.h:69
dirac_unpack_block_motion_data
static int dirac_unpack_block_motion_data(DiracContext *s)
Dirac Specification ->
Definition: diracdec.c:1504
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2550
decode_component
static int decode_component(DiracContext *s, int comp)
Dirac Specification -> [DIRAC_STD] 13.4.1 core_transform_data()
Definition: diracdec.c:660
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
DiracSlice::gb
GetBitContext gb
Definition: diracdec.c:128
pred_sbsplit
static int pred_sbsplit(uint8_t *sbsplit, int stride, int x, int y)
Definition: diracdec.c:1330
src
#define src
Definition: vp8dsp.c:254
pred_block_dc
static void pred_block_dc(DiracBlock *block, int stride, int x, int y)
Definition: diracdec.c:1360
dirac.h
select_dsp_funcs
static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen, int yblen)
Definition: diracdec.c:1812
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
diracdsp.h
DiracContext::hq_picture
int hq_picture
Definition: diracdec.c:156
DiracSlice::bytes
int bytes
Definition: diracdec.c:131
dirac_weight_func
void(* dirac_weight_func)(uint8_t *block, int stride, int log2_denom, int weight, int h)
Definition: diracdsp.h:27
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:2179
DiracContext::perspective
int perspective[2]
Definition: diracdec.c:201
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:162
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
AVDiracSeqHeader::bit_depth
int bit_depth
Definition: dirac.h:113
buf
void * buf
Definition: avisynth_c.h:766
av_cold
#define av_cold
Definition: attributes.h:84
DiracContext::sbwidth
int sbwidth
Definition: diracdec.c:213
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
coeff_unpack_golomb
static int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
Definition: diracdec.c:443
DiracContext::chroma_y_shift
int chroma_y_shift
Definition: diracdec.c:147
mask
static const uint16_t mask[17]
Definition: lzw.c:38
ROLLOFF
#define ROLLOFF(i)
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
width
#define width
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
s
#define s(width, name)
Definition: cbs_vp9.c:257
DiracSlice::slice_x
int slice_x
Definition: diracdec.c:129
DiracContext::zero_res
int zero_res
Definition: diracdec.c:152
DiracContext::mctmp
uint16_t * mctmp
Definition: diracdec.c:222
Plane::xbsep
uint8_t xbsep
Definition: diracdec.c:117
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1176
MAX_REFERENCE_FRAMES
#define MAX_REFERENCE_FRAMES
The spec limits this to 3 for frame coding, but in practice can be as high as 6.
Definition: diracdec.c:50
ff_dirac_init_arith_decoder
void ff_dirac_init_arith_decoder(DiracArith *c, GetBitContext *gb, int length)
Definition: dirac_arith.c:96
decode_subband_internal
static av_always_inline int decode_subband_internal(DiracContext *s, SubBand *b, int is_arith)
Dirac Specification -> 13.4.2 Non-skipped subbands.
Definition: diracdec.c:601
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVDiracSeqHeader::profile
int profile
Definition: dirac.h:100
CTX_DELTA_Q_F
@ CTX_DELTA_Q_F
Definition: dirac_arith.h:55
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
DiracContext::version
DiracVersionInfo version
Definition: diracdec.c:140
DiracContext::reader_ctx
DiracGolombLUT * reader_ctx
Definition: diracdec.c:139
get_bits.h
DiracContext::size_scaler
uint64_t size_scaler
Definition: diracdec.c:195
DWTPlane::stride
int stride
Definition: dirac_dwt.h:40
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:3292
SubBand::ibuf
uint8_t * ibuf
Definition: cfhd.h:54
Plane::idwt
DWTPlane idwt
Definition: diracdec.c:107
bands
static const float bands[]
Definition: af_superequalizer.c:56
DiracContext::seen_sequence_header
int seen_sequence_header
Definition: diracdec.c:143
DiracContext::diracdsp
DiracDSPContext diracdsp
Definition: diracdec.c:138
f
#define f(width, name)
Definition: cbs_vp9.c:255
int32_t
int32_t
Definition: audio_convert.c:194
arg
const char * arg
Definition: jacosubdec.c:66
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
if
if(ret)
Definition: filter_design.txt:179
ff_dirac_golomb_reader_end
av_cold void ff_dirac_golomb_reader_end(DiracGolombLUT **lut_ctx)
Definition: dirac_vlc.c:249
DiracContext::weight
int16_t weight[2]
Definition: diracdec.c:208
AVDiracSeqHeader::framerate
AVRational framerate
Definition: dirac.h:103
av_realloc_f
#define av_realloc_f(p, o, n)
Definition: tableprint_vlc.h:33
DiracContext::slice_params_buf
DiracSlice * slice_params_buf
Definition: diracdec.c:180
DiracContext::edge_emu_buffer_base
uint8_t * edge_emu_buffer_base
Definition: diracdec.c:220
dirac_get_arith_bit
static int dirac_get_arith_bit(DiracArith *c, int ctx)
Definition: dirac_arith.h:134
mc_row
static void mc_row(DiracContext *s, DiracBlock *block, uint16_t *mctmp, int plane, int dsty)
Definition: diracdec.c:1796
ff_dirac_qoffset_inter_tab
const int ff_dirac_qoffset_inter_tab[122]
Definition: diractab.c:72
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:160
DIVRNDUP
#define DIVRNDUP(a, b)
Definition: diracdec.c:72
decode_hq_slice_row
static int decode_hq_slice_row(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
Definition: diracdec.c:919
DiracContext::weight_func
dirac_weight_func weight_func
Definition: diracdec.c:231
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
dirac_decode_frame_internal
static int dirac_decode_frame_internal(DiracContext *s)
Dirac Specification -> 13.0 Transform data syntax.
Definition: diracdec.c:1873
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2200
SliceCoeffs::tot_v
int tot_v
Definition: diracdec.c:820
decode_lowdelay
static int decode_lowdelay(DiracContext *s)
Dirac Specification -> 13.5.1 low_delay_transform_data()
Definition: diracdec.c:934
EDGE_WIDTH
#define EDGE_WIDTH
Definition: mpegpicture.h:33
DiracContext::dc_prediction
int dc_prediction
Definition: diracdec.c:158
DiracContext::wavelet_depth
unsigned wavelet_depth
Definition: diracdec.c:163
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVDiracSeqHeader::colorspace
enum AVColorSpace colorspace
Definition: dirac.h:110
DiracSlice::slice_y
int slice_y
Definition: diracdec.c:130
DiracContext::ref_frames
DiracFrame * ref_frames[MAX_REFERENCE_FRAMES+1]
Definition: diracdec.c:237
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
DiracContext::old_delta_quant
unsigned old_delta_quant
schroedinger older than 1.0.8 doesn't store quant delta if only one codebook exists in a band
Definition: diracdec.c:170
dirac_get_arith_uint
static int dirac_get_arith_uint(DiracArith *c, int follow_ctx, int data_ctx)
Definition: dirac_arith.h:175
CTX_MV_F1
#define CTX_MV_F1
Definition: dirac_arith.h:70
dirac_decode_frame
static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
Definition: diracdec.c:2265
DIRAC_MAX_QUANT_INDEX
#define DIRAC_MAX_QUANT_INDEX
Definition: diractab.h:41
DiracGolombLUT
Definition: dirac_vlc.h:34
DIRAC_PCODE_AUX
@ DIRAC_PCODE_AUX
Definition: dirac.h:60
AVCodecContext::level
int level
level
Definition: avcodec.h:3018
DiracContext::thread_buf_size
int thread_buf_size
Definition: diracdec.c:178
subband_ll
@ subband_ll
Definition: diracdec.c:243
AVOnce
#define AVOnce
Definition: thread.h:159
DiracContext::is_arith
int is_arith
Definition: diracdec.c:153
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
DiracContext::width
unsigned width
Definition: diracdec.c:184
weight
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1564
error
static void error(const char *err)
Definition: target_dec_fuzzer.c:61
ff_spatial_idwt_slice2
void ff_spatial_idwt_slice2(DWTContext *d, int y)
Definition: dirac_dwt.c:67
dirac_subband
dirac_subband
Definition: diracdec.c:242
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:67
add_frame
static int add_frame(DiracFrame *framelist[], int maxframes, DiracFrame *frame)
Definition: diracdec.c:274
DiracContext::codeblock
struct DiracContext::@75 codeblock[MAX_DWT_LEVELS+1]
INTRA_DC_PRED
#define INTRA_DC_PRED(n, type)
Dirac Specification -> 13.3 intra_dc_prediction(band)
Definition: diracdec.c:573
dirac_decode_end
static av_cold int dirac_decode_end(AVCodecContext *avctx)
Definition: diracdec.c:426
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1965
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
Plane::yoffset
uint8_t yoffset
Definition: diracdec.c:121
Plane::yblen
uint8_t yblen
Definition: diracdec.c:115
AVPacket::size
int size
Definition: avcodec.h:1478
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
SubBand::stride
ptrdiff_t stride
Definition: cfhd.h:47
Plane::height
int height
Definition: cfhd.h:59
AV_WN32
#define AV_WN32(p, v)
Definition: intreadwrite.h:376
DiracContext::gb
GetBitContext gb
Definition: diracdec.c:141
init_obmc_weight_row
static void init_obmc_weight_row(Plane *p, uint8_t *obmc_weight, int stride, int left, int right, int wy)
Definition: diracdec.c:1576
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
DiracContext::codeblock_mode
unsigned codeblock_mode
Definition: diracdec.c:171
size
int size
Definition: twinvq_data.h:11134
DiracContext::chroma_x_shift
int chroma_x_shift
Definition: diracdec.c:146
SubBand::length
unsigned length
Definition: diracdec.c:102
DiracContext::seq
AVDiracSeqHeader seq
Definition: diracdec.c:142
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:92
DiracContext::bytes
AVRational bytes
Definition: diracdec.c:189
dirac_vlc.h
DiracContext::weight_log2denom
unsigned weight_log2denom
Definition: diracdec.c:209
SubBand
Definition: cfhd.h:44
DiracContext::thread_buf
uint8_t * thread_buf
Definition: diracdec.c:176
val
const char const char void * val
Definition: avisynth_c.h:863
split
static char * split(char *message, char delim)
Definition: af_channelmap.c:81
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
Plane::width
int width
Definition: cfhd.h:58
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1041
dirac_get_se_golomb
static int dirac_get_se_golomb(GetBitContext *gb)
Definition: golomb.h:359
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
add_dc
static void add_dc(uint16_t *dst, int dc, int stride, uint8_t *obmc_weight, int xblen, int yblen)
Definition: diracdec.c:1744
DIRAC_PCODE_SEQ_HEADER
@ DIRAC_PCODE_SEQ_HEADER
Definition: dirac.h:58
get_buffer_with_edge
static int get_buffer_with_edge(AVCodecContext *avctx, AVFrame *f, int flags)
Definition: diracdec.c:1970
pred_block_mode
static int pred_block_mode(DiracBlock *block, int stride, int x, int y, int refmask)
Definition: diracdec.c:1344
decode_hq_slice
static int decode_hq_slice(DiracContext *s, DiracSlice *slice, uint8_t *tmp_buf)
VC-2 Specification -> 13.5.3 hq_slice(sx,sy)
Definition: diracdec.c:845
subband_coeffs
static int subband_coeffs(DiracContext *s, int x, int y, int p, SliceCoeffs c[MAX_DWT_LEVELS])
Definition: diracdec.c:824
subband_nb
@ subband_nb
Definition: diracdec.c:247
init_obmc_weights
static void init_obmc_weights(DiracContext *s, Plane *p, int by)
Definition: diracdec.c:1609
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:112
Plane::stride
ptrdiff_t stride
Definition: cfhd.h:60
DiracContext::num_x
unsigned num_x
Definition: diracdec.c:173
DiracContext::prefix_bytes
unsigned prefix_bytes
Definition: diracdec.c:194
MpegvideoEncDSPContext
Definition: mpegvideoencdsp.h:32
DiracBlock
Definition: diracdec.c:82
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
DiracContext::low_delay
int low_delay
Definition: diracdec.c:155
pred_mv
static void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
Definition: diracdec.c:1393
UNPACK_ARITH
#define UNPACK_ARITH(n, type)
Definition: diracdec.c:454
alloc_sequence_buffers
static int alloc_sequence_buffers(DiracContext *s)
Definition: diracdec.c:285
DiracDSPContext
Definition: diracdsp.h:30
DIRAC_REF_MASK_REF2
#define DIRAC_REF_MASK_REF2
Definition: diracdec.c:60
PARSE_VALUES
#define PARSE_VALUES(type, x, gb, ebits, buf1, buf2)
Definition: diracdec.c:714
decode_block_params
static void decode_block_params(DiracContext *s, DiracArith arith[8], DiracBlock *block, int stride, int x, int y)
Definition: diracdec.c:1445
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
DiracContext::globalmc_flag
int globalmc_flag
Definition: diracdec.c:159
CTX_PMODE_REF2
#define CTX_PMODE_REF2
Definition: dirac_arith.h:68
av_always_inline
#define av_always_inline
Definition: attributes.h:43
DiracFrame::avframe
AVFrame * avframe
Definition: diracdec.c:75
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
dirac_arith.h
ff_dirac_init_arith_tables
av_cold void ff_dirac_init_arith_tables(void)
Definition: dirac_arith.c:86
AVCodec::name
const char * name
Name of the codec implementation.
Definition: avcodec.h:3488
MAX_BLOCKSIZE
#define MAX_BLOCKSIZE
Definition: diracdec.c:54
mc_subpel
static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5], int x, int y, int ref, int plane)
For block x,y, determine which of the hpel planes to do bilinear interpolation from and set src[] to ...
Definition: diracdec.c:1649
AVCodecContext::height
int height
Definition: avcodec.h:1738
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
DiracSlice
Definition: diracdec.c:127
interpolate_refplane
static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
Definition: diracdec.c:1832
DWTContext
Definition: dirac_dwt.h:54
SliceCoeffs::tot_h
int tot_h
Definition: diracdec.c:819
DiracContext::core_syntax
int core_syntax
Definition: diracdec.c:154
DiracVersionInfo::minor
int minor
Definition: dirac.h:78
avcodec.h
DiracContext::mv_precision
uint8_t mv_precision
Definition: diracdec.c:207
DiracContext::height
unsigned height
Definition: diracdec.c:185
SubBand::coeff_data
const uint8_t * coeff_data
Definition: diracdec.c:103
DiracContext::highquality
struct DiracContext::@77 highquality
AVDiracSeqHeader
Definition: dirac.h:81
mid_pred
#define mid_pred
Definition: mathops.h:97
SliceCoeffs::top
int top
Definition: diracdec.c:818
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
Plane::xoffset
uint8_t xoffset
Definition: diracdec.c:120
SliceCoeffs
Definition: diracdec.c:816
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
SubBand::pshift
int pshift
Definition: cfhd.h:52
DiracContext::pan_tilt
int pan_tilt[2]
Definition: diracdec.c:199
SubBand::quant
int quant
Definition: cfhd.h:53
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
remove_frame
static DiracFrame * remove_frame(DiracFrame *framelist[], int picnum)
Definition: diracdec.c:256
left
Brief from the Snow codec notes: the left and top neighbouring blocks used for motion-vector prediction (the full description of the Snow bitstream layout, range coder and neighbour rules lives in snow.txt).
Definition: snow.txt:386
mpeg12data.h
DIRAC_PCODE_END_SEQ
@ DIRAC_PCODE_END_SEQ
Definition: dirac.h:59
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:119
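A minimal sketch of how a decoder might use ff_set_sar() with the signature shown above; apply_sample_aspect() is an illustrative wrapper, not code from diracdec.c.

#include "avcodec.h"
#include "internal.h"   /* declares ff_set_sar() inside libavcodec */

static int apply_sample_aspect(AVCodecContext *avctx, AVRational sar)
{
    int ret = ff_set_sar(avctx, sar);   /* validates the ratio and stores it on avctx */
    if (ret < 0)
        av_log(avctx, AV_LOG_WARNING, "ignoring invalid sample aspect ratio\n");
    return ret;
}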
AVDiracSeqHeader::pix_fmt
enum AVPixelFormat pix_fmt
Definition: dirac.h:106
decode_subband_golomb
static int decode_subband_golomb(AVCodecContext *avctx, void *arg)
Definition: diracdec.c:649
DiracContext
Definition: diracdec.c:134
ff_dirac_qoffset_intra_tab
const int32_t ff_dirac_qoffset_intra_tab[120]
Definition: diractab.c:53
CTX_SB_DATA
#define CTX_SB_DATA
Definition: dirac_arith.h:66
DiracContext::add_obmc
void(* add_obmc)(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen)
Definition: diracdec.c:230
AVCodecContext
main external API structure.
Definition: avcodec.h:1565
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:2864
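Sketch of the execute() job model implied by the signature above: arg2 points to count elements of the given size, and each element is handed to the callback, possibly on a worker thread. The names do_one_job/run_jobs are illustrative only.

static int do_one_job(AVCodecContext *avctx, void *arg)
{
    int *value = arg;
    *value *= 2;          /* stand-in for per-slice or per-subband work */
    return 0;
}

static void run_jobs(AVCodecContext *avctx)
{
    int jobs[4] = { 1, 2, 3, 4 };
    /* ret == NULL: per-job return codes are not collected */
    avctx->execute(avctx, do_one_job, jobs, NULL, 4, sizeof(*jobs));
}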
subband_hl
@ subband_hl
Definition: diracdec.c:244
pkt
static AVPacket pkt
Definition: demuxing_decoding.c:54
DiracContext::all_frames
DiracFrame all_frames[MAX_FRAMES]
Definition: diracdec.c:239
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:2898
mpegvideoencdsp.h
CALC_PADDING
#define CALC_PADDING(size, depth)
Definition: diracdec.c:69
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
DiracContext::threads_num_buf
int threads_num_buf
Definition: diracdec.c:177
DiracContext::vdsp
VideoDSPContext vdsp
Definition: diracdec.c:137
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.

Definition: avcodec.h:1006
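Sketch of draining a decoder that sets AV_CODEC_CAP_DELAY, using the public send/receive API: after the last real packet, a NULL packet puts the decoder into draining mode and buffered frames are pulled until none remain.

#include <libavcodec/avcodec.h>

static void drain_decoder(AVCodecContext *avctx, AVFrame *frame)
{
    avcodec_send_packet(avctx, NULL);               /* enter draining mode */
    while (avcodec_receive_frame(avctx, frame) == 0) {
        /* consume the delayed frame here */
        av_frame_unref(frame);
    }
}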
CTX_GLOBAL_BLOCK
#define CTX_GLOBAL_BLOCK
Definition: dirac_arith.h:69
Plane
Definition: cfhd.h:57
DiracFrame::reference
int reference
Definition: diracdec.c:79
divide3
static int divide3(int x)
Definition: diracdec.c:251
VideoDSPContext
Definition: videodsp.h:41
Plane::xblen
uint8_t xblen
Definition: diracdec.c:114
DiracContext::quant
uint8_t quant[MAX_DWT_LEVELS][4]
Definition: diracdec.c:190
DiracContext::plane
Plane plane[3]
Definition: diracdec.c:145
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
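A hedged sketch of applying bitstream dimensions through ff_set_dimensions(); apply_dimensions() is an illustrative wrapper around the internal function declared above.

static int apply_dimensions(AVCodecContext *avctx, int w, int h)
{
    int ret = ff_set_dimensions(avctx, w, h);  /* rejects invalid or overflowing sizes */
    if (ret < 0)
        return ret;                            /* typically an AVERROR(EINVAL)-style code */
    return 0;
}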
propagate_block_data
static void propagate_block_data(DiracBlock *block, int stride, int size)
Copies the current block to the other blocks covered by the current superblock split mode.
Definition: diracdec.c:1485
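Illustrative sketch only (not the actual diracdec.c body): one way to replicate a decoded block over the size x size grid of blocks covered by a superblock split mode, with stride expressed in DiracBlock units as in the prototype above.

static void propagate_block_sketch(DiracBlock *block, int stride, int size)
{
    for (int y = 0; y < size; y++)
        for (int x = 0; x < size; x++)
            if (x | y)                       /* skip the source block itself */
                block[y * stride + x] = *block;
}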
DiracContext::buffer_stride
int buffer_stride
Definition: diracdec.c:224
DiracContext::slice_params_num_buf
int slice_params_num_buf
Definition: diracdec.c:181
quant
const uint8_t * quant
Definition: vorbis_enc_data.h:458
Plane::ybsep
uint8_t ybsep
Definition: diracdec.c:118
AVDiracSeqHeader::width
unsigned width
Definition: dirac.h:82
CTX_PMODE_REF1
#define CTX_PMODE_REF1
Definition: dirac_arith.h:67
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
AVPacket
This structure stores compressed data.
Definition: avcodec.h:1454
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:1592
global_mv
static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref)
Definition: diracdec.c:1429
decode_subband
static void decode_subband(DiracContext *s, GetBitContext *gb, int quant, int slice_x, int slice_y, int bits_end, SubBand *b1, SubBand *b2)
Definition: diracdec.c:726
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
DiracContext::avg_pixels_tab
void(* avg_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
Definition: diracdec.c:229
videodsp.h
DiracContext::ref_pics
DiracFrame * ref_pics[2]
Definition: diracdec.c:235
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:1738
bytestream.h
DiracContext::sbsplit
uint8_t * sbsplit
Definition: diracdec.c:216
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
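Small sketch of why linesize matters: rows must be addressed with the per-plane stride, which can exceed the visible width because of alignment padding. clear_luma() is a hypothetical helper and assumes an 8-bit luma plane.

#include <string.h>
#include <libavutil/frame.h>

static void clear_luma(AVFrame *frame, int width, int height)
{
    for (int y = 0; y < height; y++)
        memset(frame->data[0] + y * frame->linesize[0], 0, width);
}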
AVDiracSeqHeader::height
unsigned height
Definition: dirac.h:83
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
CTX_DELTA_Q_DATA
@ CTX_DELTA_Q_DATA
Definition: dirac_arith.h:56
dirac_decode_data_unit
static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size)
Definition: diracdec.c:2123
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
length
const char int length
Definition: avisynth_c.h:860
dirac_decode_init
static av_cold int dirac_decode_init(AVCodecContext *avctx)
Definition: diracdec.c:386
h
h
Definition: vp9dsp_template.c:2038
ff_diracdsp_init
av_cold void ff_diracdsp_init(DiracDSPContext *c)
Definition: diracdsp.c:219
dirac_unpack_idwt_params
static int dirac_unpack_idwt_params(DiracContext *s)
Dirac Specification -> 11.3 Wavelet transform data.
Definition: diracdec.c:1234
DiracContext::zrs_exp
unsigned zrs_exp
Definition: diracdec.c:202
DiracContext::obmc_weight
uint8_t obmc_weight[3][MAX_BLOCKSIZE *MAX_BLOCKSIZE]
Definition: diracdec.c:226
DWTPlane::width
int width
Definition: dirac_dwt.h:38
dirac_decode_flush
static void dirac_decode_flush(AVCodecContext *avctx)
Definition: diracdec.c:418
int
int
Definition: ffmpeg_filter.c:191
AVFrame::display_picture_number
int display_picture_number
picture number in display order
Definition: frame.h:413
DiracContext::frame_number
int64_t frame_number
Definition: diracdec.c:144
DiracArith::error
int error
Definition: dirac_arith.h:84
AVDiracSeqHeader::color_primaries
enum AVColorPrimaries color_primaries
Definition: dirac.h:108
AVCodecContext::execute2
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:2884
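Sketch of execute2(): like execute(), but the callback also receives the job and thread numbers, so per-row work or per-thread scratch state can be indexed directly. The names below are illustrative.

static int do_one_row(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    int *handled_by = arg;
    handled_by[jobnr] = threadnr;   /* record which thread processed this row */
    return 0;
}

static void run_rows(AVCodecContext *avctx)
{
    int handled_by[8] = { 0 };
    avctx->execute2(avctx, do_one_row, handled_by, NULL, 8);
}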
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
CTX_DC_DATA
#define CTX_DC_DATA
Definition: dirac_arith.h:73
get_delayed_pic
static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
Definition: diracdec.c:2088
ff_dirac_golomb_read_16bit
int ff_dirac_golomb_read_16bit(DiracGolombLUT *lut_ctx, const uint8_t *buf, int bytes, uint8_t *_dst, int coeffs)
Definition: dirac_vlc.c:82
SubBand::height
int height
Definition: cfhd.h:51
DiracContext::pshift
int pshift
Definition: diracdec.c:150
MAX_FRAMES
#define MAX_FRAMES
Definition: diracdec.c:52
SubBand::orientation
int orientation
Definition: cfhd.h:46
init_obmc_weight
static void init_obmc_weight(Plane *p, uint8_t *obmc_weight, int stride, int left, int right, int top, int bottom)
Definition: diracdec.c:1590
block_mc
static void block_mc(DiracContext *s, DiracBlock *block, uint16_t *mctmp, uint8_t *obmc_weight, int plane, int dstx, int dsty)
Definition: diracdec.c:1760
DWTPlane::height
int height
Definition: dirac_dwt.h:39
AV_WN16
#define AV_WN16(p, v)
Definition: intreadwrite.h:372