FFmpeg
truemotion2.c
1 /*
2  * Duck/ON2 TrueMotion 2 Decoder
3  * Copyright (c) 2005 Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Duck TrueMotion2 decoder.
25  */
26 
27 #include <inttypes.h>
28 
29 #include "libavutil/mem.h"
30 #include "avcodec.h"
31 #include "bswapdsp.h"
32 #include "bytestream.h"
33 #include "codec_internal.h"
34 #include "decode.h"
35 #include "get_bits.h"
36 
37 #define TM2_ESCAPE 0x80000000
38 #define TM2_DELTAS 64
39 
40 /* Huffman-coded streams of different types of blocks */
 41 enum TM2_STREAMS {
 42  TM2_C_HI = 0,
 43  TM2_C_LO,
 44  TM2_L_HI,
 45  TM2_L_LO,
 46  TM2_UPD,
 47  TM2_MOT,
 48  TM2_TYPE,
 49  TM2_NUM_STREAMS
 50 };
51 
52 /* Block types */
 53 enum TM2_BLOCKS {
 54  TM2_HI_RES,
 55  TM2_MED_RES,
 56  TM2_LOW_RES,
 57  TM2_NULL_RES,
 58  TM2_UPDATE,
 59  TM2_STILL,
 60  TM2_MOTION
 61 };
62 
 63 typedef struct TM2Context {
 64  AVCodecContext *avctx;
 65  AVFrame *pic;
 66 
 67  GetBitContext gb;
 68  int error;
 69  BswapDSPContext bdsp;
 70 
 71  uint8_t *buffer;
 72  int buffer_size;
 73 
 74  /* TM2 streams */
 75  int *tokens[TM2_NUM_STREAMS];
 76  int tok_lens[TM2_NUM_STREAMS];
 77  int tok_ptrs[TM2_NUM_STREAMS];
 78  int deltas[TM2_NUM_STREAMS][TM2_DELTAS];
 79  /* for blocks decoding */
 80  int D[4];
 81  int CD[4];
 82  int *last;
 83  int *clast;
 84 
 85  /* data for current and previous frame */
 86  int *Y_base, *UV_base;
 87  int *Y1, *U1, *V1, *Y2, *U2, *V2;
 88  int y_stride, uv_stride;
 89  int cur;
 90 } TM2Context;
91 
92 /**
 93 * Huffman codes for each of the streams
94 */
95 typedef struct TM2Codes {
96  VLC vlc; ///< table for FFmpeg bitstream reader
97  int bits;
98  int *recode; ///< table for converting from code indexes to values
99  int length;
100 } TM2Codes;
101 
102 /**
103 * structure for gathering Huffman codes information
104 */
105 typedef struct TM2Huff {
106  int val_bits; ///< length of literal
107  int max_bits; ///< maximum length of code
108  int min_bits; ///< minimum length of code
109  int nodes; ///< total number of nodes in tree
110  int num; ///< current number filled
111  int max_num; ///< total number of codes
112  int *nums; ///< literals
113  uint8_t *lens; ///< codelengths
114 } TM2Huff;
115 
116 /**
117  *
118  * @returns the length of the longest code or an AVERROR code
119  */
120 static int tm2_read_tree(TM2Context *ctx, int length, TM2Huff *huff)
121 {
122  int ret, ret2;
123  if (length > huff->max_bits) {
124  av_log(ctx->avctx, AV_LOG_ERROR, "Tree exceeded its given depth (%i)\n",
125  huff->max_bits);
126  return AVERROR_INVALIDDATA;
127  }
128 
129  if (!get_bits1(&ctx->gb)) { /* literal */
130  if (length == 0) {
131  length = 1;
132  }
133  if (huff->num >= huff->max_num) {
134  av_log(ctx->avctx, AV_LOG_DEBUG, "Too many literals\n");
135  return AVERROR_INVALIDDATA;
136  }
137  huff->nums[huff->num] = get_bits_long(&ctx->gb, huff->val_bits);
138  huff->lens[huff->num] = length;
139  huff->num++;
140  return length;
141  } else { /* non-terminal node */
142  if ((ret2 = tm2_read_tree(ctx, length + 1, huff)) < 0)
143  return ret2;
144  if ((ret = tm2_read_tree(ctx, length + 1, huff)) < 0)
145  return ret;
146  }
147  return FFMAX(ret, ret2);
148 }
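
The tree syntax parsed above is a plain prefix walk: a set bit opens an internal node whose two children are read recursively, a clear bit marks a leaf followed by a val_bits-bit literal, and the return value propagates the longest code length. The following standalone sketch is not FFmpeg API; the bit array, the read_bit() helper and the val_bits value are made up for illustration, but the walk mirrors tm2_read_tree():

#include <stdio.h>

static const unsigned char bits[] = { 1, 0, 0, 1, 0, 1, 0 }; /* "1 0 <01> 0 <10>" */
static int pos = 0;
static const int val_bits = 2;

static int read_bit(void)
{
    return bits[pos++];
}

static void read_tree(int depth)
{
    if (!read_bit()) {                     /* leaf: a val_bits-bit literal follows */
        int v = 0;
        for (int i = 0; i < val_bits; i++)
            v = (v << 1) | read_bit();
        printf("leaf value %d, code length %d\n", v, depth ? depth : 1);
    } else {                               /* internal node: two children follow   */
        read_tree(depth + 1);
        read_tree(depth + 1);
    }
}

int main(void)
{
    read_tree(0);   /* prints two leaves (values 1 and 2), each with code length 1 */
    return 0;
}
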
149 
 150 static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
 151 {
152  TM2Huff huff;
153  int res = 0;
154 
155  huff.val_bits = get_bits(&ctx->gb, 5);
156  huff.max_bits = get_bits(&ctx->gb, 5);
157  huff.min_bits = get_bits(&ctx->gb, 5);
158  huff.nodes = get_bits(&ctx->gb, 17);
159  huff.num = 0;
160 
161  /* check for correct codes parameters */
162  if ((huff.val_bits < 1) || (huff.val_bits > 32) ||
163  (huff.max_bits < 0) || (huff.max_bits > 25)) {
164  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal "
165  "length: %i, max code length: %i\n", huff.val_bits, huff.max_bits);
166  return AVERROR_INVALIDDATA;
167  }
168  if ((huff.nodes <= 0) || (huff.nodes > 0x10000)) {
169  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree "
170  "nodes: %i\n", huff.nodes);
171  return AVERROR_INVALIDDATA;
172  }
173  /* one-node tree */
174  if (huff.max_bits == 0)
175  huff.max_bits = 1;
176 
177  /* allocate space for codes - it is exactly ceil(nodes / 2) entries */
178  huff.max_num = (huff.nodes + 1) >> 1;
179  huff.nums = av_calloc(huff.max_num, sizeof(int));
180  huff.lens = av_mallocz(huff.max_num);
181 
182  if (!huff.nums || !huff.lens) {
183  res = AVERROR(ENOMEM);
184  goto out;
185  }
186 
187  res = tm2_read_tree(ctx, 0, &huff);
188 
189  if (res >= 0 && res != huff.max_bits) {
190  av_log(ctx->avctx, AV_LOG_ERROR, "Got less bits than expected: %i of %i\n",
191  res, huff.max_bits);
192  res = AVERROR_INVALIDDATA;
193  }
194  if (huff.num != huff.max_num) {
195  av_log(ctx->avctx, AV_LOG_ERROR, "Got less codes than expected: %i of %i\n",
196  huff.num, huff.max_num);
197  res = AVERROR_INVALIDDATA;
198  }
199 
200  /* convert codes to vlc_table */
201  if (res >= 0) {
202  res = ff_vlc_init_from_lengths(&code->vlc, huff.max_bits, huff.max_num,
203  huff.lens, sizeof(huff.lens[0]),
204  NULL, 0, 0, 0, 0, ctx->avctx);
205  if (res < 0)
206  av_log(ctx->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
207  else {
208  code->bits = huff.max_bits;
209  code->length = huff.max_num;
210  code->recode = huff.nums;
211  huff.nums = NULL;
212  }
213  }
214 
215 out:
216  /* free allocated memory */
217  av_free(huff.nums);
218  av_free(huff.lens);
219 
220  return res;
221 }
222 
 223 static void tm2_free_codes(TM2Codes *code)
 224 {
225  av_free(code->recode);
226  ff_vlc_free(&code->vlc);
227 }
228 
229 static inline int tm2_get_token(GetBitContext *gb, TM2Codes *code)
230 {
231  int val;
232  val = get_vlc2(gb, code->vlc.table, code->bits, 1);
233  if(val<0)
234  return -1;
235  return code->recode[val];
236 }
237 
238 #define TM2_OLD_HEADER_MAGIC 0x00000100
239 #define TM2_NEW_HEADER_MAGIC 0x00000101
240 
241 static inline int tm2_read_header(TM2Context *ctx, const uint8_t *buf)
242 {
243  uint32_t magic = AV_RL32(buf);
244 
245  switch (magic) {
 246  case TM2_OLD_HEADER_MAGIC:
 247  avpriv_request_sample(ctx->avctx, "Old TM2 header");
 248  return 0;
 249  case TM2_NEW_HEADER_MAGIC:
 250  return 0;
251  default:
252  av_log(ctx->avctx, AV_LOG_ERROR, "Not a TM2 header: 0x%08"PRIX32"\n",
253  magic);
254  return AVERROR_INVALIDDATA;
255  }
256 }
257 
258 static int tm2_read_deltas(TM2Context *ctx, int stream_id)
259 {
260  int d, mb;
261  int i, v;
262 
263  d = get_bits(&ctx->gb, 9);
264  mb = get_bits(&ctx->gb, 5);
265 
266  av_assert2(mb < 32);
267  if ((d < 1) || (d > TM2_DELTAS) || (mb < 1)) {
268  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect delta table: %i deltas x %i bits\n", d, mb);
269  return AVERROR_INVALIDDATA;
270  }
271 
272  for (i = 0; i < d; i++) {
273  v = get_bits_long(&ctx->gb, mb);
274  if (v & (1 << (mb - 1)))
275  ctx->deltas[stream_id][i] = v - (1U << mb);
276  else
277  ctx->deltas[stream_id][i] = v;
278  }
279  for (; i < TM2_DELTAS; i++)
280  ctx->deltas[stream_id][i] = 0;
281 
282  return 0;
283 }
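
Each delta above is read as an unsigned mb-bit value and mapped back to a signed number by subtracting 1 << mb when its top bit is set. A minimal standalone illustration of that mapping follows; the helper name and sample values are made up:

#include <stdio.h>

/* Map an unsigned mb-bit code to the signed delta the decoder stores. */
static int tm2_sign_extend(unsigned v, int mb)
{
    if (v & (1u << (mb - 1)))        /* top bit of the mb-bit field is set */
        return (int)v - (1 << mb);   /* wrap into the negative range       */
    return (int)v;
}

int main(void)
{
    printf("%d\n", tm2_sign_extend(0x1C, 5));  /* prints -4 (28 - 32) */
    printf("%d\n", tm2_sign_extend(0x0C, 5));  /* prints 12           */
    return 0;
}
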
284 
285 static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, int buf_size)
286 {
287  int i, ret;
288  int skip = 0;
289  int len, toks, pos;
290  TM2Codes codes;
291  GetByteContext gb;
292 
293  if (buf_size < 4) {
294  av_log(ctx->avctx, AV_LOG_ERROR, "not enough space for len left\n");
295  return AVERROR_INVALIDDATA;
296  }
297 
298  /* get stream length in dwords */
299  bytestream2_init(&gb, buf, buf_size);
300  len = bytestream2_get_be32(&gb);
301 
302  if (len == 0)
303  return 4;
304 
305  if (len >= INT_MAX / 4 - 1 || len < 0 || len * 4 + 4 > buf_size) {
306  av_log(ctx->avctx, AV_LOG_ERROR, "Error, invalid stream size.\n");
307  return AVERROR_INVALIDDATA;
308  }
309  skip = len * 4 + 4;
310 
311  toks = bytestream2_get_be32(&gb);
312  if (toks & 1) {
313  len = bytestream2_get_be32(&gb);
314  if (len == TM2_ESCAPE) {
315  len = bytestream2_get_be32(&gb);
316  }
317  if (len > 0) {
318  pos = bytestream2_tell(&gb);
319  if (skip <= pos)
320  return AVERROR_INVALIDDATA;
321  init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
322  if ((ret = tm2_read_deltas(ctx, stream_id)) < 0)
323  return ret;
324  bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);
325  }
326  }
327  /* skip unused fields */
328  len = bytestream2_get_be32(&gb);
329  if (len == TM2_ESCAPE) { /* some unknown length - could be escaped too */
330  bytestream2_skip(&gb, 8); /* unused by decoder */
331  } else {
332  bytestream2_skip(&gb, 4); /* unused by decoder */
333  }
334 
335  pos = bytestream2_tell(&gb);
336  if (skip <= pos)
337  return AVERROR_INVALIDDATA;
338  init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
339  if ((ret = tm2_build_huff_table(ctx, &codes)) < 0)
340  return ret;
341  bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);
342 
343  toks >>= 1;
344  /* check if we have sane number of tokens */
345  if ((toks < 0) || (toks > 0xFFFFFF)) {
346  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
 347  ret = AVERROR_INVALIDDATA;
 348  goto end;
349  }
350  ret = av_reallocp_array(&ctx->tokens[stream_id], toks, sizeof(int));
351  if (ret < 0) {
352  ctx->tok_lens[stream_id] = 0;
353  goto end;
354  }
355  ctx->tok_lens[stream_id] = toks;
356  len = bytestream2_get_be32(&gb);
357  if (len > 0) {
358  pos = bytestream2_tell(&gb);
359  if (skip <= pos) {
 360  ret = AVERROR_INVALIDDATA;
 361  goto end;
362  }
363  init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
364  for (i = 0; i < toks; i++) {
365  if (get_bits_left(&ctx->gb) <= 0) {
366  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
 367  ret = AVERROR_INVALIDDATA;
 368  goto end;
369  }
370  ctx->tokens[stream_id][i] = tm2_get_token(&ctx->gb, &codes);
 371  if ((stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) || ctx->tokens[stream_id][i] < 0) {
372  av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
373  ctx->tokens[stream_id][i], stream_id, i);
 374  ret = AVERROR_INVALIDDATA;
 375  goto end;
376  }
377  }
378  } else {
379  if (len < 0) {
 380  ret = AVERROR_INVALIDDATA;
 381  goto end;
382  }
383  for (i = 0; i < toks; i++) {
384  ctx->tokens[stream_id][i] = codes.recode[0];
385  if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) {
386  av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
387  ctx->tokens[stream_id][i], stream_id, i);
 388  ret = AVERROR_INVALIDDATA;
 389  goto end;
390  }
391  }
392  }
393 
394  ret = skip;
395 
396 end:
397  tm2_free_codes(&codes);
398  return ret;
399 }
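
After the delta table and the Huffman tree are consumed with the bit reader, the byte reader is re-synchronised by rounding the consumed bit count up to a whole number of 32-bit words: ((get_bits_count(...) + 31) >> 5) << 2 gives the padded size in bytes. A small standalone check of that expression (the helper name is made up):

#include <stdio.h>

/* Round a bit count up to whole 32-bit words and return it in bytes. */
static int padded_byte_count(int bits)
{
    return ((bits + 31) >> 5) << 2;
}

int main(void)
{
    printf("%d\n", padded_byte_count(1));   /* 4 */
    printf("%d\n", padded_byte_count(32));  /* 4 */
    printf("%d\n", padded_byte_count(33));  /* 8 */
    return 0;
}
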
400 
401 static inline int GET_TOK(TM2Context *ctx,int type)
402 {
403  if (ctx->tok_ptrs[type] >= ctx->tok_lens[type]) {
404  av_log(ctx->avctx, AV_LOG_ERROR, "Read token from stream %i out of bounds (%i>=%i)\n", type, ctx->tok_ptrs[type], ctx->tok_lens[type]);
405  ctx->error = 1;
406  return 0;
407  }
408  if (type <= TM2_MOT) {
409  if (ctx->tokens[type][ctx->tok_ptrs[type]] >= TM2_DELTAS) {
410  av_log(ctx->avctx, AV_LOG_ERROR, "token %d is too large\n", ctx->tokens[type][ctx->tok_ptrs[type]]);
411  return 0;
412  }
413  return ctx->deltas[type][ctx->tokens[type][ctx->tok_ptrs[type]++]];
414  }
415  return ctx->tokens[type][ctx->tok_ptrs[type]++];
416 }
417 
418 /* blocks decoding routines */
419 
420 /* common Y, U, V pointers initialisation */
421 #define TM2_INIT_POINTERS() \
422  int *last, *clast; \
423  int *Y, *U, *V;\
424  int Ystride, Ustride, Vstride;\
425 \
426  Ystride = ctx->y_stride;\
427  Vstride = ctx->uv_stride;\
428  Ustride = ctx->uv_stride;\
429  Y = (ctx->cur?ctx->Y2:ctx->Y1) + by * 4 * Ystride + bx * 4;\
430  V = (ctx->cur?ctx->V2:ctx->V1) + by * 2 * Vstride + bx * 2;\
431  U = (ctx->cur?ctx->U2:ctx->U1) + by * 2 * Ustride + bx * 2;\
432  last = ctx->last + bx * 4;\
433  clast = ctx->clast + bx * 4;
434 
435 #define TM2_INIT_POINTERS_2() \
436  unsigned *Yo, *Uo, *Vo;\
437  int oYstride, oUstride, oVstride;\
438 \
439  TM2_INIT_POINTERS();\
440  oYstride = Ystride;\
441  oVstride = Vstride;\
442  oUstride = Ustride;\
443  Yo = (ctx->cur?ctx->Y1:ctx->Y2) + by * 4 * oYstride + bx * 4;\
444  Vo = (ctx->cur?ctx->V1:ctx->V2) + by * 2 * oVstride + bx * 2;\
445  Uo = (ctx->cur?ctx->U1:ctx->U2) + by * 2 * oUstride + bx * 2;
446 
447 /* recalculate last and delta values for next blocks */
448 #define TM2_RECALC_BLOCK(CHR, stride, last, CD) {\
449  CD[0] = (unsigned)CHR[ 1] - (unsigned)last[1];\
450  CD[1] = (unsigned)CHR[stride + 1] - (unsigned) CHR[1];\
451  last[0] = (int)CHR[stride + 0];\
452  last[1] = (int)CHR[stride + 1];}
453 
454 /* common operations - add deltas to 4x4 block of luma or 2x2 blocks of chroma */
455 static inline void tm2_apply_deltas(TM2Context *ctx, int* Y, int stride, int *deltas, int *last)
456 {
457  unsigned ct, d;
458  int i, j;
459 
460  for (j = 0; j < 4; j++){
461  ct = ctx->D[j];
462  for (i = 0; i < 4; i++){
463  d = deltas[i + j * 4];
464  ct += d;
465  last[i] += ct;
466  Y[i] = av_clip_uint8(last[i]);
467  }
468  Y += stride;
469  ctx->D[j] = ct;
470  }
471 }
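
tm2_apply_deltas() combines two running predictors: each row's deltas are prefix-summed horizontally starting from the per-row predictor D[j], and that running sum is added to the per-column predictor last[i] before clipping to 8 bits. A standalone sketch of the same accumulation on a made-up 2x4 example (clipping omitted, all array contents hypothetical):

#include <stdio.h>

int main(void)
{
    int D[2]      = { 1, 0 };              /* hypothetical per-row predictors    */
    int last[4]   = { 10, 10, 10, 10 };    /* hypothetical per-column predictors */
    int deltas[8] = { 1, 0, -1, 2,         /* row 0 */
                      0, 3,  0, 0 };       /* row 1 */
    int out[8];

    for (int j = 0; j < 2; j++) {
        int ct = D[j];
        for (int i = 0; i < 4; i++) {
            ct      += deltas[i + j * 4];  /* horizontal prefix sum              */
            last[i] += ct;                 /* vertical running predictor         */
            out[i + j * 4] = last[i];      /* the decoder would clip to 0..255   */
        }
        D[j] = ct;                         /* carried over to the next block     */
    }
    for (int k = 0; k < 8; k++)
        printf("%d ", out[k]);             /* prints: 12 12 11 13 12 15 14 16    */
    printf("\n");
    return 0;
}
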
472 
473 static inline void tm2_high_chroma(int *data, int stride, int *last, unsigned *CD, int *deltas)
474 {
475  int i, j;
476  for (j = 0; j < 2; j++) {
477  for (i = 0; i < 2; i++) {
478  CD[j] += deltas[i + j * 2];
479  last[i] += CD[j];
480  data[i] = last[i];
481  }
482  data += stride;
483  }
484 }
485 
486 static inline void tm2_low_chroma(int *data, int stride, int *clast, unsigned *CD, int *deltas, int bx)
487 {
488  int t;
489  int l;
490  int prev;
491 
492  if (bx > 0)
493  prev = clast[-3];
494  else
495  prev = 0;
496  t = (int)(CD[0] + CD[1]) >> 1;
497  l = (int)(prev - CD[0] - CD[1] + clast[1]) >> 1;
498  CD[1] = CD[0] + CD[1] - t;
499  CD[0] = t;
500  clast[0] = l;
501 
502  tm2_high_chroma(data, stride, clast, CD, deltas);
503 }
504 
505 static inline void tm2_hi_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
506 {
507  int i;
508  int deltas[16];
 509  TM2_INIT_POINTERS();
 510 
511  /* hi-res chroma */
512  for (i = 0; i < 4; i++) {
513  deltas[i] = GET_TOK(ctx, TM2_C_HI);
514  deltas[i + 4] = GET_TOK(ctx, TM2_C_HI);
515  }
516  tm2_high_chroma(U, Ustride, clast, ctx->CD, deltas);
517  tm2_high_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas + 4);
518 
519  /* hi-res luma */
520  for (i = 0; i < 16; i++)
521  deltas[i] = GET_TOK(ctx, TM2_L_HI);
522 
523  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
524 }
525 
526 static inline void tm2_med_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
527 {
528  int i;
529  int deltas[16];
 530  TM2_INIT_POINTERS();
 531 
532  /* low-res chroma */
533  deltas[0] = GET_TOK(ctx, TM2_C_LO);
534  deltas[1] = deltas[2] = deltas[3] = 0;
535  tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);
536 
537  deltas[0] = GET_TOK(ctx, TM2_C_LO);
538  deltas[1] = deltas[2] = deltas[3] = 0;
539  tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);
540 
541  /* hi-res luma */
542  for (i = 0; i < 16; i++)
543  deltas[i] = GET_TOK(ctx, TM2_L_HI);
544 
545  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
546 }
547 
548 static inline void tm2_low_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
549 {
550  int i;
551  int t1, t2;
552  int deltas[16];
 553  TM2_INIT_POINTERS();
 554 
555  /* low-res chroma */
556  deltas[0] = GET_TOK(ctx, TM2_C_LO);
557  deltas[1] = deltas[2] = deltas[3] = 0;
558  tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);
559 
560  deltas[0] = GET_TOK(ctx, TM2_C_LO);
561  deltas[1] = deltas[2] = deltas[3] = 0;
562  tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);
563 
564  /* low-res luma */
565  for (i = 0; i < 16; i++)
566  deltas[i] = 0;
567 
568  deltas[ 0] = GET_TOK(ctx, TM2_L_LO);
569  deltas[ 2] = GET_TOK(ctx, TM2_L_LO);
570  deltas[ 8] = GET_TOK(ctx, TM2_L_LO);
571  deltas[10] = GET_TOK(ctx, TM2_L_LO);
572 
573  if (bx > 0)
574  last[0] = (int)((unsigned)last[-1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3] + last[1]) >> 1;
575  else
576  last[0] = (int)((unsigned)last[1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3])>> 1;
577  last[2] = (int)((unsigned)last[1] + last[3]) >> 1;
578 
579  t1 = ctx->D[0] + (unsigned)ctx->D[1];
580  ctx->D[0] = t1 >> 1;
581  ctx->D[1] = t1 - (t1 >> 1);
582  t2 = ctx->D[2] + (unsigned)ctx->D[3];
583  ctx->D[2] = t2 >> 1;
584  ctx->D[3] = t2 - (t2 >> 1);
585 
586  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
587 }
588 
589 static inline void tm2_null_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
590 {
591  int i;
592  int ct;
593  unsigned left, right;
594  int diff;
595  int deltas[16];
 596  TM2_INIT_POINTERS();
 597 
598  /* null chroma */
599  deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
600  tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);
601 
602  deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
603  tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);
604 
605  /* null luma */
606  for (i = 0; i < 16; i++)
607  deltas[i] = 0;
608 
609  ct = (unsigned)ctx->D[0] + ctx->D[1] + ctx->D[2] + ctx->D[3];
610 
611  if (bx > 0)
612  left = last[-1] - (unsigned)ct;
613  else
614  left = 0;
615 
616  right = last[3];
617  diff = right - left;
618  last[0] = left + (diff >> 2);
619  last[1] = left + (diff >> 1);
620  last[2] = right - (diff >> 2);
621  last[3] = right;
622  {
623  unsigned tp = left;
624 
625  ctx->D[0] = (tp + (ct >> 2)) - left;
626  left += ctx->D[0];
627  ctx->D[1] = (tp + (ct >> 1)) - left;
628  left += ctx->D[1];
629  ctx->D[2] = ((tp + ct) - (ct >> 2)) - left;
630  left += ctx->D[2];
631  ctx->D[3] = (tp + ct) - left;
632  }
633  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
634 }
635 
636 static inline void tm2_still_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
637 {
638  int i, j;
 639  TM2_INIT_POINTERS_2();
 640 
641  /* update chroma */
642  for (j = 0; j < 2; j++) {
643  for (i = 0; i < 2; i++){
644  U[i] = Uo[i];
645  V[i] = Vo[i];
646  }
647  U += Ustride; V += Vstride;
648  Uo += oUstride; Vo += oVstride;
649  }
650  U -= Ustride * 2;
651  V -= Vstride * 2;
652  TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
653  TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));
654 
655  /* update deltas */
656  ctx->D[0] = Yo[3] - last[3];
657  ctx->D[1] = Yo[3 + oYstride] - Yo[3];
658  ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
659  ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];
660 
661  for (j = 0; j < 4; j++) {
662  for (i = 0; i < 4; i++) {
663  Y[i] = Yo[i];
664  last[i] = Yo[i];
665  }
666  Y += Ystride;
667  Yo += oYstride;
668  }
669 }
670 
671 static inline void tm2_update_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
672 {
673  int i, j;
674  unsigned d;
 675  TM2_INIT_POINTERS_2();
 676 
677  /* update chroma */
678  for (j = 0; j < 2; j++) {
679  for (i = 0; i < 2; i++) {
680  U[i] = Uo[i] + GET_TOK(ctx, TM2_UPD);
681  V[i] = Vo[i] + GET_TOK(ctx, TM2_UPD);
682  }
683  U += Ustride;
684  V += Vstride;
685  Uo += oUstride;
686  Vo += oVstride;
687  }
688  U -= Ustride * 2;
689  V -= Vstride * 2;
690  TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
691  TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));
692 
693  /* update deltas */
694  ctx->D[0] = Yo[3] - last[3];
695  ctx->D[1] = Yo[3 + oYstride] - Yo[3];
696  ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
697  ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];
698 
699  for (j = 0; j < 4; j++) {
700  d = last[3];
701  for (i = 0; i < 4; i++) {
702  Y[i] = Yo[i] + (unsigned)GET_TOK(ctx, TM2_UPD);
703  last[i] = Y[i];
704  }
705  ctx->D[j] = last[3] - d;
706  Y += Ystride;
707  Yo += oYstride;
708  }
709 }
710 
711 static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
712 {
713  int i, j;
714  int mx, my;
 715  TM2_INIT_POINTERS_2();
 716 
717  mx = GET_TOK(ctx, TM2_MOT);
718  my = GET_TOK(ctx, TM2_MOT);
719  mx = av_clip(mx, -(bx * 4 + 4), ctx->avctx->width - bx * 4);
720  my = av_clip(my, -(by * 4 + 4), ctx->avctx->height - by * 4);
721 
722  if (4*bx+mx<0 || 4*by+my<0 || 4*bx+mx+4 > ctx->avctx->width || 4*by+my+4 > ctx->avctx->height) {
723  av_log(ctx->avctx, AV_LOG_ERROR, "MV out of picture\n");
724  return;
725  }
726 
727  Yo += my * oYstride + mx;
728  Uo += (my >> 1) * oUstride + (mx >> 1);
729  Vo += (my >> 1) * oVstride + (mx >> 1);
730 
731  /* copy chroma */
732  for (j = 0; j < 2; j++) {
733  for (i = 0; i < 2; i++) {
734  U[i] = Uo[i];
735  V[i] = Vo[i];
736  }
737  U += Ustride;
738  V += Vstride;
739  Uo += oUstride;
740  Vo += oVstride;
741  }
742  U -= Ustride * 2;
743  V -= Vstride * 2;
744  TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
745  TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));
746 
747  /* copy luma */
748  for (j = 0; j < 4; j++) {
749  for (i = 0; i < 4; i++) {
750  Y[i] = Yo[i];
751  }
752  Y += Ystride;
753  Yo += oYstride;
754  }
755  /* calculate deltas */
756  Y -= Ystride * 4;
757  ctx->D[0] = (unsigned)Y[3] - last[3];
758  ctx->D[1] = (unsigned)Y[3 + Ystride] - Y[3];
759  ctx->D[2] = (unsigned)Y[3 + Ystride * 2] - Y[3 + Ystride];
760  ctx->D[3] = (unsigned)Y[3 + Ystride * 3] - Y[3 + Ystride * 2];
761  for (i = 0; i < 4; i++)
762  last[i] = Y[i + Ystride * 3];
763 }
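
Motion vectors come from the TM2_MOT stream and are clamped so the referenced 4x4 area cannot run arbitrarily far outside the frame; a vector that still points outside the picture after clamping is rejected with "MV out of picture" and the block is left untouched. A standalone illustration with made-up values:

#include <stdio.h>

static int clamp(int v, int lo, int hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    int width = 64, bx = 2, mx = -100;              /* hypothetical block / vector */
    mx = clamp(mx, -(bx * 4 + 4), width - bx * 4);  /* clamped to -12              */
    int x0 = 4 * bx + mx;                           /* leftmost referenced column  */
    printf("mx=%d, ref columns %d..%d -> %s\n", mx, x0, x0 + 3,
           (x0 < 0 || x0 + 4 > width) ? "rejected (MV out of picture)" : "ok");
    return 0;
}
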
764 
 765 static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
 766 {
767  int i, j;
768  int w = ctx->avctx->width, h = ctx->avctx->height, bw = w >> 2, bh = h >> 2, cw = w >> 1;
769  int type;
770  int keyframe = 1;
771  int *Y, *U, *V;
772  uint8_t *dst;
773 
774  for (i = 0; i < TM2_NUM_STREAMS; i++)
775  ctx->tok_ptrs[i] = 0;
776 
777  if (ctx->tok_lens[TM2_TYPE]<bw*bh) {
778  av_log(ctx->avctx,AV_LOG_ERROR,"Got %i tokens for %i blocks\n",ctx->tok_lens[TM2_TYPE],bw*bh);
779  return AVERROR_INVALIDDATA;
780  }
781 
782  memset(ctx->last, 0, 4 * bw * sizeof(int));
783  memset(ctx->clast, 0, 4 * bw * sizeof(int));
784 
785  for (j = 0; j < bh; j++) {
786  memset(ctx->D, 0, 4 * sizeof(int));
787  memset(ctx->CD, 0, 4 * sizeof(int));
788  for (i = 0; i < bw; i++) {
789  type = GET_TOK(ctx, TM2_TYPE);
790  switch(type) {
791  case TM2_HI_RES:
792  tm2_hi_res_block(ctx, p, i, j);
793  break;
794  case TM2_MED_RES:
795  tm2_med_res_block(ctx, p, i, j);
796  break;
797  case TM2_LOW_RES:
798  tm2_low_res_block(ctx, p, i, j);
799  break;
800  case TM2_NULL_RES:
801  tm2_null_res_block(ctx, p, i, j);
802  break;
803  case TM2_UPDATE:
804  tm2_update_block(ctx, p, i, j);
805  keyframe = 0;
806  break;
807  case TM2_STILL:
808  tm2_still_block(ctx, p, i, j);
809  keyframe = 0;
810  break;
811  case TM2_MOTION:
812  tm2_motion_block(ctx, p, i, j);
813  keyframe = 0;
814  break;
815  default:
816  av_log(ctx->avctx, AV_LOG_ERROR, "Skipping unknown block type %i\n", type);
817  }
818  if (ctx->error)
819  return AVERROR_INVALIDDATA;
820  }
821  }
822 
823  /* copy data from our buffer to AVFrame */
824  Y = (ctx->cur?ctx->Y2:ctx->Y1);
825  U = (ctx->cur?ctx->U2:ctx->U1);
826  V = (ctx->cur?ctx->V2:ctx->V1);
827  dst = p->data[0];
828  for (j = 0; j < h; j++) {
829  for (i = 0; i < w; i++) {
830  unsigned y = Y[i], u = U[i >> 1], v = V[i >> 1];
831  dst[3*i+0] = av_clip_uint8(y + v);
832  dst[3*i+1] = av_clip_uint8(y);
833  dst[3*i+2] = av_clip_uint8(y + u);
834  }
835 
836  /* horizontal edge extension */
837  Y[-4] = Y[-3] = Y[-2] = Y[-1] = Y[0];
838  Y[w + 3] = Y[w + 2] = Y[w + 1] = Y[w] = Y[w - 1];
839 
840  /* vertical edge extension */
841  if (j == 0) {
842  memcpy(Y - 4 - 1 * ctx->y_stride, Y - 4, ctx->y_stride);
843  memcpy(Y - 4 - 2 * ctx->y_stride, Y - 4, ctx->y_stride);
844  memcpy(Y - 4 - 3 * ctx->y_stride, Y - 4, ctx->y_stride);
845  memcpy(Y - 4 - 4 * ctx->y_stride, Y - 4, ctx->y_stride);
846  } else if (j == h - 1) {
847  memcpy(Y - 4 + 1 * ctx->y_stride, Y - 4, ctx->y_stride);
848  memcpy(Y - 4 + 2 * ctx->y_stride, Y - 4, ctx->y_stride);
849  memcpy(Y - 4 + 3 * ctx->y_stride, Y - 4, ctx->y_stride);
850  memcpy(Y - 4 + 4 * ctx->y_stride, Y - 4, ctx->y_stride);
851  }
852 
853  Y += ctx->y_stride;
854  if (j & 1) {
855  /* horizontal edge extension */
856  U[-2] = U[-1] = U[0];
857  V[-2] = V[-1] = V[0];
858  U[cw + 1] = U[cw] = U[cw - 1];
859  V[cw + 1] = V[cw] = V[cw - 1];
860 
861  /* vertical edge extension */
862  if (j == 1) {
863  memcpy(U - 2 - 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
864  memcpy(V - 2 - 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
865  memcpy(U - 2 - 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
866  memcpy(V - 2 - 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
867  } else if (j == h - 1) {
868  memcpy(U - 2 + 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
869  memcpy(V - 2 + 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
870  memcpy(U - 2 + 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
871  memcpy(V - 2 + 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
872  }
873 
874  U += ctx->uv_stride;
875  V += ctx->uv_stride;
876  }
877  dst += p->linesize[0];
878  }
879 
880  return keyframe;
881 }
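
Note how the final loop packs the internal planes into AV_PIX_FMT_BGR24: the "U" and "V" planes act as signed offsets added to "Y" rather than conventional chroma, giving B = clip(y + v), G = clip(y), R = clip(y + u), with the offsets sampled at half horizontal resolution. A standalone per-pixel illustration (sample values made up):

#include <stdio.h>

static unsigned char clip_uint8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (unsigned char)v;
}

int main(void)
{
    int y = 200, u = 70, v = -30;       /* hypothetical plane samples */
    unsigned char bgr[3];

    bgr[0] = clip_uint8(y + v);         /* B = 170                    */
    bgr[1] = clip_uint8(y);             /* G = 200                    */
    bgr[2] = clip_uint8(y + u);         /* R = 255 (270 clipped)      */
    printf("B=%u G=%u R=%u\n", bgr[0], bgr[1], bgr[2]);
    return 0;
}
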
882 
883 static const int tm2_stream_order[TM2_NUM_STREAMS] = {
 884  TM2_C_HI, TM2_C_LO, TM2_L_HI, TM2_L_LO, TM2_UPD, TM2_MOT, TM2_TYPE
 885 };
886 
887 #define TM2_HEADER_SIZE 40
888 
889 static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
890  int *got_frame, AVPacket *avpkt)
891 {
892  TM2Context * const l = avctx->priv_data;
893  const uint8_t *buf = avpkt->data;
894  int buf_size = avpkt->size & ~3;
895  AVFrame * const p = l->pic;
896  int offset = TM2_HEADER_SIZE;
897  int i, t, ret;
898 
899  l->error = 0;
900 
901  av_fast_padded_malloc(&l->buffer, &l->buffer_size, buf_size);
902  if (!l->buffer) {
903  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
904  return AVERROR(ENOMEM);
905  }
906 
907  if ((ret = ff_reget_buffer(avctx, p, 0)) < 0)
908  return ret;
909 
910  l->bdsp.bswap_buf((uint32_t *) l->buffer, (const uint32_t *) buf,
911  buf_size >> 2);
912 
913  if ((ret = tm2_read_header(l, l->buffer)) < 0) {
914  return ret;
915  }
916 
917  for (i = 0; i < TM2_NUM_STREAMS; i++) {
918  if (offset >= buf_size) {
919  av_log(avctx, AV_LOG_ERROR, "no space for tm2_read_stream\n");
920  return AVERROR_INVALIDDATA;
921  }
922 
 923  t = tm2_read_stream(l, l->buffer + offset, tm2_stream_order[i],
 924  buf_size - offset);
925  if (t < 0) {
926  int j = tm2_stream_order[i];
927  if (l->tok_lens[j])
928  memset(l->tokens[j], 0, sizeof(**l->tokens) * l->tok_lens[j]);
929  return t;
930  }
931  offset += t;
932  }
933  if (tm2_decode_blocks(l, p)) {
934  p->flags |= AV_FRAME_FLAG_KEY;
 935  p->pict_type = AV_PICTURE_TYPE_I;
 936  } else {
937  p->flags &= ~AV_FRAME_FLAG_KEY;
 938  p->pict_type = AV_PICTURE_TYPE_P;
 939  }
940 
941  l->cur = !l->cur;
942  *got_frame = 1;
943  ret = av_frame_ref(rframe, l->pic);
944 
945  return (ret < 0) ? ret : buf_size;
946 }
947 
 948 static av_cold int decode_init(AVCodecContext *avctx)
 949 {
950  TM2Context * const l = avctx->priv_data;
951  int w = avctx->width, h = avctx->height;
952 
953  if ((avctx->width & 3) || (avctx->height & 3)) {
954  av_log(avctx, AV_LOG_ERROR, "Width and height must be multiple of 4\n");
955  return AVERROR(EINVAL);
956  }
957 
958  l->avctx = avctx;
959  avctx->pix_fmt = AV_PIX_FMT_BGR24;
960 
961  l->pic = av_frame_alloc();
962  if (!l->pic)
963  return AVERROR(ENOMEM);
964 
965  ff_bswapdsp_init(&l->bdsp);
966 
967  l->last = av_malloc_array(w, 2 * sizeof(*l->last));
968  if (!l->last)
969  return AVERROR(ENOMEM);
970  l->clast = l->last + w;
971 
972  w += 8;
973  h += 8;
974  l->Y_base = av_calloc(w * h, 2 * sizeof(*l->Y_base));
975  if (!l->Y_base)
976  return AVERROR(ENOMEM);
977  l->y_stride = w;
978  l->Y1 = l->Y_base + l->y_stride * 4 + 4;
979  l->Y2 = l->Y1 + w * h;
980  w = (w + 1) >> 1;
981  h = (h + 1) >> 1;
982  l->UV_base = av_calloc(w * h, 4 * sizeof(*l->UV_base));
983  if (!l->UV_base)
984  return AVERROR(ENOMEM);
985  l->uv_stride = w;
986  l->U1 = l->UV_base + l->uv_stride * 2 + 2;
987  l->U2 = l->U1 + w * h;
988  l->V1 = l->U2 + w * h;
989  l->V2 = l->V1 + w * h;
990 
991  return 0;
992 }
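
decode_init() over-allocates the planes so the edge-extension code in tm2_decode_blocks() can write a 4-sample luma border and a 2-sample chroma border without bounds checks; the in-picture pointers therefore start at stride * border + border into each plane. A standalone arithmetic sketch for a hypothetical 16x16 frame:

#include <stdio.h>

int main(void)
{
    int w = 16 + 8, h = 16 + 8;                  /* luma plane incl. 4-sample borders   */
    int y_stride  = w;
    int y1_offset = y_stride * 4 + 4;            /* first in-picture luma sample        */
    int cw = (w + 1) >> 1, ch = (h + 1) >> 1;    /* chroma plane incl. 2-sample borders */
    int uv_stride = cw;
    int u1_offset = uv_stride * 2 + 2;           /* first in-picture chroma sample      */

    printf("luma   %dx%d, stride %d, Y1 at offset %d\n", w, h, y_stride, y1_offset);
    printf("chroma %dx%d, stride %d, U1 at offset %d\n", cw, ch, uv_stride, u1_offset);
    return 0;
}
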
993 
 994 static av_cold int decode_end(AVCodecContext *avctx)
 995 {
996  TM2Context * const l = avctx->priv_data;
997  int i;
998 
999  av_freep(&l->last);
1000  for (i = 0; i < TM2_NUM_STREAMS; i++)
1001  av_freep(&l->tokens[i]);
1002 
1003  av_freep(&l->Y_base);
1004  av_freep(&l->UV_base);
1005  av_freep(&l->buffer);
1006  l->buffer_size = 0;
1007 
1008  av_frame_free(&l->pic);
1009 
1010  return 0;
1011 }
1012 
 1013 const FFCodec ff_truemotion2_decoder = {
 1014  .p.name = "truemotion2",
1015  CODEC_LONG_NAME("Duck TrueMotion 2.0"),
1016  .p.type = AVMEDIA_TYPE_VIDEO,
1017  .p.id = AV_CODEC_ID_TRUEMOTION2,
1018  .priv_data_size = sizeof(TM2Context),
1019  .init = decode_init,
1020  .close = decode_end,
 1021  FF_CODEC_DECODE_CB(decode_frame),
 1022  .p.capabilities = AV_CODEC_CAP_DR1,
1023  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1024 };