FFmpeg
truemotion2.c
1 /*
2  * Duck/ON2 TrueMotion 2 Decoder
3  * Copyright (c) 2005 Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Duck TrueMotion2 decoder.
25  */
26 
27 #include <inttypes.h>
28 
29 #include "avcodec.h"
30 #include "bswapdsp.h"
31 #include "bytestream.h"
32 #include "codec_internal.h"
33 #include "decode.h"
34 #include "get_bits.h"
35 
36 #define TM2_ESCAPE 0x80000000
37 #define TM2_DELTAS 64
38 
39 /* Huffman-coded streams of different types of blocks */
40 enum TM2_STREAMS {
41  TM2_C_HI = 0,
42  TM2_C_LO,
43  TM2_L_HI,
44  TM2_L_LO,
45  TM2_UPD,
46  TM2_MOT,
47  TM2_TYPE,
48  TM2_NUM_STREAMS
49 };
50 
51 /* Block types */
52 enum TM2_BLOCKS {
53  TM2_HI_RES = 0,
54  TM2_MED_RES,
55  TM2_LOW_RES,
56  TM2_NULL_RES,
57  TM2_UPDATE,
58  TM2_STILL,
59  TM2_MOTION
60 };
61 
62 typedef struct TM2Context {
63  AVCodecContext *avctx;
64  AVFrame *pic;
65 
66  GetBitContext gb;
67  int error;
68  BswapDSPContext bdsp;
69 
70  uint8_t *buffer;
71  unsigned int buffer_size;
72 
73  /* TM2 streams */
74  int *tokens[TM2_NUM_STREAMS];
75  int tok_lens[TM2_NUM_STREAMS];
76  int tok_ptrs[TM2_NUM_STREAMS];
77  int deltas[TM2_NUM_STREAMS][TM2_DELTAS];
78  /* for blocks decoding */
79  int D[4];
80  int CD[4];
81  int *last;
82  int *clast;
83 
84  /* data for current and previous frame */
85  int *Y_base, *UV_base;
86  int *Y1, *U1, *V1, *Y2, *U2, *V2;
87  int y_stride, uv_stride;
88  int cur;
89 } TM2Context;
90 
91 /**
92  * Huffman codes for each of the streams
93 */
94 typedef struct TM2Codes {
95  VLC vlc; ///< table for FFmpeg bitstream reader
96  int bits;
97  int *recode; ///< table for converting from code indexes to values
98  int length;
99 } TM2Codes;
100 
101 /**
102  * structure for gathering Huffman code information
103 */
104 typedef struct TM2Huff {
105  int val_bits; ///< length of literal
106  int max_bits; ///< maximum length of code
107  int min_bits; ///< minimum length of code
108  int nodes; ///< total number of nodes in tree
109  int num; ///< current number filled
110  int max_num; ///< total number of codes
111  int *nums; ///< literals
112  uint8_t *lens; ///< codelengths
113 } TM2Huff;
114 
115 /**
116  * Recursively read one Huffman (sub)tree from the bitstream.
117  * @returns the length of the longest code or an AVERROR code
118  */
119 static int tm2_read_tree(TM2Context *ctx, int length, TM2Huff *huff)
120 {
121  int ret, ret2;
122  if (length > huff->max_bits) {
123  av_log(ctx->avctx, AV_LOG_ERROR, "Tree exceeded its given depth (%i)\n",
124  huff->max_bits);
125  return AVERROR_INVALIDDATA;
126  }
127 
128  if (!get_bits1(&ctx->gb)) { /* literal */
129  if (length == 0) {
130  length = 1;
131  }
132  if (huff->num >= huff->max_num) {
133  av_log(ctx->avctx, AV_LOG_DEBUG, "Too many literals\n");
134  return AVERROR_INVALIDDATA;
135  }
136  huff->nums[huff->num] = get_bits_long(&ctx->gb, huff->val_bits);
137  huff->lens[huff->num] = length;
138  huff->num++;
139  return length;
140  } else { /* non-terminal node */
141  if ((ret2 = tm2_read_tree(ctx, length + 1, huff)) < 0)
142  return ret2;
143  if ((ret = tm2_read_tree(ctx, length + 1, huff)) < 0)
144  return ret;
145  }
146  return FFMAX(ret, ret2);
147 }
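
The tree is serialized in pre-order: a 1 bit opens an internal node whose two subtrees follow, and a 0 bit marks a leaf, which in the real bitstream is followed by a val_bits-wide literal (omitted below). Since every internal node has exactly two children, a tree with N nodes has (N + 1) / 2 leaves, which is where huff.max_num comes from later. A standalone sketch of the same traversal over an in-memory bit array; the toy BitSrc reader and all names are illustrative, not part of truemotion2.c:

    #include <stdio.h>

    /* toy bit source: walks an array of 0/1 values */
    typedef struct { const int *bits; int pos; } BitSrc;
    static int next_bit(BitSrc *s) { return s->bits[s->pos++]; }

    /* pre-order walk: 1 = internal node (two subtrees follow), 0 = leaf;
     * returns the depth of the deepest leaf, i.e. the longest code length */
    static int walk(BitSrc *s, int depth)
    {
        if (!next_bit(s))                   /* leaf */
            return depth ? depth : 1;       /* a lone-leaf tree still counts as 1 bit */
        int l = walk(s, depth + 1);         /* left subtree */
        int r = walk(s, depth + 1);         /* right subtree */
        return l > r ? l : r;
    }

    int main(void)
    {
        /* encodes the tree ((leaf, leaf), leaf): 5 nodes, 3 leaves */
        const int bits[] = { 1, 1, 0, 0, 0 };
        BitSrc s = { bits, 0 };
        printf("longest code: %d bits\n", walk(&s, 0));   /* prints 2 */
        return 0;
    }
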
148 
149 static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
150 {
151  TM2Huff huff;
152  int res = 0;
153 
154  huff.val_bits = get_bits(&ctx->gb, 5);
155  huff.max_bits = get_bits(&ctx->gb, 5);
156  huff.min_bits = get_bits(&ctx->gb, 5);
157  huff.nodes = get_bits(&ctx->gb, 17);
158  huff.num = 0;
159 
160  /* check for correct code parameters */
161  if ((huff.val_bits < 1) || (huff.val_bits > 32) ||
162  (huff.max_bits < 0) || (huff.max_bits > 25)) {
163  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal "
164  "length: %i, max code length: %i\n", huff.val_bits, huff.max_bits);
165  return AVERROR_INVALIDDATA;
166  }
167  if ((huff.nodes <= 0) || (huff.nodes > 0x10000)) {
168  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree "
169  "nodes: %i\n", huff.nodes);
170  return AVERROR_INVALIDDATA;
171  }
172  /* one-node tree */
173  if (huff.max_bits == 0)
174  huff.max_bits = 1;
175 
176  /* allocate space for codes - it is exactly ceil(nodes / 2) entries */
177  huff.max_num = (huff.nodes + 1) >> 1;
178  huff.nums = av_calloc(huff.max_num, sizeof(int));
179  huff.lens = av_mallocz(huff.max_num);
180 
181  if (!huff.nums || !huff.lens) {
182  res = AVERROR(ENOMEM);
183  goto out;
184  }
185 
186  res = tm2_read_tree(ctx, 0, &huff);
187 
188  if (res >= 0 && res != huff.max_bits) {
189  av_log(ctx->avctx, AV_LOG_ERROR, "Got less bits than expected: %i of %i\n",
190  res, huff.max_bits);
191  res = AVERROR_INVALIDDATA;
192  }
193  if (huff.num != huff.max_num) {
194  av_log(ctx->avctx, AV_LOG_ERROR, "Got less codes than expected: %i of %i\n",
195  huff.num, huff.max_num);
196  res = AVERROR_INVALIDDATA;
197  }
198 
199  /* convert codes to vlc_table */
200  if (res >= 0) {
201  res = ff_vlc_init_from_lengths(&code->vlc, huff.max_bits, huff.max_num,
202  huff.lens, sizeof(huff.lens[0]),
203  NULL, 0, 0, 0, 0, ctx->avctx);
204  if (res < 0)
205  av_log(ctx->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
206  else {
207  code->bits = huff.max_bits;
208  code->length = huff.max_num;
209  code->recode = huff.nums;
210  huff.nums = NULL;
211  }
212  }
213 
214 out:
215  /* free allocated memory */
216  av_free(huff.nums);
217  av_free(huff.lens);
218 
219  return res;
220 }
221 
222 static void tm2_free_codes(TM2Codes *code)
223 {
224  av_free(code->recode);
225  ff_vlc_free(&code->vlc);
226 }
227 
228 static inline int tm2_get_token(GetBitContext *gb, TM2Codes *code)
229 {
230  int val;
231  val = get_vlc2(gb, code->vlc.table, code->bits, 1);
232  if(val<0)
233  return -1;
234  return code->recode[val];
235 }
236 
237 #define TM2_OLD_HEADER_MAGIC 0x00000100
238 #define TM2_NEW_HEADER_MAGIC 0x00000101
239 
240 static inline int tm2_read_header(TM2Context *ctx, const uint8_t *buf)
241 {
242  uint32_t magic = AV_RL32(buf);
243 
244  switch (magic) {
245  case TM2_OLD_HEADER_MAGIC:
246  avpriv_request_sample(ctx->avctx, "Old TM2 header");
247  return 0;
248  case TM2_NEW_HEADER_MAGIC:
249  return 0;
250  default:
251  av_log(ctx->avctx, AV_LOG_ERROR, "Not a TM2 header: 0x%08"PRIX32"\n",
252  magic);
253  return AVERROR_INVALIDDATA;
254  }
255 }
256 
257 static int tm2_read_deltas(TM2Context *ctx, int stream_id)
258 {
259  int d, mb;
260  int i, v;
261 
262  d = get_bits(&ctx->gb, 9);
263  mb = get_bits(&ctx->gb, 5);
264 
265  av_assert2(mb < 32);
266  if ((d < 1) || (d > TM2_DELTAS) || (mb < 1)) {
267  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect delta table: %i deltas x %i bits\n", d, mb);
268  return AVERROR_INVALIDDATA;
269  }
270 
271  for (i = 0; i < d; i++) {
272  v = get_bits_long(&ctx->gb, mb);
273  if (v & (1 << (mb - 1)))
274  ctx->deltas[stream_id][i] = v - (1U << mb);
275  else
276  ctx->deltas[stream_id][i] = v;
277  }
278  for (; i < TM2_DELTAS; i++)
279  ctx->deltas[stream_id][i] = 0;
280 
281  return 0;
282 }
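
The deltas are mb-bit two's-complement values, so the loop above subtracts 1 << mb whenever the top bit of the raw value is set. The same sign extension as a standalone sketch (the function name is illustrative):

    #include <stdio.h>

    /* sign-extend the low nbits bits of v (valid for 1 <= nbits <= 31) */
    static int sign_extend_bits(unsigned v, int nbits)
    {
        if (v & (1u << (nbits - 1)))        /* sign bit set -> negative */
            return (int)(v - (1u << nbits));
        return (int)v;
    }

    int main(void)
    {
        /* with 5-bit deltas, 0x1F decodes to -1 and 0x0F to +15 */
        printf("%d %d\n", sign_extend_bits(0x1F, 5), sign_extend_bits(0x0F, 5));
        return 0;
    }
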
283 
284 static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, int buf_size)
285 {
286  int i, ret;
287  int skip = 0;
288  int len, toks, pos;
289  TM2Codes codes;
290  GetByteContext gb;
291 
292  if (buf_size < 4) {
293  av_log(ctx->avctx, AV_LOG_ERROR, "not enough space for len left\n");
294  return AVERROR_INVALIDDATA;
295  }
296 
297  /* get stream length in dwords */
298  bytestream2_init(&gb, buf, buf_size);
299  len = bytestream2_get_be32(&gb);
300 
301  if (len == 0)
302  return 4;
303 
304  if (len >= INT_MAX / 4 - 1 || len < 0 || len * 4 + 4 > buf_size) {
305  av_log(ctx->avctx, AV_LOG_ERROR, "Error, invalid stream size.\n");
306  return AVERROR_INVALIDDATA;
307  }
308  skip = len * 4 + 4;
309 
310  toks = bytestream2_get_be32(&gb);
311  if (toks & 1) {
312  len = bytestream2_get_be32(&gb);
313  if (len == TM2_ESCAPE) {
314  len = bytestream2_get_be32(&gb);
315  }
316  if (len > 0) {
317  pos = bytestream2_tell(&gb);
318  if (skip <= pos)
319  return AVERROR_INVALIDDATA;
320  init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
321  if ((ret = tm2_read_deltas(ctx, stream_id)) < 0)
322  return ret;
323  bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);
324  }
325  }
326  /* skip unused fields */
327  len = bytestream2_get_be32(&gb);
328  if (len == TM2_ESCAPE) { /* some unknown length - could be escaped too */
329  bytestream2_skip(&gb, 8); /* unused by decoder */
330  } else {
331  bytestream2_skip(&gb, 4); /* unused by decoder */
332  }
333 
334  pos = bytestream2_tell(&gb);
335  if (skip <= pos)
336  return AVERROR_INVALIDDATA;
337  init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
338  if ((ret = tm2_build_huff_table(ctx, &codes)) < 0)
339  return ret;
340  bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);
341 
342  toks >>= 1;
343  /* check if we have sane number of tokens */
344  if ((toks < 0) || (toks > 0xFFFFFF)) {
345  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
346  ret = AVERROR_INVALIDDATA;
347  goto end;
348  }
349  ret = av_reallocp_array(&ctx->tokens[stream_id], toks, sizeof(int));
350  if (ret < 0) {
351  ctx->tok_lens[stream_id] = 0;
352  goto end;
353  }
354  ctx->tok_lens[stream_id] = toks;
355  len = bytestream2_get_be32(&gb);
356  if (len > 0) {
357  pos = bytestream2_tell(&gb);
358  if (skip <= pos) {
359  ret = AVERROR_INVALIDDATA;
360  goto end;
361  }
362  init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
363  for (i = 0; i < toks; i++) {
364  if (get_bits_left(&ctx->gb) <= 0) {
365  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
366  ret = AVERROR_INVALIDDATA;
367  goto end;
368  }
369  ctx->tokens[stream_id][i] = tm2_get_token(&ctx->gb, &codes);
370  if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS || ctx->tokens[stream_id][i]<0) {
371  av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
372  ctx->tokens[stream_id][i], stream_id, i);
373  ret = AVERROR_INVALIDDATA;
374  goto end;
375  }
376  }
377  } else {
378  if (len < 0) {
379  ret = AVERROR_INVALIDDATA;
380  goto end;
381  }
382  for (i = 0; i < toks; i++) {
383  ctx->tokens[stream_id][i] = codes.recode[0];
384  if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) {
385  av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
386  ctx->tokens[stream_id][i], stream_id, i);
387  ret = AVERROR_INVALIDDATA;
388  goto end;
389  }
390  }
391  }
392 
393  ret = skip;
394 
395 end:
396  tm2_free_codes(&codes);
397  return ret;
398 }
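
Each stream chunk is framed by a 32-bit big-endian dword count: a stored length of len means the chunk occupies len * 4 + 4 bytes including the length word itself, and len == 0 is an empty 4-byte chunk, which is why the function returns skip = len * 4 + 4 (or 4). A standalone sketch of walking such length-prefixed chunks; names are illustrative and the payload (escape codes, delta table, Huffman table, tokens) is not parsed:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rb32(const uint8_t *p)
    {
        return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
    }

    /* print the offset and byte size of each length-prefixed chunk */
    static void walk_chunks(const uint8_t *buf, size_t size, int nb_chunks)
    {
        size_t off = 0;
        for (int i = 0; i < nb_chunks && off + 4 <= size; i++) {
            uint32_t len  = rb32(buf + off);
            size_t   skip = (size_t)len * 4 + 4;   /* length word + payload dwords */
            printf("chunk %d: offset %zu, %zu bytes\n", i, off, skip);
            if (skip > size - off)                 /* truncated input */
                break;
            off += skip;
        }
    }

    int main(void)
    {
        /* two chunks: one payload dword, then an empty chunk */
        const uint8_t buf[] = { 0,0,0,1, 0xde,0xad,0xbe,0xef, 0,0,0,0 };
        walk_chunks(buf, sizeof(buf), 2);
        return 0;
    }
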
399 
400 static inline int GET_TOK(TM2Context *ctx,int type)
401 {
402  if (ctx->tok_ptrs[type] >= ctx->tok_lens[type]) {
403  av_log(ctx->avctx, AV_LOG_ERROR, "Read token from stream %i out of bounds (%i>=%i)\n", type, ctx->tok_ptrs[type], ctx->tok_lens[type]);
404  ctx->error = 1;
405  return 0;
406  }
407  if (type <= TM2_MOT) {
408  if (ctx->tokens[type][ctx->tok_ptrs[type]] >= TM2_DELTAS) {
409  av_log(ctx->avctx, AV_LOG_ERROR, "token %d is too large\n", ctx->tokens[type][ctx->tok_ptrs[type]]);
410  return 0;
411  }
412  return ctx->deltas[type][ctx->tokens[type][ctx->tok_ptrs[type]++]];
413  }
414  return ctx->tokens[type][ctx->tok_ptrs[type]++];
415 }
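
For the delta-coded streams (TM2_C_HI through TM2_MOT) a token is not a value but an index into the per-stream delta table filled by tm2_read_deltas(); only the remaining stream (TM2_TYPE) is consumed literally. A toy illustration of that indirection with made-up tables:

    #include <stdio.h>

    int main(void)
    {
        /* per-stream delta table (the equivalent of ctx->deltas[stream]) */
        const int deltas[4] = { 0, 1, -1, 4 };
        /* decoded Huffman tokens: indexes into deltas[] for delta-coded streams */
        const int tokens[5] = { 1, 1, 3, 2, 0 };

        int acc = 0;
        for (int i = 0; i < 5; i++) {
            acc += deltas[tokens[i]];       /* token -> delta -> running value */
            printf("%d ", acc);             /* prints: 1 2 6 5 5 */
        }
        printf("\n");
        return 0;
    }
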
416 
417 /* blocks decoding routines */
418 
419 /* common Y, U, V pointers initialisation */
420 #define TM2_INIT_POINTERS() \
421  int *last, *clast; \
422  int *Y, *U, *V;\
423  int Ystride, Ustride, Vstride;\
424 \
425  Ystride = ctx->y_stride;\
426  Vstride = ctx->uv_stride;\
427  Ustride = ctx->uv_stride;\
428  Y = (ctx->cur?ctx->Y2:ctx->Y1) + by * 4 * Ystride + bx * 4;\
429  V = (ctx->cur?ctx->V2:ctx->V1) + by * 2 * Vstride + bx * 2;\
430  U = (ctx->cur?ctx->U2:ctx->U1) + by * 2 * Ustride + bx * 2;\
431  last = ctx->last + bx * 4;\
432  clast = ctx->clast + bx * 4;
433 
434 #define TM2_INIT_POINTERS_2() \
435  unsigned *Yo, *Uo, *Vo;\
436  int oYstride, oUstride, oVstride;\
437 \
438  TM2_INIT_POINTERS();\
439  oYstride = Ystride;\
440  oVstride = Vstride;\
441  oUstride = Ustride;\
442  Yo = (ctx->cur?ctx->Y1:ctx->Y2) + by * 4 * oYstride + bx * 4;\
443  Vo = (ctx->cur?ctx->V1:ctx->V2) + by * 2 * oVstride + bx * 2;\
444  Uo = (ctx->cur?ctx->U1:ctx->U2) + by * 2 * oUstride + bx * 2;
445 
446 /* recalculate last and delta values for next blocks */
447 #define TM2_RECALC_BLOCK(CHR, stride, last, CD) {\
448  CD[0] = (unsigned)CHR[ 1] - (unsigned)last[1];\
449  CD[1] = (unsigned)CHR[stride + 1] - (unsigned) CHR[1];\
450  last[0] = (int)CHR[stride + 0];\
451  last[1] = (int)CHR[stride + 1];}
452 
453 /* common operations - add deltas to 4x4 block of luma or 2x2 blocks of chroma */
454 static inline void tm2_apply_deltas(TM2Context *ctx, int* Y, int stride, int *deltas, int *last)
455 {
456  unsigned ct, d;
457  int i, j;
458 
459  for (j = 0; j < 4; j++){
460  ct = ctx->D[j];
461  for (i = 0; i < 4; i++){
462  d = deltas[i + j * 4];
463  ct += d;
464  last[i] += ct;
465  Y[i] = av_clip_uint8(last[i]);
466  }
467  Y += stride;
468  ctx->D[j] = ct;
469  }
470 }
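
tm2_apply_deltas() reconstructs a 4x4 luma block with two running predictors: ct starts from ctx->D[j], accumulates the deltas horizontally along row j and is written back to D[j] so it carries into the next block on the same row, while last[i] accumulates vertically down column i. A reduced sketch of that accumulation on plain arrays (clipping to 8 bits is omitted; names are illustrative):

    #include <stdio.h>

    /* 2-D DPCM: per-row carry D[j] plus per-column accumulator last[i] */
    static void apply_deltas(int out[4][4], const int deltas[16],
                             int D[4], int last[4])
    {
        for (int j = 0; j < 4; j++) {
            int ct = D[j];
            for (int i = 0; i < 4; i++) {
                ct      += deltas[i + j * 4];   /* horizontal accumulation */
                last[i] += ct;                  /* vertical accumulation */
                out[j][i] = last[i];
            }
            D[j] = ct;                          /* carried into the next block */
        }
    }

    int main(void)
    {
        int out[4][4], D[4] = { 0 }, last[4] = { 0 };
        const int deltas[16] = { 1, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0 };
        apply_deltas(out, deltas, D, last);
        /* a single delta in the top-left corner propagates to the whole block */
        printf("%d %d %d %d\n", out[0][0], out[0][3], out[3][0], out[3][3]); /* 1 1 1 1 */
        return 0;
    }
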
471 
472 static inline void tm2_high_chroma(int *data, int stride, int *last, unsigned *CD, int *deltas)
473 {
474  int i, j;
475  for (j = 0; j < 2; j++) {
476  for (i = 0; i < 2; i++) {
477  CD[j] += deltas[i + j * 2];
478  last[i] += CD[j];
479  data[i] = last[i];
480  }
481  data += stride;
482  }
483 }
484 
485 static inline void tm2_low_chroma(int *data, int stride, int *clast, unsigned *CD, int *deltas, int bx)
486 {
487  int t;
488  int l;
489  int prev;
490 
491  if (bx > 0)
492  prev = clast[-3];
493  else
494  prev = 0;
495  t = (int)(CD[0] + CD[1]) >> 1;
496  l = (int)(prev - CD[0] - CD[1] + clast[1]) >> 1;
497  CD[1] = CD[0] + CD[1] - t;
498  CD[0] = t;
499  clast[0] = l;
500 
501  tm2_high_chroma(data, stride, clast, CD, deltas);
502 }
503 
504 static inline void tm2_hi_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
505 {
506  int i;
507  int deltas[16];
508  TM2_INIT_POINTERS();
509 
510  /* hi-res chroma */
511  for (i = 0; i < 4; i++) {
512  deltas[i] = GET_TOK(ctx, TM2_C_HI);
513  deltas[i + 4] = GET_TOK(ctx, TM2_C_HI);
514  }
515  tm2_high_chroma(U, Ustride, clast, ctx->CD, deltas);
516  tm2_high_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas + 4);
517 
518  /* hi-res luma */
519  for (i = 0; i < 16; i++)
520  deltas[i] = GET_TOK(ctx, TM2_L_HI);
521 
522  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
523 }
524 
525 static inline void tm2_med_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
526 {
527  int i;
528  int deltas[16];
529  TM2_INIT_POINTERS();
530 
531  /* low-res chroma */
532  deltas[0] = GET_TOK(ctx, TM2_C_LO);
533  deltas[1] = deltas[2] = deltas[3] = 0;
534  tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);
535 
536  deltas[0] = GET_TOK(ctx, TM2_C_LO);
537  deltas[1] = deltas[2] = deltas[3] = 0;
538  tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);
539 
540  /* hi-res luma */
541  for (i = 0; i < 16; i++)
542  deltas[i] = GET_TOK(ctx, TM2_L_HI);
543 
544  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
545 }
546 
547 static inline void tm2_low_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
548 {
549  int i;
550  int t1, t2;
551  int deltas[16];
552  TM2_INIT_POINTERS();
553 
554  /* low-res chroma */
555  deltas[0] = GET_TOK(ctx, TM2_C_LO);
556  deltas[1] = deltas[2] = deltas[3] = 0;
557  tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);
558 
559  deltas[0] = GET_TOK(ctx, TM2_C_LO);
560  deltas[1] = deltas[2] = deltas[3] = 0;
561  tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);
562 
563  /* low-res luma */
564  for (i = 0; i < 16; i++)
565  deltas[i] = 0;
566 
567  deltas[ 0] = GET_TOK(ctx, TM2_L_LO);
568  deltas[ 2] = GET_TOK(ctx, TM2_L_LO);
569  deltas[ 8] = GET_TOK(ctx, TM2_L_LO);
570  deltas[10] = GET_TOK(ctx, TM2_L_LO);
571 
572  if (bx > 0)
573  last[0] = (int)((unsigned)last[-1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3] + last[1]) >> 1;
574  else
575  last[0] = (int)((unsigned)last[1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3])>> 1;
576  last[2] = (int)((unsigned)last[1] + last[3]) >> 1;
577 
578  t1 = ctx->D[0] + (unsigned)ctx->D[1];
579  ctx->D[0] = t1 >> 1;
580  ctx->D[1] = t1 - (t1 >> 1);
581  t2 = ctx->D[2] + (unsigned)ctx->D[3];
582  ctx->D[2] = t2 >> 1;
583  ctx->D[3] = t2 - (t2 >> 1);
584 
585  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
586 }
587 
588 static inline void tm2_null_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
589 {
590  int i;
591  int ct;
592  unsigned left, right;
593  int diff;
594  int deltas[16];
595  TM2_INIT_POINTERS();
596 
597  /* null chroma */
598  deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
599  tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);
600 
601  deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
602  tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);
603 
604  /* null luma */
605  for (i = 0; i < 16; i++)
606  deltas[i] = 0;
607 
608  ct = (unsigned)ctx->D[0] + ctx->D[1] + ctx->D[2] + ctx->D[3];
609 
610  if (bx > 0)
611  left = last[-1] - (unsigned)ct;
612  else
613  left = 0;
614 
615  right = last[3];
616  diff = right - left;
617  last[0] = left + (diff >> 2);
618  last[1] = left + (diff >> 1);
619  last[2] = right - (diff >> 2);
620  last[3] = right;
621  {
622  unsigned tp = left;
623 
624  ctx->D[0] = (tp + (ct >> 2)) - left;
625  left += ctx->D[0];
626  ctx->D[1] = (tp + (ct >> 1)) - left;
627  left += ctx->D[1];
628  ctx->D[2] = ((tp + ct) - (ct >> 2)) - left;
629  left += ctx->D[2];
630  ctx->D[3] = (tp + ct) - left;
631  }
632  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
633 }
634 
635 static inline void tm2_still_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
636 {
637  int i, j;
638  TM2_INIT_POINTERS_2();
639 
640  /* update chroma */
641  for (j = 0; j < 2; j++) {
642  for (i = 0; i < 2; i++){
643  U[i] = Uo[i];
644  V[i] = Vo[i];
645  }
646  U += Ustride; V += Vstride;
647  Uo += oUstride; Vo += oVstride;
648  }
649  U -= Ustride * 2;
650  V -= Vstride * 2;
651  TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
652  TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));
653 
654  /* update deltas */
655  ctx->D[0] = Yo[3] - last[3];
656  ctx->D[1] = Yo[3 + oYstride] - Yo[3];
657  ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
658  ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];
659 
660  for (j = 0; j < 4; j++) {
661  for (i = 0; i < 4; i++) {
662  Y[i] = Yo[i];
663  last[i] = Yo[i];
664  }
665  Y += Ystride;
666  Yo += oYstride;
667  }
668 }
669 
670 static inline void tm2_update_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
671 {
672  int i, j;
673  unsigned d;
674  TM2_INIT_POINTERS_2();
675 
676  /* update chroma */
677  for (j = 0; j < 2; j++) {
678  for (i = 0; i < 2; i++) {
679  U[i] = Uo[i] + GET_TOK(ctx, TM2_UPD);
680  V[i] = Vo[i] + GET_TOK(ctx, TM2_UPD);
681  }
682  U += Ustride;
683  V += Vstride;
684  Uo += oUstride;
685  Vo += oVstride;
686  }
687  U -= Ustride * 2;
688  V -= Vstride * 2;
689  TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
690  TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));
691 
692  /* update deltas */
693  ctx->D[0] = Yo[3] - last[3];
694  ctx->D[1] = Yo[3 + oYstride] - Yo[3];
695  ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
696  ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];
697 
698  for (j = 0; j < 4; j++) {
699  d = last[3];
700  for (i = 0; i < 4; i++) {
701  Y[i] = Yo[i] + (unsigned)GET_TOK(ctx, TM2_UPD);
702  last[i] = Y[i];
703  }
704  ctx->D[j] = last[3] - d;
705  Y += Ystride;
706  Yo += oYstride;
707  }
708 }
709 
710 static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
711 {
712  int i, j;
713  int mx, my;
714  TM2_INIT_POINTERS_2();
715 
716  mx = GET_TOK(ctx, TM2_MOT);
717  my = GET_TOK(ctx, TM2_MOT);
718  mx = av_clip(mx, -(bx * 4 + 4), ctx->avctx->width - bx * 4);
719  my = av_clip(my, -(by * 4 + 4), ctx->avctx->height - by * 4);
720 
721  if (4*bx+mx<0 || 4*by+my<0 || 4*bx+mx+4 > ctx->avctx->width || 4*by+my+4 > ctx->avctx->height) {
722  av_log(ctx->avctx, AV_LOG_ERROR, "MV out of picture\n");
723  return;
724  }
725 
726  Yo += my * oYstride + mx;
727  Uo += (my >> 1) * oUstride + (mx >> 1);
728  Vo += (my >> 1) * oVstride + (mx >> 1);
729 
730  /* copy chroma */
731  for (j = 0; j < 2; j++) {
732  for (i = 0; i < 2; i++) {
733  U[i] = Uo[i];
734  V[i] = Vo[i];
735  }
736  U += Ustride;
737  V += Vstride;
738  Uo += oUstride;
739  Vo += oVstride;
740  }
741  U -= Ustride * 2;
742  V -= Vstride * 2;
743  TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
744  TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));
745 
746  /* copy luma */
747  for (j = 0; j < 4; j++) {
748  for (i = 0; i < 4; i++) {
749  Y[i] = Yo[i];
750  }
751  Y += Ystride;
752  Yo += oYstride;
753  }
754  /* calculate deltas */
755  Y -= Ystride * 4;
756  ctx->D[0] = (unsigned)Y[3] - last[3];
757  ctx->D[1] = (unsigned)Y[3 + Ystride] - Y[3];
758  ctx->D[2] = (unsigned)Y[3 + Ystride * 2] - Y[3 + Ystride];
759  ctx->D[3] = (unsigned)Y[3 + Ystride * 3] - Y[3 + Ystride * 2];
760  for (i = 0; i < 4; i++)
761  last[i] = Y[i + Ystride * 3];
762 }
763 
764 static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
765 {
766  int i, j;
767  int w = ctx->avctx->width, h = ctx->avctx->height, bw = w >> 2, bh = h >> 2, cw = w >> 1;
768  int type;
769  int keyframe = 1;
770  int *Y, *U, *V;
771  uint8_t *dst;
772 
773  for (i = 0; i < TM2_NUM_STREAMS; i++)
774  ctx->tok_ptrs[i] = 0;
775 
776  if (ctx->tok_lens[TM2_TYPE]<bw*bh) {
777  av_log(ctx->avctx,AV_LOG_ERROR,"Got %i tokens for %i blocks\n",ctx->tok_lens[TM2_TYPE],bw*bh);
778  return AVERROR_INVALIDDATA;
779  }
780 
781  memset(ctx->last, 0, 4 * bw * sizeof(int));
782  memset(ctx->clast, 0, 4 * bw * sizeof(int));
783 
784  for (j = 0; j < bh; j++) {
785  memset(ctx->D, 0, 4 * sizeof(int));
786  memset(ctx->CD, 0, 4 * sizeof(int));
787  for (i = 0; i < bw; i++) {
788  type = GET_TOK(ctx, TM2_TYPE);
789  switch(type) {
790  case TM2_HI_RES:
791  tm2_hi_res_block(ctx, p, i, j);
792  break;
793  case TM2_MED_RES:
794  tm2_med_res_block(ctx, p, i, j);
795  break;
796  case TM2_LOW_RES:
797  tm2_low_res_block(ctx, p, i, j);
798  break;
799  case TM2_NULL_RES:
800  tm2_null_res_block(ctx, p, i, j);
801  break;
802  case TM2_UPDATE:
803  tm2_update_block(ctx, p, i, j);
804  keyframe = 0;
805  break;
806  case TM2_STILL:
807  tm2_still_block(ctx, p, i, j);
808  keyframe = 0;
809  break;
810  case TM2_MOTION:
811  tm2_motion_block(ctx, p, i, j);
812  keyframe = 0;
813  break;
814  default:
815  av_log(ctx->avctx, AV_LOG_ERROR, "Skipping unknown block type %i\n", type);
816  }
817  if (ctx->error)
818  return AVERROR_INVALIDDATA;
819  }
820  }
821 
822  /* copy data from our buffer to AVFrame */
823  Y = (ctx->cur?ctx->Y2:ctx->Y1);
824  U = (ctx->cur?ctx->U2:ctx->U1);
825  V = (ctx->cur?ctx->V2:ctx->V1);
826  dst = p->data[0];
827  for (j = 0; j < h; j++) {
828  for (i = 0; i < w; i++) {
829  unsigned y = Y[i], u = U[i >> 1], v = V[i >> 1];
830  dst[3*i+0] = av_clip_uint8(y + v);
831  dst[3*i+1] = av_clip_uint8(y);
832  dst[3*i+2] = av_clip_uint8(y + u);
833  }
834 
835  /* horizontal edge extension */
836  Y[-4] = Y[-3] = Y[-2] = Y[-1] = Y[0];
837  Y[w + 3] = Y[w + 2] = Y[w + 1] = Y[w] = Y[w - 1];
838 
839  /* vertical edge extension */
840  if (j == 0) {
841  memcpy(Y - 4 - 1 * ctx->y_stride, Y - 4, ctx->y_stride);
842  memcpy(Y - 4 - 2 * ctx->y_stride, Y - 4, ctx->y_stride);
843  memcpy(Y - 4 - 3 * ctx->y_stride, Y - 4, ctx->y_stride);
844  memcpy(Y - 4 - 4 * ctx->y_stride, Y - 4, ctx->y_stride);
845  } else if (j == h - 1) {
846  memcpy(Y - 4 + 1 * ctx->y_stride, Y - 4, ctx->y_stride);
847  memcpy(Y - 4 + 2 * ctx->y_stride, Y - 4, ctx->y_stride);
848  memcpy(Y - 4 + 3 * ctx->y_stride, Y - 4, ctx->y_stride);
849  memcpy(Y - 4 + 4 * ctx->y_stride, Y - 4, ctx->y_stride);
850  }
851 
852  Y += ctx->y_stride;
853  if (j & 1) {
854  /* horizontal edge extension */
855  U[-2] = U[-1] = U[0];
856  V[-2] = V[-1] = V[0];
857  U[cw + 1] = U[cw] = U[cw - 1];
858  V[cw + 1] = V[cw] = V[cw - 1];
859 
860  /* vertical edge extension */
861  if (j == 1) {
862  memcpy(U - 2 - 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
863  memcpy(V - 2 - 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
864  memcpy(U - 2 - 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
865  memcpy(V - 2 - 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
866  } else if (j == h - 1) {
867  memcpy(U - 2 + 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
868  memcpy(V - 2 + 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
869  memcpy(U - 2 + 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
870  memcpy(V - 2 + 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
871  }
872 
873  U += ctx->uv_stride;
874  V += ctx->uv_stride;
875  }
876  dst += p->linesize[0];
877  }
878 
879  return keyframe;
880 }
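
The copy-out loop at the end shows that TM2's nominal Y/U/V planes are really G plus two colour differences: each pixel is written to packed BGR24 as B = Y + V, G = Y, R = Y + U. That per-pixel mapping as a standalone sketch (names are illustrative):

    #include <stdio.h>

    static unsigned char clip_uint8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : (unsigned char)v;
    }

    /* TM2 pseudo-YUV -> packed BGR24: B = y + v, G = y, R = y + u */
    static void tm2_to_bgr24(int y, int u, int v, unsigned char bgr[3])
    {
        bgr[0] = clip_uint8(y + v);
        bgr[1] = clip_uint8(y);
        bgr[2] = clip_uint8(y + u);
    }

    int main(void)
    {
        unsigned char bgr[3];
        tm2_to_bgr24(100, 20, -30, bgr);
        printf("B=%u G=%u R=%u\n", bgr[0], bgr[1], bgr[2]);   /* B=70 G=100 R=120 */
        return 0;
    }
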
881 
882 static const int tm2_stream_order[TM2_NUM_STREAMS] = {
883  TM2_C_HI, TM2_C_LO, TM2_L_HI, TM2_L_LO, TM2_UPD, TM2_MOT, TM2_TYPE
884 };
885 
886 #define TM2_HEADER_SIZE 40
887 
888 static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
889  int *got_frame, AVPacket *avpkt)
890 {
891  TM2Context * const l = avctx->priv_data;
892  const uint8_t *buf = avpkt->data;
893  int buf_size = avpkt->size & ~3;
894  AVFrame * const p = l->pic;
895  int offset = TM2_HEADER_SIZE;
896  int i, t, ret;
897 
898  l->error = 0;
899 
900  av_fast_padded_malloc(&l->buffer, &l->buffer_size, buf_size);
901  if (!l->buffer) {
902  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
903  return AVERROR(ENOMEM);
904  }
905 
906  if ((ret = ff_reget_buffer(avctx, p, 0)) < 0)
907  return ret;
908 
909  l->bdsp.bswap_buf((uint32_t *) l->buffer, (const uint32_t *) buf,
910  buf_size >> 2);
911 
912  if ((ret = tm2_read_header(l, l->buffer)) < 0) {
913  return ret;
914  }
915 
916  for (i = 0; i < TM2_NUM_STREAMS; i++) {
917  if (offset >= buf_size) {
918  av_log(avctx, AV_LOG_ERROR, "no space for tm2_read_stream\n");
919  return AVERROR_INVALIDDATA;
920  }
921 
922  t = tm2_read_stream(l, l->buffer + offset, tm2_stream_order[i],
923  buf_size - offset);
924  if (t < 0) {
925  int j = tm2_stream_order[i];
926  if (l->tok_lens[j])
927  memset(l->tokens[j], 0, sizeof(**l->tokens) * l->tok_lens[j]);
928  return t;
929  }
930  offset += t;
931  }
932  if (tm2_decode_blocks(l, p)) {
933  p->flags |= AV_FRAME_FLAG_KEY;
934  p->pict_type = AV_PICTURE_TYPE_I;
935  } else {
936  p->flags &= ~AV_FRAME_FLAG_KEY;
937  p->pict_type = AV_PICTURE_TYPE_P;
938  }
939 
940  l->cur = !l->cur;
941  *got_frame = 1;
942  ret = av_frame_ref(rframe, l->pic);
943 
944  return (ret < 0) ? ret : buf_size;
945 }
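
From the caller's side the decoder is driven through the ordinary libavcodec send/receive API. A minimal sketch that decodes one already-demuxed packet; the helper name is made up, error handling is collapsed to a NULL return, and the packet plus coded dimensions are assumed to come from a demuxer (TM2 is normally carried in AVI under the TM20 FourCC):

    #include <libavcodec/avcodec.h>

    /* Decode a single TrueMotion 2 packet into a BGR24 frame (illustrative). */
    AVFrame *decode_one_tm2_packet(const AVPacket *pkt, int width, int height)
    {
        const AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_TRUEMOTION2);
        AVCodecContext *c  = dec ? avcodec_alloc_context3(dec) : NULL;
        AVFrame *frame     = av_frame_alloc();

        if (!c || !frame) {
            av_frame_free(&frame);
            avcodec_free_context(&c);
            return NULL;
        }
        c->width  = width;                   /* decode_init() requires multiples of 4 */
        c->height = height;

        if (avcodec_open2(c, dec, NULL) < 0 ||
            avcodec_send_packet(c, pkt) < 0 ||
            avcodec_receive_frame(c, frame) < 0)
            av_frame_free(&frame);           /* on failure frame becomes NULL */

        avcodec_free_context(&c);
        return frame;                        /* frame->format == AV_PIX_FMT_BGR24 */
    }
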
946 
947 static av_cold int decode_init(AVCodecContext *avctx)
948 {
949  TM2Context * const l = avctx->priv_data;
950  int w = avctx->width, h = avctx->height;
951 
952  if ((avctx->width & 3) || (avctx->height & 3)) {
953  av_log(avctx, AV_LOG_ERROR, "Width and height must be multiple of 4\n");
954  return AVERROR(EINVAL);
955  }
956 
957  l->avctx = avctx;
958  avctx->pix_fmt = AV_PIX_FMT_BGR24;
959 
960  l->pic = av_frame_alloc();
961  if (!l->pic)
962  return AVERROR(ENOMEM);
963 
964  ff_bswapdsp_init(&l->bdsp);
965 
966  l->last = av_malloc_array(w, 2 * sizeof(*l->last));
967  if (!l->last)
968  return AVERROR(ENOMEM);
969  l->clast = l->last + w;
970 
971  w += 8;
972  h += 8;
973  l->Y_base = av_calloc(w * h, 2 * sizeof(*l->Y_base));
974  if (!l->Y_base)
975  return AVERROR(ENOMEM);
976  l->y_stride = w;
977  l->Y1 = l->Y_base + l->y_stride * 4 + 4;
978  l->Y2 = l->Y1 + w * h;
979  w = (w + 1) >> 1;
980  h = (h + 1) >> 1;
981  l->UV_base = av_calloc(w * h, 4 * sizeof(*l->UV_base));
982  if (!l->UV_base)
983  return AVERROR(ENOMEM);
984  l->uv_stride = w;
985  l->U1 = l->UV_base + l->uv_stride * 2 + 2;
986  l->U2 = l->U1 + w * h;
987  l->V1 = l->U2 + w * h;
988  l->V2 = l->V1 + w * h;
989 
990  return 0;
991 }
992 
993 static av_cold int decode_end(AVCodecContext *avctx)
994 {
995  TM2Context * const l = avctx->priv_data;
996  int i;
997 
998  av_freep(&l->last);
999  for (i = 0; i < TM2_NUM_STREAMS; i++)
1000  av_freep(&l->tokens[i]);
1001 
1002  av_freep(&l->Y_base);
1003  av_freep(&l->UV_base);
1004  av_freep(&l->buffer);
1005  l->buffer_size = 0;
1006 
1007  av_frame_free(&l->pic);
1008 
1009  return 0;
1010 }
1011 
1012 const FFCodec ff_truemotion2_decoder = {
1013  .p.name = "truemotion2",
1014  CODEC_LONG_NAME("Duck TrueMotion 2.0"),
1015  .p.type = AVMEDIA_TYPE_VIDEO,
1016  .p.id = AV_CODEC_ID_TRUEMOTION2,
1017  .priv_data_size = sizeof(TM2Context),
1018  .init = decode_init,
1019  .close = decode_end,
1020  FF_CODEC_DECODE_CB(decode_frame),
1021  .p.capabilities = AV_CODEC_CAP_DR1,
1022  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1023 };