FFmpeg
truemotion2.c
Go to the documentation of this file.
1 /*
2  * Duck/ON2 TrueMotion 2 Decoder
3  * Copyright (c) 2005 Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Duck TrueMotion2 decoder.
25  */
26 
27 #include <inttypes.h>
28 
29 #include "avcodec.h"
30 #include "bswapdsp.h"
31 #include "bytestream.h"
32 #include "get_bits.h"
33 #include "internal.h"
34 
35 #define TM2_ESCAPE 0x80000000
36 #define TM2_DELTAS 64
37 
38 /* Huffman-coded streams of different types of blocks */
/* Huffman-coded token streams; order matters: GET_TOK() treats every
 * stream up to and including TM2_MOT as an index into the delta tables. */
enum TM2_STREAMS {
    TM2_C_HI = 0,   ///< hi-res chroma deltas
    TM2_C_LO,       ///< low-res chroma deltas
    TM2_L_HI,       ///< hi-res luma deltas
    TM2_L_LO,       ///< low-res luma deltas
    TM2_UPD,        ///< update values
    TM2_MOT,        ///< motion vectors
    TM2_TYPE,       ///< block types
    TM2_NUM_STREAMS
};
49 
50 /* Block types */
/* Block types, as decoded from the TM2_TYPE stream and dispatched in
 * tm2_decode_blocks(). */
enum TM2_BLOCKS {
    TM2_HI_RES,     ///< full-resolution luma and chroma deltas
    TM2_MED_RES,    ///< full-res luma, low-res chroma
    TM2_LOW_RES,    ///< low-res luma and chroma
    TM2_NULL_RES,   ///< no new deltas, interpolated block
    TM2_UPDATE,     ///< previous frame plus delta updates
    TM2_STILL,      ///< verbatim copy from the previous frame
    TM2_MOTION      ///< motion-compensated copy from the previous frame
};
60 
61 typedef struct TM2Context {
64 
66  int error;
68 
71 
72  /* TM2 streams */
77  /* for blocks decoding */
78  int D[4];
79  int CD[4];
80  int *last;
81  int *clast;
82 
83  /* data for current and previous frame */
85  int *Y1, *U1, *V1, *Y2, *U2, *V2;
87  int cur;
88 } TM2Context;
89 
90 /**
91 * Huffman codes for each of streams
92 */
93 typedef struct TM2Codes {
94  VLC vlc; ///< table for FFmpeg bitstream reader
95  int bits;
96  int *recode; ///< table for converting from code indexes to values
97  int length;
98 } TM2Codes;
99 
100 /**
101 * structure for gathering Huffman codes information
102 */
103 typedef struct TM2Huff {
104  int val_bits; ///< length of literal
105  int max_bits; ///< maximum length of code
106  int min_bits; ///< minimum length of code
107  int nodes; ///< total number of nodes in tree
108  int num; ///< current number filled
109  int max_num; ///< total number of codes
110  int *nums; ///< literals
111  uint32_t *bits; ///< codes
112  int *lens; ///< codelengths
113 } TM2Huff;
114 
115 /**
116  *
117  * @returns the length of the longest code or an AVERROR code
118  */
static int tm2_read_tree(TM2Context *ctx, uint32_t prefix, int length, TM2Huff *huff)
{
    /* Recursively parse one Huffman tree node from the bitstream.
     * A 0 bit means a leaf (literal of huff->val_bits bits follows),
     * a 1 bit means an internal node with two children. */
    int ret, ret2;
    if (length > huff->max_bits) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Tree exceeded its given depth (%i)\n",
               huff->max_bits);
        return AVERROR_INVALIDDATA;
    }

    if (!get_bits1(&ctx->gb)) { /* literal */
        /* a single-leaf tree still needs a 1-bit code */
        if (length == 0) {
            length = 1;
        }
        if (huff->num >= huff->max_num) {
            av_log(ctx->avctx, AV_LOG_DEBUG, "Too many literals\n");
            return AVERROR_INVALIDDATA;
        }
        /* record value, code and code length for this leaf */
        huff->nums[huff->num] = get_bits_long(&ctx->gb, huff->val_bits);
        huff->bits[huff->num] = prefix;
        huff->lens[huff->num] = length;
        huff->num++;
        return length;
    } else { /* non-terminal node */
        /* left child extends the prefix with a 0 bit, right with a 1 bit;
         * both ret/ret2 are assigned before FFMAX is reached on this path */
        if ((ret2 = tm2_read_tree(ctx, prefix << 1, length + 1, huff)) < 0)
            return ret2;
        if ((ret = tm2_read_tree(ctx, (prefix << 1) | 1, length + 1, huff)) < 0)
            return ret;
    }
    return FFMAX(ret, ret2);
}
149 
151 {
152  TM2Huff huff;
153  int res = 0;
154 
155  huff.val_bits = get_bits(&ctx->gb, 5);
156  huff.max_bits = get_bits(&ctx->gb, 5);
157  huff.min_bits = get_bits(&ctx->gb, 5);
158  huff.nodes = get_bits_long(&ctx->gb, 17);
159  huff.num = 0;
160 
161  /* check for correct codes parameters */
162  if ((huff.val_bits < 1) || (huff.val_bits > 32) ||
163  (huff.max_bits < 0) || (huff.max_bits > 25)) {
164  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal "
165  "length: %i, max code length: %i\n", huff.val_bits, huff.max_bits);
166  return AVERROR_INVALIDDATA;
167  }
168  if ((huff.nodes <= 0) || (huff.nodes > 0x10000)) {
169  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree "
170  "nodes: %i\n", huff.nodes);
171  return AVERROR_INVALIDDATA;
172  }
173  /* one-node tree */
174  if (huff.max_bits == 0)
175  huff.max_bits = 1;
176 
177  /* allocate space for codes - it is exactly ceil(nodes / 2) entries */
178  huff.max_num = (huff.nodes + 1) >> 1;
179  huff.nums = av_calloc(huff.max_num, sizeof(int));
180  huff.bits = av_calloc(huff.max_num, sizeof(uint32_t));
181  huff.lens = av_calloc(huff.max_num, sizeof(int));
182 
183  if (!huff.nums || !huff.bits || !huff.lens) {
184  res = AVERROR(ENOMEM);
185  goto out;
186  }
187 
188  res = tm2_read_tree(ctx, 0, 0, &huff);
189 
190  if (res >= 0 && res != huff.max_bits) {
191  av_log(ctx->avctx, AV_LOG_ERROR, "Got less bits than expected: %i of %i\n",
192  res, huff.max_bits);
193  res = AVERROR_INVALIDDATA;
194  }
195  if (huff.num != huff.max_num) {
196  av_log(ctx->avctx, AV_LOG_ERROR, "Got less codes than expected: %i of %i\n",
197  huff.num, huff.max_num);
198  res = AVERROR_INVALIDDATA;
199  }
200 
201  /* convert codes to vlc_table */
202  if (res >= 0) {
203  int i;
204 
205  res = init_vlc(&code->vlc, huff.max_bits, huff.max_num,
206  huff.lens, sizeof(int), sizeof(int),
207  huff.bits, sizeof(uint32_t), sizeof(uint32_t), 0);
208  if (res < 0)
209  av_log(ctx->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
210  else {
211  code->bits = huff.max_bits;
212  code->length = huff.max_num;
213  code->recode = av_malloc_array(code->length, sizeof(int));
214  if (!code->recode) {
215  res = AVERROR(ENOMEM);
216  goto out;
217  }
218  for (i = 0; i < code->length; i++)
219  code->recode[i] = huff.nums[i];
220  }
221  }
222 
223 out:
224  /* free allocated memory */
225  av_free(huff.nums);
226  av_free(huff.bits);
227  av_free(huff.lens);
228 
229  return res;
230 }
231 
233 {
234  av_free(code->recode);
235  if (code->vlc.table)
236  ff_free_vlc(&code->vlc);
237 }
238 
240 {
241  int val;
242  val = get_vlc2(gb, code->vlc.table, code->bits, 1);
243  if(val<0)
244  return -1;
245  return code->recode[val];
246 }
247 
248 #define TM2_OLD_HEADER_MAGIC 0x00000100
249 #define TM2_NEW_HEADER_MAGIC 0x00000101
250 
251 static inline int tm2_read_header(TM2Context *ctx, const uint8_t *buf)
252 {
253  uint32_t magic = AV_RL32(buf);
254 
255  switch (magic) {
257  avpriv_request_sample(ctx->avctx, "Old TM2 header");
258  return 0;
260  return 0;
261  default:
262  av_log(ctx->avctx, AV_LOG_ERROR, "Not a TM2 header: 0x%08"PRIX32"\n",
263  magic);
264  return AVERROR_INVALIDDATA;
265  }
266 }
267 
268 static int tm2_read_deltas(TM2Context *ctx, int stream_id)
269 {
270  int d, mb;
271  int i, v;
272 
273  d = get_bits(&ctx->gb, 9);
274  mb = get_bits(&ctx->gb, 5);
275 
276  av_assert2(mb < 32);
277  if ((d < 1) || (d > TM2_DELTAS) || (mb < 1)) {
278  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect delta table: %i deltas x %i bits\n", d, mb);
279  return AVERROR_INVALIDDATA;
280  }
281 
282  for (i = 0; i < d; i++) {
283  v = get_bits_long(&ctx->gb, mb);
284  if (v & (1 << (mb - 1)))
285  ctx->deltas[stream_id][i] = v - (1U << mb);
286  else
287  ctx->deltas[stream_id][i] = v;
288  }
289  for (; i < TM2_DELTAS; i++)
290  ctx->deltas[stream_id][i] = 0;
291 
292  return 0;
293 }
294 
295 static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, int buf_size)
296 {
297  int i, ret;
298  int skip = 0;
299  int len, toks, pos;
300  TM2Codes codes;
302 
303  if (buf_size < 4) {
304  av_log(ctx->avctx, AV_LOG_ERROR, "not enough space for len left\n");
305  return AVERROR_INVALIDDATA;
306  }
307 
308  /* get stream length in dwords */
309  bytestream2_init(&gb, buf, buf_size);
310  len = bytestream2_get_be32(&gb);
311 
312  if (len == 0)
313  return 4;
314 
315  if (len >= INT_MAX / 4 - 1 || len < 0 || len * 4 + 4 > buf_size) {
316  av_log(ctx->avctx, AV_LOG_ERROR, "Error, invalid stream size.\n");
317  return AVERROR_INVALIDDATA;
318  }
319  skip = len * 4 + 4;
320 
321  toks = bytestream2_get_be32(&gb);
322  if (toks & 1) {
323  len = bytestream2_get_be32(&gb);
324  if (len == TM2_ESCAPE) {
325  len = bytestream2_get_be32(&gb);
326  }
327  if (len > 0) {
328  pos = bytestream2_tell(&gb);
329  if (skip <= pos)
330  return AVERROR_INVALIDDATA;
331  init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
332  if ((ret = tm2_read_deltas(ctx, stream_id)) < 0)
333  return ret;
334  bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);
335  }
336  }
337  /* skip unused fields */
338  len = bytestream2_get_be32(&gb);
339  if (len == TM2_ESCAPE) { /* some unknown length - could be escaped too */
340  bytestream2_skip(&gb, 8); /* unused by decoder */
341  } else {
342  bytestream2_skip(&gb, 4); /* unused by decoder */
343  }
344 
345  pos = bytestream2_tell(&gb);
346  if (skip <= pos)
347  return AVERROR_INVALIDDATA;
348  init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
349  if ((ret = tm2_build_huff_table(ctx, &codes)) < 0)
350  return ret;
351  bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);
352 
353  toks >>= 1;
354  /* check if we have sane number of tokens */
355  if ((toks < 0) || (toks > 0xFFFFFF)) {
356  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
357  ret = AVERROR_INVALIDDATA;
358  goto end;
359  }
360  ret = av_reallocp_array(&ctx->tokens[stream_id], toks, sizeof(int));
361  if (ret < 0) {
362  ctx->tok_lens[stream_id] = 0;
363  goto end;
364  }
365  ctx->tok_lens[stream_id] = toks;
366  len = bytestream2_get_be32(&gb);
367  if (len > 0) {
368  pos = bytestream2_tell(&gb);
369  if (skip <= pos) {
370  ret = AVERROR_INVALIDDATA;
371  goto end;
372  }
373  init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
374  for (i = 0; i < toks; i++) {
375  if (get_bits_left(&ctx->gb) <= 0) {
376  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
377  ret = AVERROR_INVALIDDATA;
378  goto end;
379  }
380  ctx->tokens[stream_id][i] = tm2_get_token(&ctx->gb, &codes);
381  if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS || ctx->tokens[stream_id][i]<0) {
382  av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
383  ctx->tokens[stream_id][i], stream_id, i);
384  ret = AVERROR_INVALIDDATA;
385  goto end;
386  }
387  }
388  } else {
389  if (len < 0) {
390  ret = AVERROR_INVALIDDATA;
391  goto end;
392  }
393  for (i = 0; i < toks; i++) {
394  ctx->tokens[stream_id][i] = codes.recode[0];
395  if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) {
396  av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
397  ctx->tokens[stream_id][i], stream_id, i);
398  ret = AVERROR_INVALIDDATA;
399  goto end;
400  }
401  }
402  }
403 
404  ret = skip;
405 
406 end:
407  tm2_free_codes(&codes);
408  return ret;
409 }
410 
411 static inline int GET_TOK(TM2Context *ctx,int type)
412 {
413  if (ctx->tok_ptrs[type] >= ctx->tok_lens[type]) {
414  av_log(ctx->avctx, AV_LOG_ERROR, "Read token from stream %i out of bounds (%i>=%i)\n", type, ctx->tok_ptrs[type], ctx->tok_lens[type]);
415  ctx->error = 1;
416  return 0;
417  }
418  if (type <= TM2_MOT) {
419  if (ctx->tokens[type][ctx->tok_ptrs[type]] >= TM2_DELTAS) {
420  av_log(ctx->avctx, AV_LOG_ERROR, "token %d is too large\n", ctx->tokens[type][ctx->tok_ptrs[type]]);
421  return 0;
422  }
423  return ctx->deltas[type][ctx->tokens[type][ctx->tok_ptrs[type]++]];
424  }
425  return ctx->tokens[type][ctx->tok_ptrs[type]++];
426 }
427 
428 /* blocks decoding routines */
429 
430 /* common Y, U, V pointers initialisation */
/* Declares and initialises the working pointers every block decoder
 * needs: Y/U/V point into the current frame's planes at block (bx, by)
 * (luma blocks are 4x4 samples, chroma 2x2), and last/clast point at
 * the per-column luma/chroma predictor state for this block column.
 * Expects `ctx`, `bx` and `by` to be in scope at the expansion site. */
#define TM2_INIT_POINTERS() \
    int *last, *clast; \
    int *Y, *U, *V;\
    int Ystride, Ustride, Vstride;\
\
    Ystride = ctx->y_stride;\
    Vstride = ctx->uv_stride;\
    Ustride = ctx->uv_stride;\
    Y = (ctx->cur?ctx->Y2:ctx->Y1) + by * 4 * Ystride + bx * 4;\
    V = (ctx->cur?ctx->V2:ctx->V1) + by * 2 * Vstride + bx * 2;\
    U = (ctx->cur?ctx->U2:ctx->U1) + by * 2 * Ustride + bx * 2;\
    last = ctx->last + bx * 4;\
    clast = ctx->clast + bx * 4;
444 
/* Like TM2_INIT_POINTERS() but additionally declares Yo/Uo/Vo pointing
 * at the *previous* frame's planes (ctx->cur selects which of the two
 * plane sets is current; the opposite set is the reference). */
#define TM2_INIT_POINTERS_2() \
    int *Yo, *Uo, *Vo;\
    int oYstride, oUstride, oVstride;\
\
    TM2_INIT_POINTERS();\
    oYstride = Ystride;\
    oVstride = Vstride;\
    oUstride = Ustride;\
    Yo = (ctx->cur?ctx->Y1:ctx->Y2) + by * 4 * oYstride + bx * 4;\
    Vo = (ctx->cur?ctx->V1:ctx->V2) + by * 2 * oVstride + bx * 2;\
    Uo = (ctx->cur?ctx->U1:ctx->U2) + by * 2 * oUstride + bx * 2;
456 
457 /* recalculate last and delta values for next blocks */
/* Rebuild the vertical chroma deltas (CD) and the bottom-row predictor
 * (last) from a just-written 2x2 chroma block; the unsigned casts avoid
 * signed-overflow UB on crafted input. */
#define TM2_RECALC_BLOCK(CHR, stride, last, CD) {\
    CD[0] = (unsigned)CHR[ 1] - (unsigned)last[1];\
    CD[1] = (unsigned)CHR[stride + 1] - (unsigned) CHR[1];\
    last[0] = (int)CHR[stride + 0];\
    last[1] = (int)CHR[stride + 1];}
463 
464 /* common operations - add deltas to 4x4 block of luma or 2x2 blocks of chroma */
465 static inline void tm2_apply_deltas(TM2Context *ctx, int* Y, int stride, int *deltas, int *last)
466 {
467  unsigned ct, d;
468  int i, j;
469 
470  for (j = 0; j < 4; j++){
471  ct = ctx->D[j];
472  for (i = 0; i < 4; i++){
473  d = deltas[i + j * 4];
474  ct += d;
475  last[i] += ct;
476  Y[i] = av_clip_uint8(last[i]);
477  }
478  Y += stride;
479  ctx->D[j] = ct;
480  }
481 }
482 
/* Apply a 2x2 block of chroma deltas: CD[row] accumulates the row's
 * deltas, last[col] carries the running column predictor, and the
 * updated predictor is stored into the plane (no clipping here). */
static inline void tm2_high_chroma(int *data, int stride, int *last, unsigned *CD, int *deltas)
{
    int row, col;

    for (row = 0; row < 2; row++, data += stride) {
        for (col = 0; col < 2; col++) {
            CD[row]  += deltas[col + row * 2];
            last[col] += CD[row];
            data[col] = last[col];
        }
    }
}
495 
/* Low-resolution chroma: interpolate the missing predictor/delta values
 * from the left neighbour (clast[-3] when bx > 0) before applying the
 * single transmitted delta via tm2_high_chroma(). */
static inline void tm2_low_chroma(int *data, int stride, int *clast, unsigned *CD, int *deltas, int bx)
{
    int top, left;
    int neighbour = (bx > 0) ? clast[-3] : 0;

    /* split the combined vertical delta between the two rows and
     * reconstruct the left predictor by averaging */
    top  = (int)(CD[0] + CD[1]) >> 1;
    left = (int)(neighbour - CD[0] - CD[1] + clast[1]) >> 1;
    CD[1] = CD[0] + CD[1] - top;
    CD[0] = top;
    clast[0] = left;

    tm2_high_chroma(data, stride, clast, CD, deltas);
}
514 
515 static inline void tm2_hi_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
516 {
517  int i;
518  int deltas[16];
520 
521  /* hi-res chroma */
522  for (i = 0; i < 4; i++) {
523  deltas[i] = GET_TOK(ctx, TM2_C_HI);
524  deltas[i + 4] = GET_TOK(ctx, TM2_C_HI);
525  }
526  tm2_high_chroma(U, Ustride, clast, ctx->CD, deltas);
527  tm2_high_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas + 4);
528 
529  /* hi-res luma */
530  for (i = 0; i < 16; i++)
531  deltas[i] = GET_TOK(ctx, TM2_L_HI);
532 
533  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
534 }
535 
536 static inline void tm2_med_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
537 {
538  int i;
539  int deltas[16];
541 
542  /* low-res chroma */
543  deltas[0] = GET_TOK(ctx, TM2_C_LO);
544  deltas[1] = deltas[2] = deltas[3] = 0;
545  tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);
546 
547  deltas[0] = GET_TOK(ctx, TM2_C_LO);
548  deltas[1] = deltas[2] = deltas[3] = 0;
549  tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);
550 
551  /* hi-res luma */
552  for (i = 0; i < 16; i++)
553  deltas[i] = GET_TOK(ctx, TM2_L_HI);
554 
555  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
556 }
557 
558 static inline void tm2_low_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
559 {
560  int i;
561  int t1, t2;
562  int deltas[16];
564 
565  /* low-res chroma */
566  deltas[0] = GET_TOK(ctx, TM2_C_LO);
567  deltas[1] = deltas[2] = deltas[3] = 0;
568  tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);
569 
570  deltas[0] = GET_TOK(ctx, TM2_C_LO);
571  deltas[1] = deltas[2] = deltas[3] = 0;
572  tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);
573 
574  /* low-res luma */
575  for (i = 0; i < 16; i++)
576  deltas[i] = 0;
577 
578  deltas[ 0] = GET_TOK(ctx, TM2_L_LO);
579  deltas[ 2] = GET_TOK(ctx, TM2_L_LO);
580  deltas[ 8] = GET_TOK(ctx, TM2_L_LO);
581  deltas[10] = GET_TOK(ctx, TM2_L_LO);
582 
583  if (bx > 0)
584  last[0] = (last[-1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3] + last[1]) >> 1;
585  else
586  last[0] = (last[1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3])>> 1;
587  last[2] = (last[1] + last[3]) >> 1;
588 
589  t1 = ctx->D[0] + ctx->D[1];
590  ctx->D[0] = t1 >> 1;
591  ctx->D[1] = t1 - (t1 >> 1);
592  t2 = ctx->D[2] + ctx->D[3];
593  ctx->D[2] = t2 >> 1;
594  ctx->D[3] = t2 - (t2 >> 1);
595 
596  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
597 }
598 
599 static inline void tm2_null_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
600 {
601  int i;
602  int ct;
603  unsigned left, right;
604  int diff;
605  int deltas[16];
607 
608  /* null chroma */
609  deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
610  tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);
611 
612  deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
613  tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);
614 
615  /* null luma */
616  for (i = 0; i < 16; i++)
617  deltas[i] = 0;
618 
619  ct = ctx->D[0] + ctx->D[1] + ctx->D[2] + ctx->D[3];
620 
621  if (bx > 0)
622  left = last[-1] - ct;
623  else
624  left = 0;
625 
626  right = last[3];
627  diff = right - left;
628  last[0] = left + (diff >> 2);
629  last[1] = left + (diff >> 1);
630  last[2] = right - (diff >> 2);
631  last[3] = right;
632  {
633  int tp = left;
634 
635  ctx->D[0] = (tp + (ct >> 2)) - left;
636  left += ctx->D[0];
637  ctx->D[1] = (tp + (ct >> 1)) - left;
638  left += ctx->D[1];
639  ctx->D[2] = ((tp + ct) - (ct >> 2)) - left;
640  left += ctx->D[2];
641  ctx->D[3] = (tp + ct) - left;
642  }
643  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
644 }
645 
646 static inline void tm2_still_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
647 {
648  int i, j;
650 
651  /* update chroma */
652  for (j = 0; j < 2; j++) {
653  for (i = 0; i < 2; i++){
654  U[i] = Uo[i];
655  V[i] = Vo[i];
656  }
657  U += Ustride; V += Vstride;
658  Uo += oUstride; Vo += oVstride;
659  }
660  U -= Ustride * 2;
661  V -= Vstride * 2;
662  TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
663  TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));
664 
665  /* update deltas */
666  ctx->D[0] = Yo[3] - last[3];
667  ctx->D[1] = Yo[3 + oYstride] - Yo[3];
668  ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
669  ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];
670 
671  for (j = 0; j < 4; j++) {
672  for (i = 0; i < 4; i++) {
673  Y[i] = Yo[i];
674  last[i] = Yo[i];
675  }
676  Y += Ystride;
677  Yo += oYstride;
678  }
679 }
680 
681 static inline void tm2_update_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
682 {
683  int i, j;
684  unsigned d;
686 
687  /* update chroma */
688  for (j = 0; j < 2; j++) {
689  for (i = 0; i < 2; i++) {
690  U[i] = Uo[i] + (unsigned)GET_TOK(ctx, TM2_UPD);
691  V[i] = Vo[i] + (unsigned)GET_TOK(ctx, TM2_UPD);
692  }
693  U += Ustride;
694  V += Vstride;
695  Uo += oUstride;
696  Vo += oVstride;
697  }
698  U -= Ustride * 2;
699  V -= Vstride * 2;
700  TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
701  TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));
702 
703  /* update deltas */
704  ctx->D[0] = (unsigned)Yo[3] - last[3];
705  ctx->D[1] = (unsigned)Yo[3 + oYstride] - Yo[3];
706  ctx->D[2] = (unsigned)Yo[3 + oYstride * 2] - Yo[3 + oYstride];
707  ctx->D[3] = (unsigned)Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];
708 
709  for (j = 0; j < 4; j++) {
710  d = last[3];
711  for (i = 0; i < 4; i++) {
712  Y[i] = Yo[i] + (unsigned)GET_TOK(ctx, TM2_UPD);
713  last[i] = Y[i];
714  }
715  ctx->D[j] = last[3] - d;
716  Y += Ystride;
717  Yo += oYstride;
718  }
719 }
720 
721 static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
722 {
723  int i, j;
724  int mx, my;
726 
727  mx = GET_TOK(ctx, TM2_MOT);
728  my = GET_TOK(ctx, TM2_MOT);
729  mx = av_clip(mx, -(bx * 4 + 4), ctx->avctx->width - bx * 4);
730  my = av_clip(my, -(by * 4 + 4), ctx->avctx->height - by * 4);
731 
732  if (4*bx+mx<0 || 4*by+my<0 || 4*bx+mx+4 > ctx->avctx->width || 4*by+my+4 > ctx->avctx->height) {
733  av_log(ctx->avctx, AV_LOG_ERROR, "MV out of picture\n");
734  return;
735  }
736 
737  Yo += my * oYstride + mx;
738  Uo += (my >> 1) * oUstride + (mx >> 1);
739  Vo += (my >> 1) * oVstride + (mx >> 1);
740 
741  /* copy chroma */
742  for (j = 0; j < 2; j++) {
743  for (i = 0; i < 2; i++) {
744  U[i] = Uo[i];
745  V[i] = Vo[i];
746  }
747  U += Ustride;
748  V += Vstride;
749  Uo += oUstride;
750  Vo += oVstride;
751  }
752  U -= Ustride * 2;
753  V -= Vstride * 2;
754  TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
755  TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));
756 
757  /* copy luma */
758  for (j = 0; j < 4; j++) {
759  for (i = 0; i < 4; i++) {
760  Y[i] = Yo[i];
761  }
762  Y += Ystride;
763  Yo += oYstride;
764  }
765  /* calculate deltas */
766  Y -= Ystride * 4;
767  ctx->D[0] = (unsigned)Y[3] - last[3];
768  ctx->D[1] = (unsigned)Y[3 + Ystride] - Y[3];
769  ctx->D[2] = (unsigned)Y[3 + Ystride * 2] - Y[3 + Ystride];
770  ctx->D[3] = (unsigned)Y[3 + Ystride * 3] - Y[3 + Ystride * 2];
771  for (i = 0; i < 4; i++)
772  last[i] = Y[i + Ystride * 3];
773 }
774 
776 {
777  int i, j;
778  int w = ctx->avctx->width, h = ctx->avctx->height, bw = w >> 2, bh = h >> 2, cw = w >> 1;
779  int type;
780  int keyframe = 1;
781  int *Y, *U, *V;
782  uint8_t *dst;
783 
784  for (i = 0; i < TM2_NUM_STREAMS; i++)
785  ctx->tok_ptrs[i] = 0;
786 
787  if (ctx->tok_lens[TM2_TYPE]<bw*bh) {
788  av_log(ctx->avctx,AV_LOG_ERROR,"Got %i tokens for %i blocks\n",ctx->tok_lens[TM2_TYPE],bw*bh);
789  return AVERROR_INVALIDDATA;
790  }
791 
792  memset(ctx->last, 0, 4 * bw * sizeof(int));
793  memset(ctx->clast, 0, 4 * bw * sizeof(int));
794 
795  for (j = 0; j < bh; j++) {
796  memset(ctx->D, 0, 4 * sizeof(int));
797  memset(ctx->CD, 0, 4 * sizeof(int));
798  for (i = 0; i < bw; i++) {
799  type = GET_TOK(ctx, TM2_TYPE);
800  switch(type) {
801  case TM2_HI_RES:
802  tm2_hi_res_block(ctx, p, i, j);
803  break;
804  case TM2_MED_RES:
805  tm2_med_res_block(ctx, p, i, j);
806  break;
807  case TM2_LOW_RES:
808  tm2_low_res_block(ctx, p, i, j);
809  break;
810  case TM2_NULL_RES:
811  tm2_null_res_block(ctx, p, i, j);
812  break;
813  case TM2_UPDATE:
814  tm2_update_block(ctx, p, i, j);
815  keyframe = 0;
816  break;
817  case TM2_STILL:
818  tm2_still_block(ctx, p, i, j);
819  keyframe = 0;
820  break;
821  case TM2_MOTION:
822  tm2_motion_block(ctx, p, i, j);
823  keyframe = 0;
824  break;
825  default:
826  av_log(ctx->avctx, AV_LOG_ERROR, "Skipping unknown block type %i\n", type);
827  }
828  if (ctx->error)
829  return AVERROR_INVALIDDATA;
830  }
831  }
832 
833  /* copy data from our buffer to AVFrame */
834  Y = (ctx->cur?ctx->Y2:ctx->Y1);
835  U = (ctx->cur?ctx->U2:ctx->U1);
836  V = (ctx->cur?ctx->V2:ctx->V1);
837  dst = p->data[0];
838  for (j = 0; j < h; j++) {
839  for (i = 0; i < w; i++) {
840  unsigned y = Y[i], u = U[i >> 1], v = V[i >> 1];
841  dst[3*i+0] = av_clip_uint8(y + v);
842  dst[3*i+1] = av_clip_uint8(y);
843  dst[3*i+2] = av_clip_uint8(y + u);
844  }
845 
846  /* horizontal edge extension */
847  Y[-4] = Y[-3] = Y[-2] = Y[-1] = Y[0];
848  Y[w + 3] = Y[w + 2] = Y[w + 1] = Y[w] = Y[w - 1];
849 
850  /* vertical edge extension */
851  if (j == 0) {
852  memcpy(Y - 4 - 1 * ctx->y_stride, Y - 4, ctx->y_stride);
853  memcpy(Y - 4 - 2 * ctx->y_stride, Y - 4, ctx->y_stride);
854  memcpy(Y - 4 - 3 * ctx->y_stride, Y - 4, ctx->y_stride);
855  memcpy(Y - 4 - 4 * ctx->y_stride, Y - 4, ctx->y_stride);
856  } else if (j == h - 1) {
857  memcpy(Y - 4 + 1 * ctx->y_stride, Y - 4, ctx->y_stride);
858  memcpy(Y - 4 + 2 * ctx->y_stride, Y - 4, ctx->y_stride);
859  memcpy(Y - 4 + 3 * ctx->y_stride, Y - 4, ctx->y_stride);
860  memcpy(Y - 4 + 4 * ctx->y_stride, Y - 4, ctx->y_stride);
861  }
862 
863  Y += ctx->y_stride;
864  if (j & 1) {
865  /* horizontal edge extension */
866  U[-2] = U[-1] = U[0];
867  V[-2] = V[-1] = V[0];
868  U[cw + 1] = U[cw] = U[cw - 1];
869  V[cw + 1] = V[cw] = V[cw - 1];
870 
871  /* vertical edge extension */
872  if (j == 1) {
873  memcpy(U - 2 - 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
874  memcpy(V - 2 - 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
875  memcpy(U - 2 - 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
876  memcpy(V - 2 - 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
877  } else if (j == h - 1) {
878  memcpy(U - 2 + 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
879  memcpy(V - 2 + 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
880  memcpy(U - 2 + 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
881  memcpy(V - 2 + 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
882  }
883 
884  U += ctx->uv_stride;
885  V += ctx->uv_stride;
886  }
887  dst += p->linesize[0];
888  }
889 
890  return keyframe;
891 }
892 
893 static const int tm2_stream_order[TM2_NUM_STREAMS] = {
895 };
896 
897 #define TM2_HEADER_SIZE 40
898 
900  void *data, int *got_frame,
901  AVPacket *avpkt)
902 {
903  TM2Context * const l = avctx->priv_data;
904  const uint8_t *buf = avpkt->data;
905  int buf_size = avpkt->size & ~3;
906  AVFrame * const p = l->pic;
907  int offset = TM2_HEADER_SIZE;
908  int i, t, ret;
909 
910  l->error = 0;
911 
912  av_fast_padded_malloc(&l->buffer, &l->buffer_size, buf_size);
913  if (!l->buffer) {
914  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
915  return AVERROR(ENOMEM);
916  }
917 
918  if ((ret = ff_reget_buffer(avctx, p)) < 0)
919  return ret;
920 
921  l->bdsp.bswap_buf((uint32_t *) l->buffer, (const uint32_t *) buf,
922  buf_size >> 2);
923 
924  if ((ret = tm2_read_header(l, l->buffer)) < 0) {
925  return ret;
926  }
927 
928  for (i = 0; i < TM2_NUM_STREAMS; i++) {
929  if (offset >= buf_size) {
930  av_log(avctx, AV_LOG_ERROR, "no space for tm2_read_stream\n");
931  return AVERROR_INVALIDDATA;
932  }
933 
934  t = tm2_read_stream(l, l->buffer + offset, tm2_stream_order[i],
935  buf_size - offset);
936  if (t < 0) {
937  int j = tm2_stream_order[i];
938  if (l->tok_lens[j])
939  memset(l->tokens[j], 0, sizeof(**l->tokens) * l->tok_lens[j]);
940  return t;
941  }
942  offset += t;
943  }
944  p->key_frame = tm2_decode_blocks(l, p);
945  if (p->key_frame)
947  else
949 
950  l->cur = !l->cur;
951  *got_frame = 1;
952  ret = av_frame_ref(data, l->pic);
953 
954  return (ret < 0) ? ret : buf_size;
955 }
956 
958 {
959  TM2Context * const l = avctx->priv_data;
960  int i, w = avctx->width, h = avctx->height;
961 
962  if ((avctx->width & 3) || (avctx->height & 3)) {
963  av_log(avctx, AV_LOG_ERROR, "Width and height must be multiple of 4\n");
964  return AVERROR(EINVAL);
965  }
966 
967  l->avctx = avctx;
968  avctx->pix_fmt = AV_PIX_FMT_BGR24;
969 
970  l->pic = av_frame_alloc();
971  if (!l->pic)
972  return AVERROR(ENOMEM);
973 
974  ff_bswapdsp_init(&l->bdsp);
975 
976  l->last = av_malloc_array(w >> 2, 4 * sizeof(*l->last) );
977  l->clast = av_malloc_array(w >> 2, 4 * sizeof(*l->clast));
978 
979  for (i = 0; i < TM2_NUM_STREAMS; i++) {
980  l->tokens[i] = NULL;
981  l->tok_lens[i] = 0;
982  }
983 
984  w += 8;
985  h += 8;
986  l->Y1_base = av_calloc(w * h, sizeof(*l->Y1_base));
987  l->Y2_base = av_calloc(w * h, sizeof(*l->Y2_base));
988  l->y_stride = w;
989  w = (w + 1) >> 1;
990  h = (h + 1) >> 1;
991  l->U1_base = av_calloc(w * h, sizeof(*l->U1_base));
992  l->V1_base = av_calloc(w * h, sizeof(*l->V1_base));
993  l->U2_base = av_calloc(w * h, sizeof(*l->U2_base));
994  l->V2_base = av_calloc(w * h, sizeof(*l->V1_base));
995  l->uv_stride = w;
996  l->cur = 0;
997  if (!l->Y1_base || !l->Y2_base || !l->U1_base ||
998  !l->V1_base || !l->U2_base || !l->V2_base ||
999  !l->last || !l->clast) {
1000  av_freep(&l->Y1_base);
1001  av_freep(&l->Y2_base);
1002  av_freep(&l->U1_base);
1003  av_freep(&l->U2_base);
1004  av_freep(&l->V1_base);
1005  av_freep(&l->V2_base);
1006  av_freep(&l->last);
1007  av_freep(&l->clast);
1008  av_frame_free(&l->pic);
1009  return AVERROR(ENOMEM);
1010  }
1011  l->Y1 = l->Y1_base + l->y_stride * 4 + 4;
1012  l->Y2 = l->Y2_base + l->y_stride * 4 + 4;
1013  l->U1 = l->U1_base + l->uv_stride * 2 + 2;
1014  l->U2 = l->U2_base + l->uv_stride * 2 + 2;
1015  l->V1 = l->V1_base + l->uv_stride * 2 + 2;
1016  l->V2 = l->V2_base + l->uv_stride * 2 + 2;
1017 
1018  return 0;
1019 }
1020 
1022 {
1023  TM2Context * const l = avctx->priv_data;
1024  int i;
1025 
1026  av_free(l->last);
1027  av_free(l->clast);
1028  for (i = 0; i < TM2_NUM_STREAMS; i++)
1029  av_freep(&l->tokens[i]);
1030  if (l->Y1) {
1031  av_freep(&l->Y1_base);
1032  av_freep(&l->U1_base);
1033  av_freep(&l->V1_base);
1034  av_freep(&l->Y2_base);
1035  av_freep(&l->U2_base);
1036  av_freep(&l->V2_base);
1037  }
1038  av_freep(&l->buffer);
1039  l->buffer_size = 0;
1040 
1041  av_frame_free(&l->pic);
1042 
1043  return 0;
1044 }
1045 
1047  .name = "truemotion2",
1048  .long_name = NULL_IF_CONFIG_SMALL("Duck TrueMotion 2.0"),
1049  .type = AVMEDIA_TYPE_VIDEO,
1051  .priv_data_size = sizeof(TM2Context),
1052  .init = decode_init,
1053  .close = decode_end,
1054  .decode = decode_frame,
1055  .capabilities = AV_CODEC_CAP_DR1,
1056 };
#define NULL
Definition: coverity.c:32
const char const char void * val
Definition: avisynth_c.h:863
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
int * V2
Definition: truemotion2.c:85
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
int * U1_base
Definition: truemotion2.c:84
int D[4]
Definition: truemotion2.c:78
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
#define avpriv_request_sample(...)
int * last
Definition: truemotion2.c:80
int * recode
table for converting from code indexes to values
Definition: truemotion2.c:96
int size
Definition: avcodec.h:1478
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
GLint GLenum type
Definition: opengl_enc.c:104
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
AVCodec ff_truemotion2_decoder
Definition: truemotion2.c:1046
AVCodec.
Definition: avcodec.h:3481
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
Identical in function to av_frame_make_writable(), except it uses ff_get_buffer() to allocate the buf...
Definition: decode.c:2011
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:244
int num
current number filled
Definition: truemotion2.c:108
static int tm2_read_header(TM2Context *ctx, const uint8_t *buf)
Definition: truemotion2.c:251
#define TM2_HEADER_SIZE
Definition: truemotion2.c:897
structure for gathering Huffman codes information
Definition: truemotion2.c:103
uint8_t
#define av_cold
Definition: attributes.h:82
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
#define mb
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
Definition: bswapdsp.h:25
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
TM2_STREAMS
Definition: truemotion2.c:39
static const int tm2_stream_order[TM2_NUM_STREAMS]
Definition: truemotion2.c:893
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
AVCodecContext * avctx
Definition: truemotion2.c:62
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
int max_bits
maximum length of code
Definition: truemotion2.c:105
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:252
static int tm2_read_deltas(TM2Context *ctx, int stream_id)
Definition: truemotion2.c:268
static void tm2_free_codes(TM2Codes *code)
Definition: truemotion2.c:232
uint8_t * data
Definition: avcodec.h:1477
int min_bits
minimum length of code
Definition: truemotion2.c:106
int * U2
Definition: truemotion2.c:85
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
static int tm2_read_tree(TM2Context *ctx, uint32_t prefix, int length, TM2Huff *huff)
Definition: truemotion2.c:119
VLC vlc
table for FFmpeg bitstream reader
Definition: truemotion2.c:94
bitstream reader API header.
static void tm2_hi_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
Definition: truemotion2.c:515
static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, int buf_size)
Definition: truemotion2.c:295
#define av_log(a,...)
static void tm2_update_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
Definition: truemotion2.c:681
#define U(x)
Definition: vp56_arith.h:37
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int val_bits
length of literal
Definition: truemotion2.c:104
#define init_vlc(vlc, nb_bits, nb_codes,bits, bits_wrap, bits_size,codes, codes_wrap, codes_size,flags)
Definition: vlc.h:38
static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
Definition: truemotion2.c:150
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:164
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
static void tm2_low_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
Definition: truemotion2.c:558
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
Definition: truemotion2.c:775
int * tokens[TM2_NUM_STREAMS]
Definition: truemotion2.c:73
#define t1
Definition: regdef.h:29
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: avcodec.h:3488
int * clast
Definition: truemotion2.c:81
#define FFMAX(a, b)
Definition: common.h:94
Definition: vlc.h:26
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array through a pointer to a pointer.
Definition: mem.c:205
AVFrame * pic
Definition: truemotion2.c:63
int * V1
Definition: truemotion2.c:85
int * V2_base
Definition: truemotion2.c:84
int max_num
total number of codes
Definition: truemotion2.c:109
int bits
Definition: truemotion2.c:95
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
#define Y
Definition: boxblur.h:38
static int GET_TOK(TM2Context *ctx, int type)
Definition: truemotion2.c:411
int width
picture width / height.
Definition: avcodec.h:1738
uint8_t w
Definition: llviddspenc.c:38
#define TM2_OLD_HEADER_MAGIC
Definition: truemotion2.c:248
AVFormatContext * ctx
Definition: movenc.c:48
int uv_stride
Definition: truemotion2.c:86
uint8_t * buffer
Definition: truemotion2.c:69
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
static int tm2_get_token(GetBitContext *gb, TM2Codes *code)
Definition: truemotion2.c:239
int CD[4]
Definition: truemotion2.c:79
int * nums
literals
Definition: truemotion2.c:110
int nodes
total number of nodes in tree
Definition: truemotion2.c:107
if(ret)
static av_cold int decode_end(AVCodecContext *avctx)
Definition: truemotion2.c:1021
int * Y1
Definition: truemotion2.c:85
int * V1_base
Definition: truemotion2.c:84
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
#define TM2_INIT_POINTERS_2()
Definition: truemotion2.c:445
static void tm2_low_chroma(int *data, int stride, int *clast, unsigned *CD, int *deltas, int bx)
Definition: truemotion2.c:496
main external API structure.
Definition: avcodec.h:1565
static void tm2_still_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
Definition: truemotion2.c:646
#define TM2_DELTAS
Definition: truemotion2.c:36
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
void * buf
Definition: avisynth_c.h:766
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
#define TM2_INIT_POINTERS()
Definition: truemotion2.c:431
int * Y1_base
Definition: truemotion2.c:84
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
static void tm2_med_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
Definition: truemotion2.c:536
static void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
Definition: truemotion2.c:721
uint32_t * bits
codes
Definition: truemotion2.c:111
int length
Definition: truemotion2.c:97
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
static void tm2_apply_deltas(TM2Context *ctx, int *Y, int stride, int *deltas, int *last)
Definition: truemotion2.c:465
#define TM2_ESCAPE
Definition: truemotion2.c:35
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
#define TM2_RECALC_BLOCK(CHR, stride, last, CD)
Definition: truemotion2.c:458
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
int * U1
Definition: truemotion2.c:85
int * Y2
Definition: truemotion2.c:85
int
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
common internal api header.
int y_stride
Definition: truemotion2.c:86
static void tm2_null_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
Definition: truemotion2.c:599
int deltas[TM2_NUM_STREAMS][TM2_DELTAS]
Definition: truemotion2.c:76
int * lens
codelengths
Definition: truemotion2.c:112
static av_cold int decode_init(AVCodecContext *avctx)
Definition: truemotion2.c:957
int * Y2_base
Definition: truemotion2.c:84
int buffer_size
Definition: truemotion2.c:70
static void tm2_high_chroma(int *data, int stride, int *last, unsigned *CD, int *deltas)
Definition: truemotion2.c:483
#define TM2_NEW_HEADER_MAGIC
Definition: truemotion2.c:249
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
void * priv_data
Definition: avcodec.h:1592
int * U2_base
Definition: truemotion2.c:84
TM2_BLOCKS
Definition: truemotion2.c:51
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define av_free(p)
GetBitContext gb
Definition: truemotion2.c:65
int len
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
int tok_lens[TM2_NUM_STREAMS]
Definition: truemotion2.c:74
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:373
FILE * out
Definition: movenc.c:54
#define av_freep(p)
Huffman codes for each of streams.
Definition: truemotion2.c:93
#define av_malloc_array(a, b)
#define stride
int tok_ptrs[TM2_NUM_STREAMS]
Definition: truemotion2.c:75
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
This structure stores compressed data.
Definition: avcodec.h:1454
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:359
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
BswapDSPContext bdsp
Definition: truemotion2.c:67
#define t2
Definition: regdef.h:30
Predicted.
Definition: avutil.h:275
#define V
Definition: avdct.c:30
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: truemotion2.c:899