1 /*
2  * Go2Webinar / Go2Meeting decoder
3  * Copyright (c) 2012 Konstantin Shishkov
4  * Copyright (c) 2013 Maxim Poliakovski
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * Go2Webinar / Go2Meeting decoder
26  */
27 
28 #include <inttypes.h>
29 #include <zlib.h>
30 
31 #include "libavutil/imgutils.h"
32 #include "libavutil/intreadwrite.h"
33 #include "libavutil/mem_internal.h"
34 
35 #include "avcodec.h"
36 #include "blockdsp.h"
37 #include "bytestream.h"
38 #include "elsdec.h"
39 #include "get_bits.h"
40 #include "idctdsp.h"
41 #include "internal.h"
42 #include "jpegtables.h"
43 #include "mjpeg.h"
44 
45 #define EPIC_PIX_STACK_SIZE 1024
46 #define EPIC_PIX_STACK_MAX (EPIC_PIX_STACK_SIZE - 1)
47 
48 enum ChunkType {
49  DISPLAY_INFO = 0xC8,
50  TILE_DATA,
51  CURSOR_POS,
52  CURSOR_SHAPE,
53  CHUNK_CC,
54  CHUNK_CD
55 };
56 
57 enum Compression {
58  COMPR_EPIC_J_B = 2,
59  COMPR_KEMPF_J_B = 3,
60 };
61 
62 static const uint8_t luma_quant[64] = {
63  8, 6, 5, 8, 12, 20, 26, 31,
64  6, 6, 7, 10, 13, 29, 30, 28,
65  7, 7, 8, 12, 20, 29, 35, 28,
66  7, 9, 11, 15, 26, 44, 40, 31,
67  9, 11, 19, 28, 34, 55, 52, 39,
68  12, 18, 28, 32, 41, 52, 57, 46,
69  25, 32, 39, 44, 52, 61, 60, 51,
70  36, 46, 48, 49, 56, 50, 52, 50
71 };
72 
73 static const uint8_t chroma_quant[64] = {
74  9, 9, 12, 24, 50, 50, 50, 50,
75  9, 11, 13, 33, 50, 50, 50, 50,
76  12, 13, 28, 50, 50, 50, 50, 50,
77  24, 33, 50, 50, 50, 50, 50, 50,
78  50, 50, 50, 50, 50, 50, 50, 50,
79  50, 50, 50, 50, 50, 50, 50, 50,
80  50, 50, 50, 50, 50, 50, 50, 50,
81  50, 50, 50, 50, 50, 50, 50, 50,
82 };
83 
84 typedef struct ePICPixListElem {
85  struct ePICPixListElem *next;
86  uint32_t pixel;
87  uint8_t rung;
88 } ePICPixListElem;
89 
90 typedef struct ePICPixHashElem {
91  uint32_t pix_id;
92  struct ePICPixListElem *list;
93 } ePICPixHashElem;
94 
95 #define EPIC_HASH_SIZE 256
96 typedef struct ePICPixHash {
97  ePICPixHashElem *bucket[EPIC_HASH_SIZE];
98  int bucket_size[EPIC_HASH_SIZE];
99  int bucket_fill[EPIC_HASH_SIZE];
100 } ePICPixHash;
101 
102 typedef struct ePICContext {
103  ElsDecCtx els_ctx;
104  int next_run_pos;
105  ElsUnsignedRung unsigned_rung;
106  uint8_t W_flag_rung;
107  uint8_t N_flag_rung;
108  uint8_t W_ctx_rung[256];
109  uint8_t N_ctx_rung[512];
110  uint8_t nw_pred_rung[256];
111  uint8_t ne_pred_rung[256];
112  uint8_t prev_row_rung[14];
113  uint8_t runlen_zeroes[14];
114  uint8_t runlen_one;
115  int stack_pos;
116  uint32_t stack[EPIC_PIX_STACK_SIZE];
117  ePICPixHash hash;
118 } ePICContext;
119 
120 typedef struct JPGContext {
121  BlockDSPContext bdsp;
122  IDCTDSPContext idsp;
123  ScanTable scantable;
124 
125  VLC dc_vlc[2], ac_vlc[2];
126  int prev_dc[3];
127  DECLARE_ALIGNED(32, int16_t, block)[6][64];
128 
129  uint8_t *buf;
130 } JPGContext;
131 
132 typedef struct G2MContext {
133  ePICContext ec;
134  JPGContext jc;
135 
136  int version;
137 
138  int compression;
139  int width, height, bpp;
140  int orig_width, orig_height;
141  int tile_width, tile_height;
142  int tiles_x, tiles_y, tile_x, tile_y;
143 
144  int got_header;
145 
146  uint8_t *framebuf;
147  int framebuf_stride, old_width, old_height;
148 
149  uint8_t *synth_tile, *jpeg_tile, *epic_buf, *epic_buf_base;
150  int tile_stride, epic_buf_stride, old_tile_w, old_tile_h;
151  int swapuv;
152 
153  uint8_t *kempf_buf, *kempf_flags;
154 
155  uint8_t *cursor;
156  int cursor_stride;
157  int cursor_fmt;
158  int cursor_w, cursor_h, cursor_x, cursor_y;
159  int cursor_hot_x, cursor_hot_y;
160 } G2MContext;
161 
162 static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table,
163  const uint8_t *val_table, int nb_codes,
164  int is_ac)
165 {
166  uint8_t huff_size[256] = { 0 };
167  uint16_t huff_code[256];
168  uint16_t huff_sym[256];
169  int i;
170 
171  ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
172 
173  for (i = 0; i < 256; i++)
174  huff_sym[i] = i + 16 * is_ac;
175 
176  if (is_ac)
177  huff_sym[0] = 16 * 256;
178 
179  return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
180  huff_code, 2, 2, huff_sym, 2, 2, 0);
181 }
182 
183 static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
184 {
185  int ret;
186 
187  ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance,
188  avpriv_mjpeg_val_dc, 12, 0);
189  if (ret)
190  return ret;
191  ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance,
192  avpriv_mjpeg_val_dc, 12, 0);
193  if (ret)
194  return ret;
195  ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance,
196  avpriv_mjpeg_val_ac_luminance, 251, 1);
197  if (ret)
198  return ret;
199  ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance,
200  avpriv_mjpeg_val_ac_chrominance, 251, 1);
201  if (ret)
202  return ret;
203 
204  ff_blockdsp_init(&c->bdsp, avctx);
205  ff_idctdsp_init(&c->idsp, avctx);
206  ff_init_scantable(c->idsp.idct_permutation, &c->scantable,
207  ff_zigzag_direct);
208 
209  return 0;
210 }
211 
212 static av_cold void jpg_free_context(JPGContext *ctx)
213 {
214  int i;
215 
216  for (i = 0; i < 2; i++) {
217  ff_free_vlc(&ctx->dc_vlc[i]);
218  ff_free_vlc(&ctx->ac_vlc[i]);
219  }
220 
221  av_freep(&ctx->buf);
222 }
223 
224 static void jpg_unescape(const uint8_t *src, int src_size,
225  uint8_t *dst, int *dst_size)
226 {
227  const uint8_t *src_end = src + src_size;
228  uint8_t *dst_start = dst;
229 
230  while (src < src_end) {
231  uint8_t x = *src++;
232 
233  *dst++ = x;
234 
235  if (x == 0xFF && !*src)
236  src++;
237  }
238  *dst_size = dst - dst_start;
239 }
240 
241 static int jpg_decode_block(JPGContext *c, GetBitContext *gb,
242  int plane, int16_t *block)
243 {
244  int dc, val, pos;
245  const int is_chroma = !!plane;
246  const uint8_t *qmat = is_chroma ? chroma_quant : luma_quant;
247 
248  if (get_bits_left(gb) < 1)
249  return AVERROR_INVALIDDATA;
250 
251  c->bdsp.clear_block(block);
252  dc = get_vlc2(gb, c->dc_vlc[is_chroma].table, 9, 2);
253  if (dc < 0)
254  return AVERROR_INVALIDDATA;
255  if (dc)
256  dc = get_xbits(gb, dc);
257  dc = dc * qmat[0] + c->prev_dc[plane];
258  block[0] = dc;
259  c->prev_dc[plane] = dc;
260 
261  pos = 0;
262  while (pos < 63) {
263  val = get_vlc2(gb, c->ac_vlc[is_chroma].table, 9, 2);
264  if (val < 0)
265  return AVERROR_INVALIDDATA;
266  pos += val >> 4;
267  val &= 0xF;
268  if (pos > 63)
269  return val ? AVERROR_INVALIDDATA : 0;
270  if (val) {
271  int nbits = val;
272 
273  val = get_xbits(gb, nbits);
274  val *= qmat[ff_zigzag_direct[pos]];
275  block[c->scantable.permutated[pos]] = val;
276  }
277  }
278  return 0;
279 }
280 
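/* Fixed-point BT.601 YUV->RGB conversion: the constants are the usual
 * coefficients scaled by 2^16 (91881/65536 ~ 1.402, 22554/65536 ~ 0.344,
 * 46802/65536 ~ 0.714, 116130/65536 ~ 1.772); ridx swaps R/B for BGR output. */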
281 static inline void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
282 {
283  out[ridx] = av_clip_uint8(Y + (91881 * V + 32768 >> 16));
284  out[1] = av_clip_uint8(Y + (-22554 * U - 46802 * V + 32768 >> 16));
285  out[2 - ridx] = av_clip_uint8(Y + (116130 * U + 32768 >> 16));
286 }
287 
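/* Decode JPEG-coded 16x16 macroblocks (4:2:0) directly into packed RGB.
 * The optional mask marks which 8x8 blocks are coded; unmarked macroblocks
 * are skipped, and decoding stops once num_mbs coded blocks have been read. */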
288 static int jpg_decode_data(JPGContext *c, int width, int height,
289  const uint8_t *src, int src_size,
290  uint8_t *dst, int dst_stride,
291  const uint8_t *mask, int mask_stride, int num_mbs,
292  int swapuv)
293 {
294  GetBitContext gb;
295  int mb_w, mb_h, mb_x, mb_y, i, j;
296  int bx, by;
297  int unesc_size;
298  int ret;
299  const int ridx = swapuv ? 2 : 0;
300 
301  if ((ret = av_reallocp(&c->buf,
302  src_size + AV_INPUT_BUFFER_PADDING_SIZE)) < 0)
303  return ret;
304  jpg_unescape(src, src_size, c->buf, &unesc_size);
305  memset(c->buf + unesc_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
306  if((ret = init_get_bits8(&gb, c->buf, unesc_size)) < 0)
307  return ret;
308 
309  width = FFALIGN(width, 16);
310  mb_w = width >> 4;
311  mb_h = (height + 15) >> 4;
312 
313  if (!num_mbs)
314  num_mbs = mb_w * mb_h * 4;
315 
316  for (i = 0; i < 3; i++)
317  c->prev_dc[i] = 1024;
318  bx =
319  by = 0;
320  c->bdsp.clear_blocks(c->block[0]);
321  for (mb_y = 0; mb_y < mb_h; mb_y++) {
322  for (mb_x = 0; mb_x < mb_w; mb_x++) {
323  if (mask && !mask[mb_x * 2] && !mask[mb_x * 2 + 1] &&
324  !mask[mb_x * 2 + mask_stride] &&
325  !mask[mb_x * 2 + 1 + mask_stride]) {
326  bx += 16;
327  continue;
328  }
329  for (j = 0; j < 2; j++) {
330  for (i = 0; i < 2; i++) {
331  if (mask && !mask[mb_x * 2 + i + j * mask_stride])
332  continue;
333  num_mbs--;
334  if ((ret = jpg_decode_block(c, &gb, 0,
335  c->block[i + j * 2])) != 0)
336  return ret;
337  c->idsp.idct(c->block[i + j * 2]);
338  }
339  }
340  for (i = 1; i < 3; i++) {
341  if ((ret = jpg_decode_block(c, &gb, i, c->block[i + 3])) != 0)
342  return ret;
343  c->idsp.idct(c->block[i + 3]);
344  }
345 
346  for (j = 0; j < 16; j++) {
347  uint8_t *out = dst + bx * 3 + (by + j) * dst_stride;
348  for (i = 0; i < 16; i++) {
349  int Y, U, V;
350 
351  Y = c->block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8];
352  U = c->block[4][(i >> 1) + (j >> 1) * 8] - 128;
353  V = c->block[5][(i >> 1) + (j >> 1) * 8] - 128;
354  yuv2rgb(out + i * 3, ridx, Y, U, V);
355  }
356  }
357 
358  if (!num_mbs)
359  return 0;
360  bx += 16;
361  }
362  bx = 0;
363  by += 16;
364  if (mask)
365  mask += mask_stride * 2;
366  }
367 
368  return 0;
369 }
370 
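/* Causal neighbourhood used by the ePIC predictor: W/WW are one/two pixels to
 * the left, N/NN one/two rows above, NW/NE/NWW/NNW/NNE the diagonals.
 * LOAD_NEIGHBOURS reads them around column x, UPDATE_NEIGHBOURS shifts the
 * window one pixel to the right. */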
371 #define LOAD_NEIGHBOURS(x) \
372  W = curr_row[(x) - 1]; \
373  N = above_row[(x)]; \
374  WW = curr_row[(x) - 2]; \
375  NW = above_row[(x) - 1]; \
376  NE = above_row[(x) + 1]; \
377  NN = above2_row[(x)]; \
378  NNW = above2_row[(x) - 1]; \
379  NWW = above_row[(x) - 2]; \
380  NNE = above2_row[(x) + 1]
381 
382 #define UPDATE_NEIGHBOURS(x) \
383  NNW = NN; \
384  NN = NNE; \
385  NWW = NW; \
386  NW = N; \
387  N = NE; \
388  NE = above_row[(x) + 1]; \
389  NNE = above2_row[(x) + 1]
390 
391 #define R_shift 16
392 #define G_shift 8
393 #define B_shift 0
394 
395 /* improved djb2 hash from http://www.cse.yorku.ca/~oz/hash.html */
396 static int djb2_hash(uint32_t key)
397 {
398  uint32_t h = 5381;
399 
400  h = (h * 33) ^ ((key >> 24) & 0xFF); // xxx: probably not needed at all
401  h = (h * 33) ^ ((key >> 16) & 0xFF);
402  h = (h * 33) ^ ((key >> 8) & 0xFF);
403  h = (h * 33) ^ (key & 0xFF);
404 
405  return h & (EPIC_HASH_SIZE - 1);
406 }
407 
408 static void epic_hash_init(ePICPixHash *hash)
409 {
410  memset(hash, 0, sizeof(*hash));
411 }
412 
413 static ePICPixHashElem *epic_hash_find(const ePICPixHash *hash, uint32_t key)
414 {
415  int i, idx = djb2_hash(key);
416  ePICPixHashElem *bucket = hash->bucket[idx];
417 
418  for (i = 0; i < hash->bucket_fill[idx]; i++)
419  if (bucket[i].pix_id == key)
420  return &bucket[i];
421 
422  return NULL;
423 }
424 
425 static ePICPixHashElem *epic_hash_add(ePICPixHash *hash, uint32_t key)
426 {
427  ePICPixHashElem *bucket, *ret;
428  int idx = djb2_hash(key);
429 
430  if (hash->bucket_size[idx] > INT_MAX / sizeof(**hash->bucket))
431  return NULL;
432 
433  if (!(hash->bucket_fill[idx] < hash->bucket_size[idx])) {
434  int new_size = hash->bucket_size[idx] + 16;
435  bucket = av_realloc(hash->bucket[idx], new_size * sizeof(*bucket));
436  if (!bucket)
437  return NULL;
438  hash->bucket[idx] = bucket;
439  hash->bucket_size[idx] = new_size;
440  }
441 
442  ret = &hash->bucket[idx][hash->bucket_fill[idx]++];
443  memset(ret, 0, sizeof(*ret));
444  ret->pix_id = key;
445  return ret;
446 }
447 
448 static int epic_add_pixel_to_cache(ePICPixHash *hash, uint32_t key, uint32_t pix)
449 {
450  ePICPixListElem *new_elem;
451  ePICPixHashElem *hash_elem = epic_hash_find(hash, key);
452 
453  if (!hash_elem) {
454  if (!(hash_elem = epic_hash_add(hash, key)))
455  return AVERROR(ENOMEM);
456  }
457 
458  new_elem = av_mallocz(sizeof(*new_elem));
459  if (!new_elem)
460  return AVERROR(ENOMEM);
461 
462  new_elem->pixel = pix;
463  new_elem->next = hash_elem->list;
464  hash_elem->list = new_elem;
465 
466  return 0;
467 }
468 
469 static int epic_cache_entries_for_pixel(const ePICPixHash *hash,
470  uint32_t pix)
471 {
472  ePICPixHashElem *hash_elem = epic_hash_find(hash, pix);
473 
474  if (hash_elem != NULL && hash_elem->list != NULL)
475  return 1;
476 
477  return 0;
478 }
479 
480 static void epic_free_pixel_cache(ePICPixHash *hash)
481 {
482  int i, j;
483 
484  for (i = 0; i < EPIC_HASH_SIZE; i++) {
485  for (j = 0; j < hash->bucket_fill[i]; j++) {
486  ePICPixListElem *list_elem = hash->bucket[i][j].list;
487  while (list_elem) {
488  ePICPixListElem *tmp = list_elem->next;
489  av_free(list_elem);
490  list_elem = tmp;
491  }
492  }
493  av_freep(&hash->bucket[i]);
494  hash->bucket_size[i] =
495  hash->bucket_fill[i] = 0;
496  }
497 }
498 
499 static inline int is_pixel_on_stack(const ePICContext *dc, uint32_t pix)
500 {
501  int i;
502 
503  for (i = 0; i < dc->stack_pos; i++)
504  if (dc->stack[i] == pix)
505  break;
506 
507  return i != dc->stack_pos;
508 }
509 
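/* Map the unsigned ELS code back to a signed delta: 0,1,2,3,... -> 0,-1,1,-2,... */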
510 #define TOSIGNED(val) (((val) >> 1) ^ -((val) & 1))
511 
512 static int epic_decode_component_pred(ePICContext *dc,
513  int N, int W, int NW)
514 {
515  unsigned delta = ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung);
516  return mid_pred(N, N + W - NW, W) - TOSIGNED(delta);
517 }
518 
519 static uint32_t epic_decode_pixel_pred(ePICContext *dc, int x, int y,
520  const uint32_t *curr_row,
521  const uint32_t *above_row)
522 {
523  uint32_t N, W, NW, pred;
524  unsigned delta;
525  int GN, GW, GNW, R, G, B;
526 
527  if (x && y) {
528  W = curr_row[x - 1];
529  N = above_row[x];
530  NW = above_row[x - 1];
531 
532  GN = (N >> G_shift) & 0xFF;
533  GW = (W >> G_shift) & 0xFF;
534  GNW = (NW >> G_shift) & 0xFF;
535 
536  G = epic_decode_component_pred(dc, GN, GW, GNW);
537 
538  R = G + epic_decode_component_pred(dc,
539  ((N >> R_shift) & 0xFF) - GN,
540  ((W >> R_shift) & 0xFF) - GW,
541  ((NW >> R_shift) & 0xFF) - GNW);
542 
543  B = G + epic_decode_component_pred(dc,
544  ((N >> B_shift) & 0xFF) - GN,
545  ((W >> B_shift) & 0xFF) - GW,
546  ((NW >> B_shift) & 0xFF) - GNW);
547  } else {
548  if (x)
549  pred = curr_row[x - 1];
550  else
551  pred = above_row[x];
552 
553  delta = ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung);
554  R = ((pred >> R_shift) & 0xFF) - TOSIGNED(delta);
555 
556  delta = ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung);
557  G = ((pred >> G_shift) & 0xFF) - TOSIGNED(delta);
558 
559  delta = ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung);
560  B = ((pred >> B_shift) & 0xFF) - TOSIGNED(delta);
561  }
562 
563  if (R<0 || G<0 || B<0 || R > 255 || G > 255 || B > 255) {
564  avpriv_request_sample(NULL, "RGB %d %d %d (out of range)", R, G, B);
565  return 0;
566  }
567 
568  return (R << R_shift) | (G << G_shift) | (B << B_shift);
569 }
570 
571 static int epic_predict_pixel(ePICContext *dc, uint8_t *rung,
572  uint32_t *pPix, uint32_t pix)
573 {
574  if (!ff_els_decode_bit(&dc->els_ctx, rung)) {
575  *pPix = pix;
576  return 1;
577  }
578  dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = pix;
579  return 0;
580 }
581 
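/* Predict pixels at the tile borders, where the full neighbourhood is not yet
 * available: reuse of the W and then the N neighbour is signalled by ELS flag
 * bits (the top-left pixel itself is coded as three raw unsigned values). */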
582 static int epic_handle_edges(ePICContext *dc, int x, int y,
583  const uint32_t *curr_row,
584  const uint32_t *above_row, uint32_t *pPix)
585 {
586  uint32_t pix;
587 
588  if (!x && !y) { /* special case: top-left pixel */
589  /* the top-left pixel is coded independently with 3 unsigned numbers */
590  *pPix = (ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung) << R_shift) |
591  (ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung) << G_shift) |
592  (ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung) << B_shift);
593  return 1;
594  }
595 
596  if (x) { /* predict from W first */
597  pix = curr_row[x - 1];
598  if (epic_predict_pixel(dc, &dc->W_flag_rung, pPix, pix))
599  return 1;
600  }
601 
602  if (y) { /* then try to predict from N */
603  pix = above_row[x];
604  if (!dc->stack_pos || dc->stack[0] != pix) {
605  if (epic_predict_pixel(dc, &dc->N_flag_rung, pPix, pix))
606  return 1;
607  }
608  }
609 
610  return 0;
611 }
612 
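/* Decode a run of identical pixels. Every decision is an ELS-coded bit whose
 * context index is built from equality tests between neighbouring pixels; a
 * run may also reuse the length of a matching run in the row above. */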
613 static int epic_decode_run_length(ePICContext *dc, int x, int y, int tile_width,
614  const uint32_t *curr_row,
615  const uint32_t *above_row,
616  const uint32_t *above2_row,
617  uint32_t *pPix, int *pRun)
618 {
619  int idx, got_pixel = 0, WWneW, old_WWneW = 0;
620  uint32_t W, WW, N, NN, NW, NE, NWW, NNW, NNE;
621 
622  *pRun = 0;
623 
624  LOAD_NEIGHBOURS(x);
625 
626  if (dc->next_run_pos == x) {
627  /* can't reuse W for the new pixel in this case */
628  WWneW = 1;
629  } else {
630  idx = (WW != W) << 7 |
631  (NW != W) << 6 |
632  (N != NE) << 5 |
633  (NW != N) << 4 |
634  (NWW != NW) << 3 |
635  (NNE != NE) << 2 |
636  (NN != N) << 1 |
637  (NNW != NW);
638  WWneW = ff_els_decode_bit(&dc->els_ctx, &dc->W_ctx_rung[idx]);
639  if (WWneW < 0)
640  return WWneW;
641  }
642 
643  if (WWneW)
644  dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = W;
645  else {
646  *pPix = W;
647  got_pixel = 1;
648  }
649 
650  do {
651  int NWneW = 1;
652  if (got_pixel) // pixel value already known (derived from either W or N)
653  NWneW = *pPix != N;
654  else { // pixel value is unknown and will be decoded later
655  NWneW = *pRun ? NWneW : NW != W;
656 
657  /* TODO: RFC this mess! */
658  switch (((NW != N) << 2) | (NWneW << 1) | WWneW) {
659  case 0:
660  break; // do nothing here
661  case 3:
662  case 5:
663  case 6:
664  case 7:
665  if (!is_pixel_on_stack(dc, N)) {
666  idx = WWneW << 8 |
667  (*pRun ? old_WWneW : WW != W) << 7 |
668  NWneW << 6 |
669  (N != NE) << 5 |
670  (NW != N) << 4 |
671  (NWW != NW) << 3 |
672  (NNE != NE) << 2 |
673  (NN != N) << 1 |
674  (NNW != NW);
675  if (!ff_els_decode_bit(&dc->els_ctx, &dc->N_ctx_rung[idx])) {
676  NWneW = 0;
677  *pPix = N;
678  got_pixel = 1;
679  break;
680  }
681  }
682  /* fall through */
683  default:
684  NWneW = 1;
685  old_WWneW = WWneW;
686  if (!is_pixel_on_stack(dc, N))
687  dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = N;
688  }
689  }
690 
691  (*pRun)++;
692  if (x + *pRun >= tile_width - 1)
693  break;
694 
695  UPDATE_NEIGHBOURS(x + *pRun);
696 
697  if (!NWneW && NW == N && N == NE) {
698  int pos, run, rle;
699  int start_pos = x + *pRun;
700 
701  /* scan for a run of pix in the line above */
702  uint32_t pix = above_row[start_pos + 1];
703  for (pos = start_pos + 2; pos < tile_width; pos++)
704  if (!(above_row[pos] == pix))
705  break;
706  run = pos - start_pos - 1;
707  idx = av_ceil_log2(run);
708  if (ff_els_decode_bit(&dc->els_ctx, &dc->prev_row_rung[idx]))
709  *pRun += run;
710  else {
711  int flag;
712  /* run-length is coded as plain binary number of idx - 1 bits */
713  for (pos = idx - 1, rle = 0, flag = 0; pos >= 0; pos--) {
714  if ((1 << pos) + rle < run &&
715  ff_els_decode_bit(&dc->els_ctx,
716  flag ? &dc->runlen_one
717  : &dc->runlen_zeroes[pos])) {
718  flag = 1;
719  rle |= 1 << pos;
720  }
721  }
722  *pRun += rle;
723  break; // return immediately
724  }
725  if (x + *pRun >= tile_width - 1)
726  break;
727 
728  LOAD_NEIGHBOURS(x + *pRun);
729  WWneW = 0;
730  NWneW = 0;
731  }
732 
733  idx = WWneW << 7 |
734  NWneW << 6 |
735  (N != NE) << 5 |
736  (NW != N) << 4 |
737  (NWW != NW) << 3 |
738  (NNE != NE) << 2 |
739  (NN != N) << 1 |
740  (NNW != NW);
741  WWneW = ff_els_decode_bit(&dc->els_ctx, &dc->W_ctx_rung[idx]);
742  } while (!WWneW);
743 
744  dc->next_run_pos = x + *pRun;
745  return got_pixel;
746 }
747 
748 static int epic_predict_pixel2(ePICContext *dc, uint8_t *rung,
749  uint32_t *pPix, uint32_t pix)
750 {
751  if (ff_els_decode_bit(&dc->els_ctx, rung)) {
752  *pPix = pix;
753  return 1;
754  }
755  dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = pix;
756  return 0;
757 }
758 
759 static int epic_predict_from_NW_NE(ePICContext *dc, int x, int y, int run,
760  int tile_width, const uint32_t *curr_row,
761  const uint32_t *above_row, uint32_t *pPix)
762 {
763  int pos;
764 
765  /* try to reuse the NW pixel first */
766  if (x && y) {
767  uint32_t NW = above_row[x - 1];
768  if (NW != curr_row[x - 1] && NW != above_row[x] && !is_pixel_on_stack(dc, NW)) {
769  if (epic_predict_pixel2(dc, &dc->nw_pred_rung[NW & 0xFF], pPix, NW))
770  return 1;
771  }
772  }
773 
774  /* try to reuse the NE[x + run, y] pixel */
775  pos = x + run - 1;
776  if (pos < tile_width - 1 && y) {
777  uint32_t NE = above_row[pos + 1];
778  if (NE != above_row[pos] && !is_pixel_on_stack(dc, NE)) {
779  if (epic_predict_pixel2(dc, &dc->ne_pred_rung[NE & 0xFF], pPix, NE))
780  return 1;
781  }
782  }
783 
784  return 0;
785 }
786 
787 static int epic_decode_from_cache(ePICContext *dc, uint32_t W, uint32_t *pPix)
788 {
789  ePICPixListElem *list, *prev = NULL;
790  ePICPixHashElem *hash_elem = epic_hash_find(&dc->hash, W);
791 
792  if (!hash_elem || !hash_elem->list)
793  return 0;
794 
795  list = hash_elem->list;
796  while (list) {
797  if (!is_pixel_on_stack(dc, list->pixel)) {
798  if (ff_els_decode_bit(&dc->els_ctx, &list->rung)) {
799  *pPix = list->pixel;
800  if (list != hash_elem->list) {
801  prev->next = list->next;
802  list->next = hash_elem->list;
803  hash_elem->list = list;
804  }
805  return 1;
806  }
807  dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = list->pixel;
808  }
809  prev = list;
810  list = list->next;
811  }
812 
813  return 0;
814 }
815 
816 static int epic_decode_tile(ePICContext *dc, uint8_t *out, int tile_height,
817  int tile_width, int stride)
818 {
819  int x, y;
820  uint32_t pix;
821  uint32_t *curr_row = NULL, *above_row = NULL, *above2_row;
822 
823  for (y = 0; y < tile_height; y++, out += stride) {
824  above2_row = above_row;
825  above_row = curr_row;
826  curr_row = (uint32_t *) out;
827 
828  for (x = 0, dc->next_run_pos = 0; x < tile_width;) {
829  if (dc->els_ctx.err)
830  return AVERROR_INVALIDDATA; // bail out in the case of ELS overflow
831 
832  pix = curr_row[x - 1]; // get W pixel
833 
834  if (y >= 1 && x >= 2 &&
835  pix != curr_row[x - 2] && pix != above_row[x - 1] &&
836  pix != above_row[x - 2] && pix != above_row[x] &&
837  !epic_cache_entries_for_pixel(&dc->hash, pix)) {
838  curr_row[x] = epic_decode_pixel_pred(dc, x, y, curr_row, above_row);
839  x++;
840  } else {
841  int got_pixel, run;
842  dc->stack_pos = 0; // empty stack
843 
844  if (y < 2 || x < 2 || x == tile_width - 1) {
845  run = 1;
846  got_pixel = epic_handle_edges(dc, x, y, curr_row, above_row, &pix);
847  } else {
848  got_pixel = epic_decode_run_length(dc, x, y, tile_width,
849  curr_row, above_row,
850  above2_row, &pix, &run);
851  if (got_pixel < 0)
852  return got_pixel;
853  }
854 
855  if (!got_pixel && !epic_predict_from_NW_NE(dc, x, y, run,
856  tile_width, curr_row,
857  above_row, &pix)) {
858  uint32_t ref_pix = curr_row[x - 1];
859  if (!x || !epic_decode_from_cache(dc, ref_pix, &pix)) {
860  pix = epic_decode_pixel_pred(dc, x, y, curr_row, above_row);
861  if (is_pixel_on_stack(dc, pix))
862  return AVERROR_INVALIDDATA;
863 
864  if (x) {
865  int ret = epic_add_pixel_to_cache(&dc->hash,
866  ref_pix,
867  pix);
868  if (ret)
869  return ret;
870  }
871  }
872  }
873  for (; run > 0; x++, run--)
874  curr_row[x] = pix;
875  }
876  }
877  }
878 
879  return 0;
880 }
881 
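/* Decode one ePIC tile: a variable-length-coded size of the ELS partition,
 * the ELS-coded image data, and optionally JPEG data that fills the 8x8
 * blocks containing the transparent colour. An empty ELS partition means the
 * whole tile is plain JPEG. */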
882 static int epic_jb_decode_tile(G2MContext *c, int tile_x, int tile_y,
883  const uint8_t *src, size_t src_size,
884  AVCodecContext *avctx)
885 {
886  uint8_t prefix, mask = 0x80;
887  int extrabytes, tile_width, tile_height, awidth, aheight;
888  size_t els_dsize;
889  uint8_t *dst;
890 
891  if (!src_size)
892  return 0;
893 
894  /* get data size of the ELS partition as unsigned variable-length integer */
895  prefix = *src++;
896  src_size--;
897  for (extrabytes = 0; (prefix & mask) && (extrabytes < 7); extrabytes++)
898  mask >>= 1;
899  if (extrabytes > 3 || src_size < extrabytes) {
900  av_log(avctx, AV_LOG_ERROR, "ePIC: invalid data size VLI\n");
901  return AVERROR_INVALIDDATA;
902  }
903 
904  els_dsize = prefix & ((0x80 >> extrabytes) - 1); // mask out the length prefix
905  while (extrabytes-- > 0) {
906  els_dsize = (els_dsize << 8) | *src++;
907  src_size--;
908  }
909 
910  if (src_size < els_dsize) {
911  av_log(avctx, AV_LOG_ERROR, "ePIC: data too short, needed %"SIZE_SPECIFIER", got %"SIZE_SPECIFIER"\n",
912  els_dsize, src_size);
913  return AVERROR_INVALIDDATA;
914  }
915 
916  tile_width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width);
917  tile_height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);
918  awidth = FFALIGN(tile_width, 16);
919  aheight = FFALIGN(tile_height, 16);
920 
921  if (tile_width > (1 << FF_ARRAY_ELEMS(c->ec.prev_row_rung))) {
922  avpriv_request_sample(avctx, "large tile width");
923  return AVERROR_INVALIDDATA;
924  }
925 
926  if (els_dsize) {
927  int ret, i, j, k;
928  uint8_t tr_r, tr_g, tr_b, *buf;
929  uint32_t *in;
930  /* ELS decoder initializations */
931  memset(&c->ec, 0, sizeof(c->ec));
932  ff_els_decoder_init(&c->ec.els_ctx, src, els_dsize);
933  epic_hash_init(&c->ec.hash);
934 
935  /* decode transparent pixel value */
936  tr_r = ff_els_decode_unsigned(&c->ec.els_ctx, &c->ec.unsigned_rung);
937  tr_g = ff_els_decode_unsigned(&c->ec.els_ctx, &c->ec.unsigned_rung);
938  tr_b = ff_els_decode_unsigned(&c->ec.els_ctx, &c->ec.unsigned_rung);
939  if (c->ec.els_ctx.err != 0) {
940  av_log(avctx, AV_LOG_ERROR,
941  "ePIC: couldn't decode transparency pixel!\n");
942  ff_els_decoder_uninit(&c->ec.unsigned_rung);
943  return AVERROR_INVALIDDATA;
944  }
945 
946  ret = epic_decode_tile(&c->ec, c->epic_buf, tile_height, tile_width,
947  c->epic_buf_stride);
948 
949  epic_free_pixel_cache(&c->ec.hash);
950  ff_els_decoder_uninit(&c->ec.unsigned_rung);
951 
952  if (ret) {
953  av_log(avctx, AV_LOG_ERROR,
954  "ePIC: tile decoding failed, frame=%d, tile_x=%d, tile_y=%d\n",
955  avctx->frame_number, tile_x, tile_y);
956  return AVERROR_INVALIDDATA;
957  }
958 
959  buf = c->epic_buf;
960  dst = c->framebuf + tile_x * c->tile_width * 3 +
961  tile_y * c->tile_height * c->framebuf_stride;
962 
963  for (j = 0; j < tile_height; j++) {
964  uint8_t *out = dst;
965  in = (uint32_t *) buf;
966  for (i = 0; i < tile_width; i++) {
967  out[0] = (in[i] >> R_shift) & 0xFF;
968  out[1] = (in[i] >> G_shift) & 0xFF;
969  out[2] = (in[i] >> B_shift) & 0xFF;
970  out += 3;
971  }
972  buf += c->epic_buf_stride;
973  dst += c->framebuf_stride;
974  }
975 
976  if (src_size > els_dsize) {
977  uint8_t *jpg;
978  uint32_t tr;
979  int bstride = FFALIGN(tile_width, 16) >> 3;
980  int nblocks = 0;
981  int estride = c->epic_buf_stride >> 2;
982 
983  src += els_dsize;
984  src_size -= els_dsize;
985 
986  in = (uint32_t *) c->epic_buf;
987  tr = (tr_r << R_shift) | (tr_g << G_shift) | (tr_b << B_shift);
988 
989  memset(c->kempf_flags, 0,
990  (aheight >> 3) * bstride * sizeof(*c->kempf_flags));
991  for (j = 0; j < tile_height; j += 8) {
992  for (i = 0; i < tile_width; i += 8) {
993  c->kempf_flags[(i >> 3) + (j >> 3) * bstride] = 0;
994  for (k = 0; k < 8 * 8; k++) {
995  if (in[i + (k & 7) + (k >> 3) * estride] == tr) {
996  c->kempf_flags[(i >> 3) + (j >> 3) * bstride] = 1;
997  nblocks++;
998  break;
999  }
1000  }
1001  }
1002  in += 8 * estride;
1003  }
1004 
1005  memset(c->jpeg_tile, 0, c->tile_stride * aheight);
1006  jpg_decode_data(&c->jc, awidth, aheight, src, src_size,
1007  c->jpeg_tile, c->tile_stride,
1008  c->kempf_flags, bstride, nblocks, c->swapuv);
1009 
1010  in = (uint32_t *) c->epic_buf;
1011  dst = c->framebuf + tile_x * c->tile_width * 3 +
1012  tile_y * c->tile_height * c->framebuf_stride;
1013  jpg = c->jpeg_tile;
1014  for (j = 0; j < tile_height; j++) {
1015  for (i = 0; i < tile_width; i++)
1016  if (in[i] == tr)
1017  memcpy(dst + i * 3, jpg + i * 3, 3);
1018  in += c->epic_buf_stride >> 2;
1019  dst += c->framebuf_stride;
1020  jpg += c->tile_stride;
1021  }
1022  }
1023  } else {
1024  dst = c->framebuf + tile_x * c->tile_width * 3 +
1025  tile_y * c->tile_height * c->framebuf_stride;
1026  return jpg_decode_data(&c->jc, tile_width, tile_height, src, src_size,
1027  dst, c->framebuf_stride, NULL, 0, 0, c->swapuv);
1028  }
1029 
1030  return 0;
1031 }
1032 
1033 static int kempf_restore_buf(const uint8_t *src, int len,
1034  uint8_t *dst, int stride,
1035  const uint8_t *jpeg_tile, int tile_stride,
1036  int width, int height,
1037  const uint8_t *pal, int npal, int tidx)
1038 {
1039  GetBitContext gb;
1040  int i, j, nb, col;
1041  int ret;
1042  int align_width = FFALIGN(width, 16);
1043 
1044  if ((ret = init_get_bits8(&gb, src, len)) < 0)
1045  return ret;
1046 
1047  if (npal <= 2) nb = 1;
1048  else if (npal <= 4) nb = 2;
1049  else if (npal <= 16) nb = 4;
1050  else nb = 8;
1051 
1052  for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) {
1053  if (get_bits(&gb, 8))
1054  continue;
1055  for (i = 0; i < width; i++) {
1056  col = get_bits(&gb, nb);
1057  if (col != tidx)
1058  memcpy(dst + i * 3, pal + col * 3, 3);
1059  else
1060  memcpy(dst + i * 3, jpeg_tile + i * 3, 3);
1061  }
1062  skip_bits_long(&gb, nb * (align_width - width));
1063  }
1064 
1065  return 0;
1066 }
1067 
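/* Decode one Kempf tile: sub_type 0 fills the tile with a single colour and
 * sub_type 1 is plain JPEG; the remaining sub-types carry a zlib-compressed
 * paletted image, with JPEG data substituted for pixels that use the
 * transparent palette entry. */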
1068 static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
1069  const uint8_t *src, int src_size)
1070 {
1071  int width, height;
1072  int hdr, zsize, npal, tidx = -1, ret;
1073  int i, j;
1074  const uint8_t *src_end = src + src_size;
1075  uint8_t pal[768], transp[3];
1076  uLongf dlen = (c->tile_width + 1) * c->tile_height;
1077  int sub_type;
1078  int nblocks, cblocks, bstride;
1079  int bits, bitbuf, coded;
1080  uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 +
1081  tile_y * c->tile_height * c->framebuf_stride;
1082 
1083  if (src_size < 2)
1084  return AVERROR_INVALIDDATA;
1085 
1086  width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width);
1087  height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);
1088 
1089  hdr = *src++;
1090  sub_type = hdr >> 5;
1091  if (sub_type == 0) {
1092  int j;
1093  memcpy(transp, src, 3);
1094  src += 3;
1095  for (j = 0; j < height; j++, dst += c->framebuf_stride)
1096  for (i = 0; i < width; i++)
1097  memcpy(dst + i * 3, transp, 3);
1098  return 0;
1099  } else if (sub_type == 1) {
1100  return jpg_decode_data(&c->jc, width, height, src, src_end - src,
1101  dst, c->framebuf_stride, NULL, 0, 0, 0);
1102  }
1103 
1104  if (sub_type != 2) {
1105  memcpy(transp, src, 3);
1106  src += 3;
1107  }
1108  npal = *src++ + 1;
1109  if (src_end - src < npal * 3)
1110  return AVERROR_INVALIDDATA;
1111  memcpy(pal, src, npal * 3);
1112  src += npal * 3;
1113  if (sub_type != 2) {
1114  for (i = 0; i < npal; i++) {
1115  if (!memcmp(pal + i * 3, transp, 3)) {
1116  tidx = i;
1117  break;
1118  }
1119  }
1120  }
1121 
1122  if (src_end - src < 2)
1123  return 0;
1124  zsize = (src[0] << 8) | src[1];
1125  src += 2;
1126 
1127  if (src_end - src < zsize + (sub_type != 2))
1128  return AVERROR_INVALIDDATA;
1129 
1130  ret = uncompress(c->kempf_buf, &dlen, src, zsize);
1131  if (ret)
1132  return AVERROR_INVALIDDATA;
1133  src += zsize;
1134 
1135  if (sub_type == 2) {
1136  kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
1137  NULL, 0, width, height, pal, npal, tidx);
1138  return 0;
1139  }
1140 
1141  nblocks = *src++ + 1;
1142  cblocks = 0;
1143  bstride = FFALIGN(width, 16) >> 3;
1144 // the coded-block flags are stored LSB-first, while the JPEG data that follows needs the normal (MSB-first) bitreader
1145  bits = 0;
1146  for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
1147  for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
1148  if (!bits) {
1149  if (src >= src_end)
1150  return AVERROR_INVALIDDATA;
1151  bitbuf = *src++;
1152  bits = 8;
1153  }
1154  coded = bitbuf & 1;
1155  bits--;
1156  bitbuf >>= 1;
1157  cblocks += coded;
1158  if (cblocks > nblocks)
1159  return AVERROR_INVALIDDATA;
1160  c->kempf_flags[j * 2 + i * 2 * bstride] =
1161  c->kempf_flags[j * 2 + 1 + i * 2 * bstride] =
1162  c->kempf_flags[j * 2 + (i * 2 + 1) * bstride] =
1163  c->kempf_flags[j * 2 + 1 + (i * 2 + 1) * bstride] = coded;
1164  }
1165  }
1166 
1167  memset(c->jpeg_tile, 0, c->tile_stride * height);
1168  jpg_decode_data(&c->jc, width, height, src, src_end - src,
1169  c->jpeg_tile, c->tile_stride,
1170  c->kempf_flags, bstride, nblocks * 4, 0);
1171 
1172  kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
1173  c->jpeg_tile, c->tile_stride,
1174  width, height, pal, npal, tidx);
1175 
1176  return 0;
1177 }
1178 
1179 static int g2m_init_buffers(G2MContext *c)
1180 {
1181  int aligned_height;
1182 
1183  if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) {
1184  c->framebuf_stride = FFALIGN(c->width + 15, 16) * 3;
1185  aligned_height = c->height + 15;
1186  av_free(c->framebuf);
1187  c->framebuf = av_mallocz_array(c->framebuf_stride, aligned_height);
1188  if (!c->framebuf)
1189  return AVERROR(ENOMEM);
1190  }
1191  if (!c->synth_tile || !c->jpeg_tile ||
1192  (c->compression == 2 && !c->epic_buf_base) ||
1193  c->old_tile_w < c->tile_width ||
1194  c->old_tile_h < c->tile_height) {
1195  c->tile_stride = FFALIGN(c->tile_width, 16) * 3;
1196  c->epic_buf_stride = FFALIGN(c->tile_width * 4, 16);
1197  aligned_height = FFALIGN(c->tile_height, 16);
1198  av_freep(&c->synth_tile);
1199  av_freep(&c->jpeg_tile);
1200  av_freep(&c->kempf_buf);
1201  av_freep(&c->kempf_flags);
1202  av_freep(&c->epic_buf_base);
1203  c->epic_buf = NULL;
1204  c->synth_tile = av_mallocz(c->tile_stride * aligned_height);
1205  c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height);
1206  c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height +
1207  AV_INPUT_BUFFER_PADDING_SIZE);
1208  c->kempf_flags = av_mallocz(c->tile_width * aligned_height);
1209  if (!c->synth_tile || !c->jpeg_tile ||
1210  !c->kempf_buf || !c->kempf_flags)
1211  return AVERROR(ENOMEM);
1212  if (c->compression == 2) {
1213  c->epic_buf_base = av_mallocz(c->epic_buf_stride * aligned_height + 4);
1214  if (!c->epic_buf_base)
1215  return AVERROR(ENOMEM);
1216  c->epic_buf = c->epic_buf_base + 4;
1217  }
1218  }
1219 
1220  return 0;
1221 }
1222 
1223 static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c,
1224  GetByteContext *gb)
1225 {
1226  int i, j, k;
1227  uint8_t *dst;
1228  uint32_t bits;
1229  uint32_t cur_size, cursor_w, cursor_h, cursor_stride;
1230  uint32_t cursor_hot_x, cursor_hot_y;
1231  int cursor_fmt, err;
1232 
1233  cur_size = bytestream2_get_be32(gb);
1234  cursor_w = bytestream2_get_byte(gb);
1235  cursor_h = bytestream2_get_byte(gb);
1236  cursor_hot_x = bytestream2_get_byte(gb);
1237  cursor_hot_y = bytestream2_get_byte(gb);
1238  cursor_fmt = bytestream2_get_byte(gb);
1239 
1240  cursor_stride = FFALIGN(cursor_w, cursor_fmt==1 ? 32 : 1) * 4;
1241 
1242  if (cursor_w < 1 || cursor_w > 256 ||
1243  cursor_h < 1 || cursor_h > 256) {
1244  av_log(avctx, AV_LOG_ERROR, "Invalid cursor dimensions %"PRIu32"x%"PRIu32"\n",
1245  cursor_w, cursor_h);
1246  return AVERROR_INVALIDDATA;
1247  }
1248  if (cursor_hot_x > cursor_w || cursor_hot_y > cursor_h) {
1249  av_log(avctx, AV_LOG_WARNING, "Invalid hotspot position %"PRIu32",%"PRIu32"\n",
1250  cursor_hot_x, cursor_hot_y);
1251  cursor_hot_x = FFMIN(cursor_hot_x, cursor_w - 1);
1252  cursor_hot_y = FFMIN(cursor_hot_y, cursor_h - 1);
1253  }
1254  if (cur_size - 9 > bytestream2_get_bytes_left(gb) ||
1255  c->cursor_w * c->cursor_h / 4 > cur_size) {
1256  av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %"PRIu32"/%u\n",
1257  cur_size, bytestream2_get_bytes_left(gb));
1258  return AVERROR_INVALIDDATA;
1259  }
1260  if (cursor_fmt != 1 && cursor_fmt != 32) {
1261  avpriv_report_missing_feature(avctx, "Cursor format %d",
1262  cursor_fmt);
1263  return AVERROR_PATCHWELCOME;
1264  }
1265 
1266  if ((err = av_reallocp(&c->cursor, cursor_stride * cursor_h)) < 0) {
1267  av_log(avctx, AV_LOG_ERROR, "Cannot allocate cursor buffer\n");
1268  return err;
1269  }
1270 
1271  c->cursor_w = cursor_w;
1272  c->cursor_h = cursor_h;
1273  c->cursor_hot_x = cursor_hot_x;
1274  c->cursor_hot_y = cursor_hot_y;
1275  c->cursor_fmt = cursor_fmt;
1276  c->cursor_stride = cursor_stride;
1277 
1278  dst = c->cursor;
1279  switch (c->cursor_fmt) {
1280  case 1: // old monochrome
1281  for (j = 0; j < c->cursor_h; j++) {
1282  for (i = 0; i < c->cursor_w; i += 32) {
1283  bits = bytestream2_get_be32(gb);
1284  for (k = 0; k < 32; k++) {
1285  dst[0] = !!(bits & 0x80000000);
1286  dst += 4;
1287  bits <<= 1;
1288  }
1289  }
1290  }
1291 
1292  dst = c->cursor;
1293  for (j = 0; j < c->cursor_h; j++) {
1294  for (i = 0; i < c->cursor_w; i += 32) {
1295  bits = bytestream2_get_be32(gb);
1296  for (k = 0; k < 32; k++) {
1297  int mask_bit = !!(bits & 0x80000000);
1298  switch (dst[0] * 2 + mask_bit) {
1299  case 0:
1300  dst[0] = 0xFF;
1301  dst[1] = 0x00;
1302  dst[2] = 0x00;
1303  dst[3] = 0x00;
1304  break;
1305  case 1:
1306  dst[0] = 0xFF;
1307  dst[1] = 0xFF;
1308  dst[2] = 0xFF;
1309  dst[3] = 0xFF;
1310  break;
1311  default:
1312  dst[0] = 0x00;
1313  dst[1] = 0x00;
1314  dst[2] = 0x00;
1315  dst[3] = 0x00;
1316  }
1317  dst += 4;
1318  bits <<= 1;
1319  }
1320  }
1321  }
1322  break;
1323  case 32: // full colour
1324  /* skip monochrome version of the cursor and decode RGBA instead */
1325  bytestream2_skip(gb, c->cursor_h * (FFALIGN(c->cursor_w, 32) >> 3));
1326  for (j = 0; j < c->cursor_h; j++) {
1327  for (i = 0; i < c->cursor_w; i++) {
1328  int val = bytestream2_get_be32(gb);
1329  *dst++ = val >> 0;
1330  *dst++ = val >> 8;
1331  *dst++ = val >> 16;
1332  *dst++ = val >> 24;
1333  }
1334  }
1335  break;
1336  default:
1337  return AVERROR_PATCHWELCOME;
1338  }
1339  return 0;
1340 }
1341 
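/* Blend the RGBA cursor over the RGB framebuffer; APPLY_ALPHA is an 8-bit
 * fixed-point linear blend driven by the cursor's alpha channel. */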
1342 #define APPLY_ALPHA(src, new, alpha) \
1343  src = (src * (256 - alpha) + new * alpha) >> 8
1344 
1345 static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride)
1346 {
1347  int i, j;
1348  int x, y, w, h;
1349  const uint8_t *cursor;
1350 
1351  if (!c->cursor)
1352  return;
1353 
1354  x = c->cursor_x - c->cursor_hot_x;
1355  y = c->cursor_y - c->cursor_hot_y;
1356 
1357  cursor = c->cursor;
1358  w = c->cursor_w;
1359  h = c->cursor_h;
1360 
1361  if (x + w > c->width)
1362  w = c->width - x;
1363  if (y + h > c->height)
1364  h = c->height - y;
1365  if (x < 0) {
1366  w += x;
1367  cursor += -x * 4;
1368  } else {
1369  dst += x * 3;
1370  }
1371 
1372  if (y < 0)
1373  h += y;
1374  if (w < 0 || h < 0)
1375  return;
1376  if (y < 0) {
1377  cursor += -y * c->cursor_stride;
1378  } else {
1379  dst += y * stride;
1380  }
1381 
1382  for (j = 0; j < h; j++) {
1383  for (i = 0; i < w; i++) {
1384  uint8_t alpha = cursor[i * 4];
1385  APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha);
1386  APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha);
1387  APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha);
1388  }
1389  dst += stride;
1390  cursor += c->cursor_stride;
1391  }
1392 }
1393 
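/* A G2M frame is a sequence of size-prefixed chunks (display info, tile data,
 * cursor position/shape). Tiles are composited into c->framebuf, which is
 * copied into the output picture with the cursor painted on top. */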
1394 static int g2m_decode_frame(AVCodecContext *avctx, void *data,
1395  int *got_picture_ptr, AVPacket *avpkt)
1396 {
1397  const uint8_t *buf = avpkt->data;
1398  int buf_size = avpkt->size;
1399  G2MContext *c = avctx->priv_data;
1400  AVFrame *pic = data;
1401  GetByteContext bc, tbc;
1402  int magic;
1403  int got_header = 0;
1404  uint32_t chunk_size, r_mask, g_mask, b_mask;
1405  int chunk_type, chunk_start;
1406  int i;
1407  int ret;
1408 
1409  if (buf_size < 12) {
1410  av_log(avctx, AV_LOG_ERROR,
1411  "Frame should have at least 12 bytes, got %d instead\n",
1412  buf_size);
1413  return AVERROR_INVALIDDATA;
1414  }
1415 
1416  bytestream2_init(&bc, buf, buf_size);
1417 
1418  magic = bytestream2_get_be32(&bc);
1419  if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') ||
1420  (magic & 0xF) < 2 || (magic & 0xF) > 5) {
1421  av_log(avctx, AV_LOG_ERROR, "Wrong magic %08X\n", magic);
1422  return AVERROR_INVALIDDATA;
1423  }
1424 
1425  c->swapuv = magic == MKBETAG('G', '2', 'M', '2');
1426 
1427  while (bytestream2_get_bytes_left(&bc) > 5) {
1428  chunk_size = bytestream2_get_le32(&bc) - 1;
1429  chunk_type = bytestream2_get_byte(&bc);
1430  chunk_start = bytestream2_tell(&bc);
1431  if (chunk_size > bytestream2_get_bytes_left(&bc)) {
1432  av_log(avctx, AV_LOG_ERROR, "Invalid chunk size %"PRIu32" type %02X\n",
1433  chunk_size, chunk_type);
1434  break;
1435  }
1436  switch (chunk_type) {
1437  case DISPLAY_INFO:
1438  got_header =
1439  c->got_header = 0;
1440  if (chunk_size < 21) {
1441  av_log(avctx, AV_LOG_ERROR, "Invalid display info size %"PRIu32"\n",
1442  chunk_size);
1443  break;
1444  }
1445  c->width = bytestream2_get_be32(&bc);
1446  c->height = bytestream2_get_be32(&bc);
1447  if (c->width < 16 || c->height < 16) {
1448  av_log(avctx, AV_LOG_ERROR,
1449  "Invalid frame dimensions %dx%d\n",
1450  c->width, c->height);
1451  ret = AVERROR_INVALIDDATA;
1452  goto header_fail;
1453  }
1454  if (c->width != avctx->width || c->height != avctx->height) {
1455  ret = ff_set_dimensions(avctx, c->width, c->height);
1456  if (ret < 0)
1457  goto header_fail;
1458  }
1459  c->compression = bytestream2_get_be32(&bc);
1460  if (c->compression != 2 && c->compression != 3) {
1461  avpriv_report_missing_feature(avctx, "Compression method %d",
1462  c->compression);
1463  ret = AVERROR_PATCHWELCOME;
1464  goto header_fail;
1465  }
1466  c->tile_width = bytestream2_get_be32(&bc);
1467  c->tile_height = bytestream2_get_be32(&bc);
1468  if (c->tile_width <= 0 || c->tile_height <= 0 ||
1469  ((c->tile_width | c->tile_height) & 0xF) ||
1470  c->tile_width * (uint64_t)c->tile_height >= INT_MAX / 4 ||
1471  av_image_check_size2(c->tile_width, c->tile_height, avctx->max_pixels, avctx->pix_fmt, 0, avctx) < 0
1472  ) {
1473  av_log(avctx, AV_LOG_ERROR,
1474  "Invalid tile dimensions %dx%d\n",
1475  c->tile_width, c->tile_height);
1476  ret = AVERROR_INVALIDDATA;
1477  goto header_fail;
1478  }
1479  c->tiles_x = (c->width + c->tile_width - 1) / c->tile_width;
1480  c->tiles_y = (c->height + c->tile_height - 1) / c->tile_height;
1481  c->bpp = bytestream2_get_byte(&bc);
1482  if (c->bpp == 32) {
1483  if (bytestream2_get_bytes_left(&bc) < 16 ||
1484  (chunk_size - 21) < 16) {
1485  av_log(avctx, AV_LOG_ERROR,
1486  "Display info: missing bitmasks!\n");
1487  ret = AVERROR_INVALIDDATA;
1488  goto header_fail;
1489  }
1490  r_mask = bytestream2_get_be32(&bc);
1491  g_mask = bytestream2_get_be32(&bc);
1492  b_mask = bytestream2_get_be32(&bc);
1493  if (r_mask != 0xFF0000 || g_mask != 0xFF00 || b_mask != 0xFF) {
1494  avpriv_report_missing_feature(avctx,
1495  "Bitmasks: R=%"PRIX32", G=%"PRIX32", B=%"PRIX32,
1496  r_mask, g_mask, b_mask);
1497  ret = AVERROR_PATCHWELCOME;
1498  goto header_fail;
1499  }
1500  } else {
1501  avpriv_request_sample(avctx, "bpp=%d", c->bpp);
1502  ret = AVERROR_PATCHWELCOME;
1503  goto header_fail;
1504  }
1505  if (g2m_init_buffers(c)) {
1506  ret = AVERROR(ENOMEM);
1507  goto header_fail;
1508  }
1509  got_header = 1;
1510  break;
1511  case TILE_DATA:
1512  if (!c->tiles_x || !c->tiles_y) {
1513  av_log(avctx, AV_LOG_WARNING,
1514  "No display info - skipping tile\n");
1515  break;
1516  }
1517  if (chunk_size < 2) {
1518  av_log(avctx, AV_LOG_ERROR, "Invalid tile data size %"PRIu32"\n",
1519  chunk_size);
1520  break;
1521  }
1522  c->tile_x = bytestream2_get_byte(&bc);
1523  c->tile_y = bytestream2_get_byte(&bc);
1524  if (c->tile_x >= c->tiles_x || c->tile_y >= c->tiles_y) {
1525  av_log(avctx, AV_LOG_ERROR,
1526  "Invalid tile pos %d,%d (in %dx%d grid)\n",
1527  c->tile_x, c->tile_y, c->tiles_x, c->tiles_y);
1528  break;
1529  }
1530  ret = 0;
1531  switch (c->compression) {
1532  case COMPR_EPIC_J_B:
1533  ret = epic_jb_decode_tile(c, c->tile_x, c->tile_y,
1534  buf + bytestream2_tell(&bc),
1535  chunk_size - 2, avctx);
1536  break;
1537  case COMPR_KEMPF_J_B:
1538  ret = kempf_decode_tile(c, c->tile_x, c->tile_y,
1539  buf + bytestream2_tell(&bc),
1540  chunk_size - 2);
1541  break;
1542  }
1543  if (ret && c->framebuf)
1544  av_log(avctx, AV_LOG_ERROR, "Error decoding tile %d,%d\n",
1545  c->tile_x, c->tile_y);
1546  break;
1547  case CURSOR_POS:
1548  if (chunk_size < 5) {
1549  av_log(avctx, AV_LOG_ERROR, "Invalid cursor pos size %"PRIu32"\n",
1550  chunk_size);
1551  break;
1552  }
1553  c->cursor_x = bytestream2_get_be16(&bc);
1554  c->cursor_y = bytestream2_get_be16(&bc);
1555  break;
1556  case CURSOR_SHAPE:
1557  if (chunk_size < 8) {
1558  av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %"PRIu32"\n",
1559  chunk_size);
1560  break;
1561  }
1562  bytestream2_init(&tbc, buf + bytestream2_tell(&bc),
1563  chunk_size - 4);
1564  g2m_load_cursor(avctx, c, &tbc);
1565  break;
1566  case CHUNK_CC:
1567  case CHUNK_CD:
1568  break;
1569  default:
1570  av_log(avctx, AV_LOG_WARNING, "Skipping chunk type %02d\n",
1571  chunk_type);
1572  }
1573 
1574  /* navigate to next chunk */
1575  bytestream2_skip(&bc, chunk_start + chunk_size - bytestream2_tell(&bc));
1576  }
1577  if (got_header)
1578  c->got_header = 1;
1579 
1580  if (c->width && c->height && c->framebuf) {
1581  if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
1582  return ret;
1583 
1584  pic->key_frame = got_header;
1585  pic->pict_type = got_header ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1586 
1587  for (i = 0; i < avctx->height; i++)
1588  memcpy(pic->data[0] + i * pic->linesize[0],
1589  c->framebuf + i * c->framebuf_stride,
1590  c->width * 3);
1591  g2m_paint_cursor(c, pic->data[0], pic->linesize[0]);
1592 
1593  *got_picture_ptr = 1;
1594  }
1595 
1596  return buf_size;
1597 
1598 header_fail:
1599  c->width =
1600  c->height = 0;
1601  c->tiles_x =
1602  c->tiles_y = 0;
1603  c->tile_width =
1604  c->tile_height = 0;
1605  return ret;
1606 }
1607 
1608 static av_cold int g2m_decode_init(AVCodecContext *avctx)
1609 {
1610  G2MContext *const c = avctx->priv_data;
1611  int ret;
1612 
1613  if ((ret = jpg_init(avctx, &c->jc)) != 0) {
1614  av_log(avctx, AV_LOG_ERROR, "Cannot initialise VLCs\n");
1615  jpg_free_context(&c->jc);
1616  return AVERROR(ENOMEM);
1617  }
1618 
1619  avctx->pix_fmt = AV_PIX_FMT_RGB24;
1620 
1621  // store original sizes and check against those if resize happens
1622  c->orig_width = avctx->width;
1623  c->orig_height = avctx->height;
1624 
1625  return 0;
1626 }
1627 
1628 static av_cold int g2m_decode_end(AVCodecContext *avctx)
1629 {
1630  G2MContext *const c = avctx->priv_data;
1631 
1632  jpg_free_context(&c->jc);
1633 
1634  av_freep(&c->epic_buf_base);
1635  c->epic_buf = NULL;
1636  av_freep(&c->kempf_buf);
1637  av_freep(&c->kempf_flags);
1638  av_freep(&c->synth_tile);
1639  av_freep(&c->jpeg_tile);
1640  av_freep(&c->cursor);
1641  av_freep(&c->framebuf);
1642 
1643  return 0;
1644 }
1645 
1646 AVCodec ff_g2m_decoder = {
1647  .name = "g2m",
1648  .long_name = NULL_IF_CONFIG_SMALL("Go2Meeting"),
1649  .type = AVMEDIA_TYPE_VIDEO,
1650  .id = AV_CODEC_ID_G2M,
1651  .priv_data_size = sizeof(G2MContext),
1652  .init = g2m_decode_init,
1653  .close = g2m_decode_end,
1654  .decode = g2m_decode_frame,
1655  .capabilities = AV_CODEC_CAP_DR1,
1656  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
1657 };
#define LOAD_NEIGHBOURS(x)
Definition: g2meet.c:371
static int epic_predict_from_NW_NE(ePICContext *dc, int x, int y, int run, int tile_width, const uint32_t *curr_row, const uint32_t *above_row, uint32_t *pPix)
Definition: g2meet.c:759
int tiles_y
Definition: g2meet.c:142
int cursor_hot_y
Definition: g2meet.c:159
#define NULL
Definition: coverity.c:32
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int epic_handle_edges(ePICContext *dc, int x, int y, const uint32_t *curr_row, const uint32_t *above_row, uint32_t *pPix)
Definition: g2meet.c:582
static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y, const uint8_t *src, int src_size)
Definition: g2meet.c:1068
static int epic_add_pixel_to_cache(ePICPixHash *hash, uint32_t key, uint32_t pix)
Definition: g2meet.c:448
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
uint8_t * kempf_flags
Definition: g2meet.c:153
void * av_realloc(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory.
Definition: mem.c:134
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
int height
Definition: g2meet.c:139
ChunkType
Definition: g2meet.c:48
static av_cold void jpg_free_context(JPGContext *ctx)
Definition: g2meet.c:212
uint8_t * epic_buf
Definition: g2meet.c:149
uint8_t * kempf_buf
Definition: g2meet.c:153
misc image utilities
static int chunk_start(AVFormatContext *s)
Definition: webm_chunk.c:169
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
int width
Definition: g2meet.c:139
uint32_t pixel
Definition: g2meet.c:86
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:106
static int epic_predict_pixel(ePICContext *dc, uint8_t *rung, uint32_t *pPix, uint32_t pix)
Definition: g2meet.c:571
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
int got_header
Definition: g2meet.c:144
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
int cursor_fmt
Definition: g2meet.c:157
uint8_t nw_pred_rung[256]
Definition: g2meet.c:110
Entropy Logarithmic-Scale binary arithmetic coder.
void(* clear_block)(int16_t *block)
Definition: blockdsp.h:36
#define avpriv_request_sample(...)
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:281
static int epic_decode_tile(ePICContext *dc, uint8_t *out, int tile_height, int tile_width, int stride)
Definition: g2meet.c:816
uint32_t stack[EPIC_PIX_STACK_SIZE]
Definition: g2meet.c:116
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:323
Scantable.
Definition: idctdsp.h:31
int size
Definition: packet.h:364
int next_run_pos
Definition: g2meet.c:104
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:60
#define EPIC_HASH_SIZE
Definition: g2meet.c:95
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:741
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
const char * key
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
VLC dc_vlc[2]
Definition: g2meet.c:125
uint8_t permutated[64]
Definition: idctdsp.h:33
uint8_t run
Definition: svq3.c:205
#define R_shift
Definition: g2meet.c:391
static int g2m_init_buffers(G2MContext *c)
Definition: g2meet.c:1179
int swapuv
Definition: g2meet.c:151
AVCodec.
Definition: codec.h:190
int16_t block[6][64]
Definition: g2meet.c:127
MJPEG encoder and decoder.
static void jpg_unescape(const uint8_t *src, int src_size, uint8_t *dst, int *dst_size)
Definition: g2meet.c:224
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
#define B_shift
Definition: g2meet.c:393
#define N
Definition: af_mcompand.c:54
int tile_width
Definition: g2meet.c:141
uint8_t rung
Definition: g2meet.c:87
int bpp
Definition: g2meet.c:139
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
void ff_mjpeg_build_huffman_codes(uint8_t *huff_size, uint16_t *huff_code, const uint8_t *bits_table, const uint8_t *val_table)
Definition: jpegtables.c:127
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:88
int tile_x
Definition: g2meet.c:142
static int epic_cache_entries_for_pixel(const ePICPixHash *hash, uint32_t pix)
Definition: g2meet.c:469
float delta
static ePICPixHashElem * epic_hash_find(const ePICPixHash *hash, uint32_t key)
Definition: g2meet.c:413
int cursor_h
Definition: g2meet.c:158
static int epic_decode_component_pred(ePICContext *dc, int N, int W, int NW)
Definition: g2meet.c:512
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
static int is_pixel_on_stack(const ePICContext *dc, uint32_t pix)
Definition: g2meet.c:499
ePICPixHash hash
Definition: g2meet.c:117
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
Definition: jpegtables.c:65
int framebuf_stride
Definition: g2meet.c:147
struct ePICPixListElem * list
Definition: g2meet.c:92
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
Definition: mem.h:117
#define height
uint8_t N_ctx_rung[512]
Definition: g2meet.c:109
uint8_t * data
Definition: packet.h:363
bitstream reader API header.
uint8_t * framebuf
Definition: g2meet.c:146
static void epic_free_pixel_cache(ePICPixHash *hash)
Definition: g2meet.c:480
int version
Definition: g2meet.c:136
ScanTable scantable
Definition: g2meet.c:123
#define G_shift
Definition: g2meet.c:392
static av_cold int g2m_decode_init(AVCodecContext *avctx)
Definition: g2meet.c:1608
uint8_t W_ctx_rung[256]
Definition: g2meet.c:108
#define FFALIGN(x, a)
Definition: macros.h:48
VLC ac_vlc[2]
Definition: g2meet.c:125
#define av_log(a,...)
static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
Definition: g2meet.c:183
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
uint8_t hash[HASH_SIZE]
Definition: movenc.c:57
#define U(x)
Definition: vp56_arith.h:37
#define src
Definition: vp8dsp.c:255
int err
Definition: elsdec.h:40
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
uint8_t * buf
Definition: g2meet.c:129
#define NN(type, name)
Definition: vf_shear.c:123
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
#define R
Definition: huffyuvdsp.h:34
ElsUnsignedRung unsigned_rung
Definition: g2meet.c:105
static const uint16_t mask[17]
Definition: lzw.c:38
static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c, GetByteContext *gb)
Definition: g2meet.c:1223
static av_cold int g2m_decode_end(AVCodecContext *avctx)
Definition: g2meet.c:1628
ePICContext ec
Definition: g2meet.c:133
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
Definition: jpegtables.c:70
void(* clear_blocks)(int16_t *blocks)
Definition: blockdsp.h:37
uint8_t runlen_zeroes[14]
Definition: g2meet.c:113
#define B
Definition: huffyuvdsp.h:32
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:115
int old_tile_h
Definition: g2meet.c:150
int cursor_stride
Definition: g2meet.c:156
int orig_height
Definition: g2meet.c:140
unsigned int pos
Definition: spdifenc.c:410
static int epic_jb_decode_tile(G2MContext *c, int tile_x, int tile_y, const uint8_t *src, size_t src_size, AVCodecContext *avctx)
Definition: g2meet.c:882
const char * name
Name of the codec implementation.
Definition: codec.h:197
void ff_els_decoder_init(ElsDecCtx *ctx, const uint8_t *in, size_t data_size)
Definition: elsdec.c:247
static ePICPixHashElem * epic_hash_add(ePICPixHash *hash, uint32_t key)
Definition: g2meet.c:425
uint8_t bits
Definition: vp3data.h:141
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:2250
int orig_width
Definition: g2meet.c:140
Definition: vlc.h:26
static int jpg_decode_block(JPGContext *c, GetBitContext *gb, int plane, int16_t *block)
Definition: g2meet.c:241
uint8_t prev_row_rung[14]
Definition: g2meet.c:112
uint8_t * cursor
Definition: g2meet.c:155
int prev_dc[3]
Definition: g2meet.c:126
static FFFrameBucket * bucket(FFFrameQueue *fq, size_t idx)
Definition: framequeue.c:25
static VLC dc_vlc
Definition: clearvideo.c:84
Compression
Definition: g2meet.c:57
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:397
#define Y
Definition: boxblur.h:38
static int jpg_decode_data(JPGContext *c, int width, int height, const uint8_t *src, int src_size, uint8_t *dst, int dst_stride, const uint8_t *mask, int mask_stride, int num_mbs, int swapuv)
Definition: g2meet.c:288
#define FFMIN(a, b)
Definition: common.h:104
static int g2m_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt)
Definition: g2meet.c:1394
uint8_t * synth_tile
Definition: g2meet.c:149
#define width
int width
picture width / height.
Definition: avcodec.h:704
uint8_t w
Definition: llviddspenc.c:39
AVFormatContext * ctx
Definition: movenc.c:48
int cursor_y
Definition: g2meet.c:158
static int epic_predict_pixel2(ePICContext *dc, uint8_t *rung, uint32_t *pPix, uint32_t pix)
Definition: g2meet.c:748
static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride)
Definition: g2meet.c:1345
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
int tile_y
Definition: g2meet.c:142
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:96
int old_tile_w
Definition: g2meet.c:150
uint8_t ne_pred_rung[256]
Definition: g2meet.c:111
#define FF_ARRAY_ELEMS(a)
if(ret)
static const float pred[4]
Definition: siprdata.h:259
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
ElsDecCtx els_ctx
Definition: g2meet.c:103
static uint32_t epic_decode_pixel_pred(ePICContext *dc, int x, int y, const uint32_t *curr_row, const uint32_t *above_row)
Definition: g2meet.c:519
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
#define TOSIGNED(val)
Definition: g2meet.c:510
#define EPIC_PIX_STACK_MAX
Definition: g2meet.c:46
AVCodec ff_g2m_decoder
Definition: g2meet.c:1646
int av_reallocp(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory through a pointer to a pointer.
Definition: mem.c:161
Libavcodec external API header.
static int kempf_restore_buf(const uint8_t *src, int len, uint8_t *dst, int stride, const uint8_t *jpeg_tile, int tile_stride, int width, int height, const uint8_t *pal, int npal, int tidx)
Definition: g2meet.c:1033
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
#define APPLY_ALPHA(src, new, alpha)
Definition: g2meet.c:1342
int tile_stride
Definition: g2meet.c:150
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
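init_get_bits8() attaches a bit reader to a plain byte buffer; a self-contained sketch (the 4-bit read is purely illustrative):

#include "get_bits.h"

/* Illustrative: read the top nibble of the first byte. */
static int read_top_nibble(const uint8_t *buf, int size)
{
    GetBitContext gb;
    int ret = init_get_bits8(&gb, buf, size);
    if (ret < 0)
        return ret;
    return get_bits(&gb, 4);
}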
int bucket_size[EPIC_HASH_SIZE]
Definition: g2meet.c:98
static const int16_t alpha[]
Definition: ilbcdata.h:55
main external API structure.
Definition: avcodec.h:531
static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int is_ac)
Definition: g2meet.c:162
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
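Reading the brief above, an n-bit code c decodes to c when its leading bit is 1 and to c - (2^n - 1) otherwise, which is the usual JPEG/MPEG DC-difference folding. A worked sketch under that assumption (function name is hypothetical):

#include "get_bits.h"

/* n = 3: code 0b110 (6) -> +6, code 0b001 (1) -> 1 - (2^3 - 1) = -6 */
static int read_dc_delta(GetBitContext *gb, int category)
{
    return category ? get_xbits(gb, category) : 0;  /* category 0 means delta 0 */
}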
JPGContext jc
Definition: g2meet.c:134
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1884
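ff_get_buffer() is how a decoder obtains a writable, reference-counted output frame; a hedged sketch of the usual call pattern (the frame metadata assignments are placeholders, not necessarily what g2m_decode_frame() sets):

#include "avcodec.h"
#include "internal.h"

/* Illustrative: request an output frame and tag it as an intra picture. */
static int get_output_frame(AVCodecContext *avctx, AVFrame *frame)
{
    int ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0)
        return ret;
    frame->key_frame = 1;
    frame->pict_type = AV_PICTURE_TYPE_I;
    return 0;
}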
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
int compression
Definition: g2meet.c:138
static int djb2_hash(uint32_t key)
Definition: g2meet.c:396
int epic_buf_stride
Definition: g2meet.c:150
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
#define mid_pred
Definition: mathops.h:97
int cursor_w
Definition: g2meet.c:158
ePICPixHashElem * bucket[EPIC_HASH_SIZE]
Definition: g2meet.c:97
uint8_t runlen_one
Definition: g2meet.c:114
int cursor_hot_x
Definition: g2meet.c:159
static VLC ac_vlc
Definition: clearvideo.c:84
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
unsigned ff_els_decode_unsigned(ElsDecCtx *ctx, ElsUnsignedRung *ur)
Definition: elsdec.c:350
static const uint8_t chroma_quant[64]
Definition: g2meet.c:73
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
static int epic_decode_from_cache(ePICContext *dc, uint32_t W, uint32_t *pPix)
Definition: g2meet.c:787
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
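avpriv_report_missing_feature() usually pairs with AVERROR_PATCHWELCOME (listed earlier in this index); a sketch of that bail-out pattern, with the condition and message as placeholders:

#include "libavutil/internal.h"
#include "libavutil/error.h"
#include "avcodec.h"

/* Illustrative: reject an unsupported bitstream variant with a log message. */
static int check_variant(AVCodecContext *avctx, int variant)
{
    if (variant != 0) {   /* placeholder condition */
        avpriv_report_missing_feature(avctx, "Bitstream variant %d", variant);
        return AVERROR_PATCHWELCOME;
    }
    return 0;
}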
#define SIZE_SPECIFIER
Definition: internal.h:191
struct ePICPixListElem * next
Definition: g2meet.c:85
int bucket_fill[EPIC_HASH_SIZE]
Definition: g2meet.c:99
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
int tiles_x
Definition: g2meet.c:142
uint8_t W_flag_rung
Definition: g2meet.c:106
uint8_t * epic_buf_base
Definition: g2meet.c:149
static void epic_hash_init(ePICPixHash *hash)
Definition: g2meet.c:408
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
common internal api header.
uint32_t pix_id
Definition: g2meet.c:91
uint8_t N_flag_rung
Definition: g2meet.c:107
#define G
Definition: huffyuvdsp.h:33
#define flag(name)
Definition: cbs_av1.c:553
BlockDSPContext bdsp
Definition: g2meet.c:121
int cursor_x
Definition: g2meet.c:158
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with the specified pix_fmt can be addressed with a signed int.
Definition: imgutils.c:288
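av_image_check_size2() is the usual guard before trusting stream-supplied dimensions; a sketch using the signature listed above (passing AV_PIX_FMT_NONE and the context's max_pixels is an assumption, not necessarily what this decoder does):

#include "libavutil/imgutils.h"
#include "avcodec.h"

/* Illustrative: validate width/height parsed from the bitstream. */
static int check_dimensions(AVCodecContext *avctx, unsigned w, unsigned h)
{
    return av_image_check_size2(w, h, avctx->max_pixels,
                                AV_PIX_FMT_NONE, 0, avctx);
}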
IDCTDSPContext idsp
Definition: g2meet.c:122
static const uint8_t luma_quant[64]
Definition: g2meet.c:62
#define MKBETAG(a, b, c, d)
Definition: common.h:414
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
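The padding requirement above applies whenever compressed data is copied into a decoder-owned buffer before bit reading; a hedged sketch of the common allocate-copy-zero pattern (helper name is made up):

#include <string.h>
#include "libavutil/mem.h"
#include "avcodec.h"

/* Illustrative: copy input into a buffer with zeroed tail padding so
 * optimized bitstream readers cannot overread stale bytes. */
static uint8_t *copy_padded(const uint8_t *src, int size)
{
    uint8_t *buf = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return NULL;
    memcpy(buf, src, size);
    memset(buf + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    return buf;
}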
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
void * priv_data
Definition: avcodec.h:558
int tile_height
Definition: g2meet.c:141
#define av_free(p)
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
int stack_pos
Definition: g2meet.c:115
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
int len
#define UPDATE_NEIGHBOURS(x)
Definition: g2meet.c:382
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
#define EPIC_PIX_STACK_SIZE
Definition: g2meet.c:45
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
void ff_els_decoder_uninit(ElsUnsignedRung *rung)
Definition: elsdec.c:272
int key_frame
1 -> keyframe, 0 -> not a keyframe
Definition: frame.h:392
int ff_els_decode_bit(ElsDecCtx *ctx, uint8_t *rung)
Definition: elsdec.c:291
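ff_els_decoder_init(), ff_els_decode_bit(), ff_els_decode_unsigned() and ff_els_decoder_uninit() listed in this index are the whole ELS surface used by the ePIC path. A minimal sketch of how they fit together (zero-initialized rung state is an assumption; the real caller keeps its rungs inside ePICContext):

#include "elsdec.h"

/* Illustrative: decode one flag bit and, if set, one unsigned value. */
static unsigned els_demo(const uint8_t *data, size_t size)
{
    ElsDecCtx ctx;
    ElsUnsignedRung ur = { 0 };  /* adaptive state for unsigned values */
    uint8_t flag_rung = 0;       /* adaptive state for a single flag   */
    unsigned v = 0;

    ff_els_decoder_init(&ctx, data, size);
    if (ff_els_decode_bit(&ctx, &flag_rung))
        v = ff_els_decode_unsigned(&ctx, &ur);
    ff_els_decoder_uninit(&ur);
    return v;
}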
int old_width
Definition: g2meet.c:147
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1222
FILE * out
Definition: movenc.c:54
static int epic_decode_run_length(ePICContext *dc, int x, int y, int tile_width, const uint32_t *curr_row, const uint32_t *above_row, const uint32_t *above2_row, uint32_t *pPix, int *pRun)
Definition: g2meet.c:613
#define av_freep(p)
#define stride
void(* idct)(int16_t *block)
Definition: idctdsp.h:65
uint8_t * jpeg_tile
Definition: g2meet.c:149
Definition: vf_addroi.c:26
int old_height
Definition: g2meet.c:147
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:431
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
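AV_CODEC_CAP_DR1 is advertised in a codec's AVCodec table (ff_g2m_decoder, listed above at g2meet.c:1646, is such a table). A skeleton sketch of how the flag is typically declared, trimmed to the relevant fields and not the actual ff_g2m_decoder definition:

#include "avcodec.h"

/* Illustrative skeleton only. */
static const AVCodec sketch_decoder = {
    .name         = "sketch",
    .long_name    = "Illustrative decoder entry",
    .type         = AVMEDIA_TYPE_VIDEO,
    .id           = AV_CODEC_ID_NONE,     /* placeholder id */
    .capabilities = AV_CODEC_CAP_DR1,     /* frames come from ff_get_buffer() */
};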
int i
Definition: input.c:407
Predicted.
Definition: avutil.h:275
#define V
Definition: avdct.c:30
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
static uint8_t tmp[11]
Definition: aes_ctr.c:27