g2meet.c
1 /*
2  * Go2Webinar / Go2Meeting decoder
3  * Copyright (c) 2012 Konstantin Shishkov
4  * Copyright (c) 2013 Maxim Poliakovski
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * Go2Webinar / Go2Meeting decoder
26  */
27 
28 #include <inttypes.h>
29 #include <zlib.h>
30 
31 #include "libavutil/imgutils.h"
32 #include "libavutil/intreadwrite.h"
33 
34 #include "avcodec.h"
35 #include "blockdsp.h"
36 #include "bytestream.h"
37 #include "elsdec.h"
38 #include "get_bits.h"
39 #include "idctdsp.h"
40 #include "internal.h"
41 #include "jpegtables.h"
42 #include "mjpeg.h"
43 
44 #define EPIC_PIX_STACK_SIZE 1024
45 #define EPIC_PIX_STACK_MAX (EPIC_PIX_STACK_SIZE - 1)
46 
47 enum ChunkType {
48  DISPLAY_INFO = 0xC8,
49  TILE_DATA,
50  CURSOR_POS,
51  CURSOR_SHAPE,
52  CHUNK_CC,
53  CHUNK_CD
54 };
55 
56 enum Compression {
57  COMPR_EPIC_J_B = 2,
58  COMPR_KEMPF_J_B = 3
59 };
60 
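/* Fixed quantisation tables for the embedded JPEG data below.  They appear
 * to be the example luma/chroma tables from ITU-T T.81 Annex K scaled down
 * by roughly a factor of two (a fixed quality setting), so the bitstream
 * does not carry its own tables. */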
61 static const uint8_t luma_quant[64] = {
62  8, 6, 5, 8, 12, 20, 26, 31,
63  6, 6, 7, 10, 13, 29, 30, 28,
64  7, 7, 8, 12, 20, 29, 35, 28,
65  7, 9, 11, 15, 26, 44, 40, 31,
66  9, 11, 19, 28, 34, 55, 52, 39,
67  12, 18, 28, 32, 41, 52, 57, 46,
68  25, 32, 39, 44, 52, 61, 60, 51,
69  36, 46, 48, 49, 56, 50, 52, 50
70 };
71 
72 static const uint8_t chroma_quant[64] = {
73  9, 9, 12, 24, 50, 50, 50, 50,
74  9, 11, 13, 33, 50, 50, 50, 50,
75  12, 13, 28, 50, 50, 50, 50, 50,
76  24, 33, 50, 50, 50, 50, 50, 50,
77  50, 50, 50, 50, 50, 50, 50, 50,
78  50, 50, 50, 50, 50, 50, 50, 50,
79  50, 50, 50, 50, 50, 50, 50, 50,
80  50, 50, 50, 50, 50, 50, 50, 50,
81 };
82 
83 typedef struct ePICPixListElem {
84  struct ePICPixListElem *next;
85  uint32_t pixel;
86  uint8_t rung;
87 } ePICPixListElem;
88 
89 typedef struct ePICPixHashElem {
90  uint32_t pix_id;
91  struct ePICPixListElem *list;
92 } ePICPixHashElem;
93 
94 #define EPIC_HASH_SIZE 256
95 typedef struct ePICPixHash {
96  ePICPixHashElem *bucket[EPIC_HASH_SIZE];
97  int bucket_size[EPIC_HASH_SIZE];
98  int bucket_fill[EPIC_HASH_SIZE];
99 } ePICPixHash;
100 
101 typedef struct ePICContext {
102  ElsDecCtx els_ctx;
103  int next_run_pos;
104  ElsUnsignedRung unsigned_rung;
105  uint8_t W_flag_rung;
106  uint8_t N_flag_rung;
107  uint8_t W_ctx_rung[256];
108  uint8_t N_ctx_rung[512];
109  uint8_t nw_pred_rung[256];
110  uint8_t ne_pred_rung[256];
111  uint8_t prev_row_rung[14];
112  uint8_t runlen_zeroes[14];
113  uint8_t runlen_one;
114  int stack_pos;
115  uint32_t stack[EPIC_PIX_STACK_SIZE];
116  ePICPixHash hash;
117 } ePICContext;
118 
119 typedef struct JPGContext {
120  BlockDSPContext bdsp;
121  IDCTDSPContext idsp;
122  ScanTable scantable;
123 
124  VLC dc_vlc[2], ac_vlc[2];
125  int prev_dc[3];
126  DECLARE_ALIGNED(32, int16_t, block)[6][64];
127 
128  uint8_t *buf;
129 } JPGContext;
130 
131 typedef struct G2MContext {
132  ePICContext ec;
133  JPGContext jc;
134 
135  int version;
136 
137  int compression;
138  int width, height, bpp;
139  int orig_width, orig_height;
140  int tile_width, tile_height;
141  int tiles_x, tiles_y, tile_x, tile_y;
142 
143  int got_header;
144 
145  uint8_t *framebuf;
146  int framebuf_stride, old_width, old_height;
147 
148  uint8_t *synth_tile, *jpeg_tile, *epic_buf, *epic_buf_base;
149  int tile_stride, epic_buf_stride, old_tile_w, old_tile_h;
150  int swapuv;
151 
152  uint8_t *kempf_buf, *kempf_flags;
153 
154  uint8_t *cursor;
155  int cursor_stride;
156  int cursor_fmt;
157  int cursor_w, cursor_h, cursor_x, cursor_y;
158  int cursor_hot_x, cursor_hot_y;
159 } G2MContext;
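/* Overview: each frame is split into fixed-size tiles.  Depending on the
 * compression field from DISPLAY_INFO, a tile is coded either with the ePIC
 * lossless coder plus an auxiliary JPEG layer (COMPR_EPIC_J_B, method 2) or
 * with the palette-based "Kempf" coder plus JPEG (COMPR_KEMPF_J_B, method 3).
 * Decoded tiles are composited into the RGB24 framebuf, and the software
 * cursor is painted on top when the frame is output. */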
160 
161 static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table,
162  const uint8_t *val_table, int nb_codes,
163  int is_ac)
164 {
165  uint8_t huff_size[256] = { 0 };
166  uint16_t huff_code[256];
167  uint16_t huff_sym[256];
168  int i;
169 
170  ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
171 
172  for (i = 0; i < 256; i++)
173  huff_sym[i] = i + 16 * is_ac;
174 
175  if (is_ac)
176  huff_sym[0] = 16 * 256;
177 
178  return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
179  huff_code, 2, 2, huff_sym, 2, 2, 0);
180 }
181 
182 static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
183 {
184  int ret;
185 
186  ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance,
187  avpriv_mjpeg_val_dc, 12, 0);
188  if (ret)
189  return ret;
190  ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance,
191  avpriv_mjpeg_val_dc, 12, 0);
192  if (ret)
193  return ret;
194  ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance,
195  avpriv_mjpeg_val_ac_luminance, 251, 1);
196  if (ret)
197  return ret;
198  ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance,
199  avpriv_mjpeg_val_ac_chrominance, 251, 1);
200  if (ret)
201  return ret;
202 
203  ff_blockdsp_init(&c->bdsp, avctx);
204  ff_idctdsp_init(&c->idsp, avctx);
205  ff_init_scantable(c->idsp.idct_permutation, &c->scantable,
206  ff_zigzag_direct);
207 
208  return 0;
209 }
210 
211 static av_cold void jpg_free_context(JPGContext *ctx)
212 {
213  int i;
214 
215  for (i = 0; i < 2; i++) {
216  ff_free_vlc(&ctx->dc_vlc[i]);
217  ff_free_vlc(&ctx->ac_vlc[i]);
218  }
219 
220  av_freep(&ctx->buf);
221 }
222 
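/* jpg_unescape() strips the JPEG byte-stuffing used in the tile payload:
 * a 0x00 byte immediately following 0xFF is dropped, so e.g. the input
 * sequence FF 00 12 becomes FF 12 in the unescaped buffer. */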
223 static void jpg_unescape(const uint8_t *src, int src_size,
224  uint8_t *dst, int *dst_size)
225 {
226  const uint8_t *src_end = src + src_size;
227  uint8_t *dst_start = dst;
228 
229  while (src < src_end) {
230  uint8_t x = *src++;
231 
232  *dst++ = x;
233 
234  if (x == 0xFF && !*src)
235  src++;
236  }
237  *dst_size = dst - dst_start;
238 }
239 
240 static int jpg_decode_block(JPGContext *c, GetBitContext *gb,
241  int plane, int16_t *block)
242 {
243  int dc, val, pos;
244  const int is_chroma = !!plane;
245  const uint8_t *qmat = is_chroma ? chroma_quant : luma_quant;
246 
247  if (get_bits_left(gb) < 1)
248  return AVERROR_INVALIDDATA;
249 
250  c->bdsp.clear_block(block);
251  dc = get_vlc2(gb, c->dc_vlc[is_chroma].table, 9, 3);
252  if (dc < 0)
253  return AVERROR_INVALIDDATA;
254  if (dc)
255  dc = get_xbits(gb, dc);
256  dc = dc * qmat[0] + c->prev_dc[plane];
257  block[0] = dc;
258  c->prev_dc[plane] = dc;
259 
260  pos = 0;
261  while (pos < 63) {
262  val = get_vlc2(gb, c->ac_vlc[is_chroma].table, 9, 3);
263  if (val < 0)
264  return AVERROR_INVALIDDATA;
265  pos += val >> 4;
266  val &= 0xF;
267  if (pos > 63)
268  return val ? AVERROR_INVALIDDATA : 0;
269  if (val) {
270  int nbits = val;
271 
272  val = get_xbits(gb, nbits);
273  val *= qmat[ff_zigzag_direct[pos]];
274  block[c->scantable.permutated[pos]] = val;
275  }
276  }
277  return 0;
278 }
279 
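/* Fixed-point full-range YUV->RGB conversion (BT.601/JFIF style) for the
 * JPEG tiles; the 16.16 coefficients correspond to 1.402 (V->R),
 * 0.344/0.714 (U/V->G) and 1.772 (U->B).  'ridx' swaps the red and blue
 * output positions, driven by the swapuv flag in jpg_decode_data(). */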
280 static inline void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
281 {
282  out[ridx] = av_clip_uint8(Y + (91881 * V + 32768 >> 16));
283  out[1] = av_clip_uint8(Y + (-22554 * U - 46802 * V + 32768 >> 16));
284  out[2 - ridx] = av_clip_uint8(Y + (116130 * U + 32768 >> 16));
285 }
286 
287 static int jpg_decode_data(JPGContext *c, int width, int height,
288  const uint8_t *src, int src_size,
289  uint8_t *dst, int dst_stride,
290  const uint8_t *mask, int mask_stride, int num_mbs,
291  int swapuv)
292 {
293  GetBitContext gb;
294  int mb_w, mb_h, mb_x, mb_y, i, j;
295  int bx, by;
296  int unesc_size;
297  int ret;
298  const int ridx = swapuv ? 2 : 0;
299 
300  if ((ret = av_reallocp(&c->buf,
301  src_size + AV_INPUT_BUFFER_PADDING_SIZE)) < 0)
302  return ret;
303  jpg_unescape(src, src_size, c->buf, &unesc_size);
304  memset(c->buf + unesc_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
305  if((ret = init_get_bits8(&gb, c->buf, unesc_size)) < 0)
306  return ret;
307 
308  width = FFALIGN(width, 16);
309  mb_w = width >> 4;
310  mb_h = (height + 15) >> 4;
311 
312  if (!num_mbs)
313  num_mbs = mb_w * mb_h * 4;
314 
315  for (i = 0; i < 3; i++)
316  c->prev_dc[i] = 1024;
317  bx =
318  by = 0;
319  c->bdsp.clear_blocks(c->block[0]);
320  for (mb_y = 0; mb_y < mb_h; mb_y++) {
321  for (mb_x = 0; mb_x < mb_w; mb_x++) {
322  if (mask && !mask[mb_x * 2] && !mask[mb_x * 2 + 1] &&
323  !mask[mb_x * 2 + mask_stride] &&
324  !mask[mb_x * 2 + 1 + mask_stride]) {
325  bx += 16;
326  continue;
327  }
328  for (j = 0; j < 2; j++) {
329  for (i = 0; i < 2; i++) {
330  if (mask && !mask[mb_x * 2 + i + j * mask_stride])
331  continue;
332  num_mbs--;
333  if ((ret = jpg_decode_block(c, &gb, 0,
334  c->block[i + j * 2])) != 0)
335  return ret;
336  c->idsp.idct(c->block[i + j * 2]);
337  }
338  }
339  for (i = 1; i < 3; i++) {
340  if ((ret = jpg_decode_block(c, &gb, i, c->block[i + 3])) != 0)
341  return ret;
342  c->idsp.idct(c->block[i + 3]);
343  }
344 
345  for (j = 0; j < 16; j++) {
346  uint8_t *out = dst + bx * 3 + (by + j) * dst_stride;
347  for (i = 0; i < 16; i++) {
348  int Y, U, V;
349 
350  Y = c->block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8];
351  U = c->block[4][(i >> 1) + (j >> 1) * 8] - 128;
352  V = c->block[5][(i >> 1) + (j >> 1) * 8] - 128;
353  yuv2rgb(out + i * 3, ridx, Y, U, V);
354  }
355  }
356 
357  if (!num_mbs)
358  return 0;
359  bx += 16;
360  }
361  bx = 0;
362  by += 16;
363  if (mask)
364  mask += mask_stride * 2;
365  }
366 
367  return 0;
368 }
369 
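/* Neighbourhood used by the ePIC coder, relative to the current pixel X
 * at curr_row[x]:
 *
 *                 x-2  x-1   x   x+1
 *   above2_row:        NNW   NN  NNE
 *   above_row:    NWW  NW    N   NE
 *   curr_row:     WW   W    (X)
 */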
370 #define LOAD_NEIGHBOURS(x) \
371  W = curr_row[(x) - 1]; \
372  N = above_row[(x)]; \
373  WW = curr_row[(x) - 2]; \
374  NW = above_row[(x) - 1]; \
375  NE = above_row[(x) + 1]; \
376  NN = above2_row[(x)]; \
377  NNW = above2_row[(x) - 1]; \
378  NWW = above_row[(x) - 2]; \
379  NNE = above2_row[(x) + 1]
380 
381 #define UPDATE_NEIGHBOURS(x) \
382  NNW = NN; \
383  NN = NNE; \
384  NWW = NW; \
385  NW = N; \
386  N = NE; \
387  NE = above_row[(x) + 1]; \
388  NNE = above2_row[(x) + 1]
389 
390 #define R_shift 16
391 #define G_shift 8
392 #define B_shift 0
393 
394 /* improved djb2 hash from http://www.cse.yorku.ca/~oz/hash.html */
395 static int djb2_hash(uint32_t key)
396 {
397  uint32_t h = 5381;
398 
399  h = (h * 33) ^ ((key >> 24) & 0xFF); // xxx: probably not needed at all
400  h = (h * 33) ^ ((key >> 16) & 0xFF);
401  h = (h * 33) ^ ((key >> 8) & 0xFF);
402  h = (h * 33) ^ (key & 0xFF);
403 
404  return h & (EPIC_HASH_SIZE - 1);
405 }
406 
407 static void epic_hash_init(ePICPixHash *hash)
408 {
409  memset(hash, 0, sizeof(*hash));
410 }
411 
412 static ePICPixHashElem *epic_hash_find(const ePICPixHash *hash, uint32_t key)
413 {
414  int i, idx = djb2_hash(key);
415  ePICPixHashElem *bucket = hash->bucket[idx];
416 
417  for (i = 0; i < hash->bucket_fill[idx]; i++)
418  if (bucket[i].pix_id == key)
419  return &bucket[i];
420 
421  return NULL;
422 }
423 
424 static ePICPixHashElem *epic_hash_add(ePICPixHash *hash, uint32_t key)
425 {
426  ePICPixHashElem *bucket, *ret;
427  int idx = djb2_hash(key);
428 
429  if (hash->bucket_size[idx] > INT_MAX / sizeof(**hash->bucket))
430  return NULL;
431 
432  if (!(hash->bucket_fill[idx] < hash->bucket_size[idx])) {
433  int new_size = hash->bucket_size[idx] + 16;
434  bucket = av_realloc(hash->bucket[idx], new_size * sizeof(*bucket));
435  if (!bucket)
436  return NULL;
437  hash->bucket[idx] = bucket;
438  hash->bucket_size[idx] = new_size;
439  }
440 
441  ret = &hash->bucket[idx][hash->bucket_fill[idx]++];
442  memset(ret, 0, sizeof(*ret));
443  ret->pix_id = key;
444  return ret;
445 }
446 
447 static int epic_add_pixel_to_cache(ePICPixHash *hash, uint32_t key, uint32_t pix)
448 {
449  ePICPixListElem *new_elem;
450  ePICPixHashElem *hash_elem = epic_hash_find(hash, key);
451 
452  if (!hash_elem) {
453  if (!(hash_elem = epic_hash_add(hash, key)))
454  return AVERROR(ENOMEM);
455  }
456 
457  new_elem = av_mallocz(sizeof(*new_elem));
458  if (!new_elem)
459  return AVERROR(ENOMEM);
460 
461  new_elem->pixel = pix;
462  new_elem->next = hash_elem->list;
463  hash_elem->list = new_elem;
464 
465  return 0;
466 }
467 
468 static int epic_cache_entries_for_pixel(const ePICPixHash *hash,
469  uint32_t pix)
470 {
471  ePICPixHashElem *hash_elem = epic_hash_find(hash, pix);
472 
473  if (hash_elem != NULL && hash_elem->list != NULL)
474  return 1;
475 
476  return 0;
477 }
478 
479 static void epic_free_pixel_cache(ePICPixHash *hash)
480 {
481  int i, j;
482 
483  for (i = 0; i < EPIC_HASH_SIZE; i++) {
484  for (j = 0; j < hash->bucket_fill[i]; j++) {
485  ePICPixListElem *list_elem = hash->bucket[i][j].list;
486  while (list_elem) {
487  ePICPixListElem *tmp = list_elem->next;
488  av_free(list_elem);
489  list_elem = tmp;
490  }
491  }
492  av_freep(&hash->bucket[i]);
493  hash->bucket_size[i] =
494  hash->bucket_fill[i] = 0;
495  }
496 }
497 
498 static inline int is_pixel_on_stack(const ePICContext *dc, uint32_t pix)
499 {
500  int i;
501 
502  for (i = 0; i < dc->stack_pos; i++)
503  if (dc->stack[i] == pix)
504  break;
505 
506  return i != dc->stack_pos;
507 }
508 
509 #define TOSIGNED(val) (((val) >> 1) ^ -((val) & 1))
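/* TOSIGNED() undoes the zigzag mapping used for the ELS-coded deltas:
 * 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, 4 -> 2, ... */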
510 
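/* Per-component prediction: the predictor is the median of N, W and the
 * gradient N + W - NW (as in LOCO-I/JPEG-LS), corrected by a delta decoded
 * from the ELS bitstream via TOSIGNED(). */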
511 static int epic_decode_component_pred(ePICContext *dc,
512  int N, int W, int NW)
513 {
514  unsigned delta = ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung);
515  return mid_pred(N, N + W - NW, W) - TOSIGNED(delta);
516 }
517 
518 static uint32_t epic_decode_pixel_pred(ePICContext *dc, int x, int y,
519  const uint32_t *curr_row,
520  const uint32_t *above_row)
521 {
522  uint32_t N, W, NW, pred;
523  unsigned delta;
524  int GN, GW, GNW, R, G, B;
525 
526  if (x && y) {
527  W = curr_row[x - 1];
528  N = above_row[x];
529  NW = above_row[x - 1];
530 
531  GN = (N >> G_shift) & 0xFF;
532  GW = (W >> G_shift) & 0xFF;
533  GNW = (NW >> G_shift) & 0xFF;
534 
535  G = epic_decode_component_pred(dc, GN, GW, GNW);
536 
537  R = G + epic_decode_component_pred(dc,
538  ((N >> R_shift) & 0xFF) - GN,
539  ((W >> R_shift) & 0xFF) - GW,
540  ((NW >> R_shift) & 0xFF) - GNW);
541 
542  B = G + epic_decode_component_pred(dc,
543  ((N >> B_shift) & 0xFF) - GN,
544  ((W >> B_shift) & 0xFF) - GW,
545  ((NW >> B_shift) & 0xFF) - GNW);
546  } else {
547  if (x)
548  pred = curr_row[x - 1];
549  else
550  pred = above_row[x];
551 
552  delta = ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung);
553  R = ((pred >> R_shift) & 0xFF) - TOSIGNED(delta);
554 
555  delta = ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung);
556  G = ((pred >> G_shift) & 0xFF) - TOSIGNED(delta);
557 
558  delta = ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung);
559  B = ((pred >> B_shift) & 0xFF) - TOSIGNED(delta);
560  }
561 
562  if (R<0 || G<0 || B<0 || R > 255 || G > 255 || B > 255) {
563  avpriv_request_sample(NULL, "RGB %d %d %d is out of range\n", R, G, B);
564  return 0;
565  }
566 
567  return (R << R_shift) | (G << G_shift) | (B << B_shift);
568 }
569 
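/* A single ELS bit decides whether the candidate pixel (W or N) is accepted
 * as the current pixel; a rejected candidate is pushed onto the exclusion
 * stack so that later prediction stages skip it.  Note that
 * epic_predict_pixel2() further down uses the opposite bit polarity. */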
570 static int epic_predict_pixel(ePICContext *dc, uint8_t *rung,
571  uint32_t *pPix, uint32_t pix)
572 {
573  if (!ff_els_decode_bit(&dc->els_ctx, rung)) {
574  *pPix = pix;
575  return 1;
576  }
577  dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = pix;
578  return 0;
579 }
580 
581 static int epic_handle_edges(ePICContext *dc, int x, int y,
582  const uint32_t *curr_row,
583  const uint32_t *above_row, uint32_t *pPix)
584 {
585  uint32_t pix;
586 
587  if (!x && !y) { /* special case: top-left pixel */
588  /* the top-left pixel is coded independently with 3 unsigned numbers */
589  *pPix = (ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung) << R_shift) |
590  (ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung) << G_shift) |
591  (ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung) << B_shift);
592  return 1;
593  }
594 
595  if (x) { /* predict from W first */
596  pix = curr_row[x - 1];
597  if (epic_predict_pixel(dc, &dc->W_flag_rung, pPix, pix))
598  return 1;
599  }
600 
601  if (y) { /* then try to predict from N */
602  pix = above_row[x];
603  if (!dc->stack_pos || dc->stack[0] != pix) {
604  if (epic_predict_pixel(dc, &dc->N_flag_rung, pPix, pix))
605  return 1;
606  }
607  }
608 
609  return 0;
610 }
611 
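/* The run-length decoder below models equality relations between the
 * neighbouring pixels (see the diagram above LOAD_NEIGHBOURS): a context
 * index is built from pairwise "is different" flags and a per-context ELS
 * rung codes whether the run of the current value continues.  Runs copied
 * from the row above are signalled separately through prev_row_rung,
 * runlen_one and runlen_zeroes. */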
612 static int epic_decode_run_length(ePICContext *dc, int x, int y, int tile_width,
613  const uint32_t *curr_row,
614  const uint32_t *above_row,
615  const uint32_t *above2_row,
616  uint32_t *pPix, int *pRun)
617 {
618  int idx, got_pixel = 0, WWneW, old_WWneW = 0;
619  uint32_t W, WW, N, NN, NW, NE, NWW, NNW, NNE;
620 
621  *pRun = 0;
622 
623  LOAD_NEIGHBOURS(x);
624 
625  if (dc->next_run_pos == x) {
626  /* can't reuse W for the new pixel in this case */
627  WWneW = 1;
628  } else {
629  idx = (WW != W) << 7 |
630  (NW != W) << 6 |
631  (N != NE) << 5 |
632  (NW != N) << 4 |
633  (NWW != NW) << 3 |
634  (NNE != NE) << 2 |
635  (NN != N) << 1 |
636  (NNW != NW);
637  WWneW = ff_els_decode_bit(&dc->els_ctx, &dc->W_ctx_rung[idx]);
638  if (WWneW < 0)
639  return WWneW;
640  }
641 
642  if (WWneW)
643  dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = W;
644  else {
645  *pPix = W;
646  got_pixel = 1;
647  }
648 
649  do {
650  int NWneW = 1;
651  if (got_pixel) // pixel value already known (derived from either W or N)
652  NWneW = *pPix != N;
653  else { // pixel value is unknown and will be decoded later
654  NWneW = *pRun ? NWneW : NW != W;
655 
656  /* TODO: RFC this mess! */
657  switch (((NW != N) << 2) | (NWneW << 1) | WWneW) {
658  case 0:
659  break; // do nothing here
660  case 3:
661  case 5:
662  case 6:
663  case 7:
664  if (!is_pixel_on_stack(dc, N)) {
665  idx = WWneW << 8 |
666  (*pRun ? old_WWneW : WW != W) << 7 |
667  NWneW << 6 |
668  (N != NE) << 5 |
669  (NW != N) << 4 |
670  (NWW != NW) << 3 |
671  (NNE != NE) << 2 |
672  (NN != N) << 1 |
673  (NNW != NW);
674  if (!ff_els_decode_bit(&dc->els_ctx, &dc->N_ctx_rung[idx])) {
675  NWneW = 0;
676  *pPix = N;
677  got_pixel = 1;
678  break;
679  }
680  }
681  /* fall through */
682  default:
683  NWneW = 1;
684  old_WWneW = WWneW;
685  if (!is_pixel_on_stack(dc, N))
686  dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = N;
687  }
688  }
689 
690  (*pRun)++;
691  if (x + *pRun >= tile_width - 1)
692  break;
693 
694  UPDATE_NEIGHBOURS(x + *pRun);
695 
696  if (!NWneW && NW == N && N == NE) {
697  int pos, run, rle;
698  int start_pos = x + *pRun;
699 
700  /* scan for a run of pix in the line above */
701  uint32_t pix = above_row[start_pos + 1];
702  for (pos = start_pos + 2; pos < tile_width; pos++)
703  if (!(above_row[pos] == pix))
704  break;
705  run = pos - start_pos - 1;
706  idx = av_ceil_log2(run);
707  if (ff_els_decode_bit(&dc->els_ctx, &dc->prev_row_rung[idx]))
708  *pRun += run;
709  else {
710  int flag;
711  /* run-length is coded as plain binary number of idx - 1 bits */
712  for (pos = idx - 1, rle = 0, flag = 0; pos >= 0; pos--) {
713  if ((1 << pos) + rle < run &&
714  ff_els_decode_bit(&dc->els_ctx,
715  flag ? &dc->runlen_one
716  : &dc->runlen_zeroes[pos])) {
717  flag = 1;
718  rle |= 1 << pos;
719  }
720  }
721  *pRun += rle;
722  break; // return immediately
723  }
724  if (x + *pRun >= tile_width - 1)
725  break;
726 
727  LOAD_NEIGHBOURS(x + *pRun);
728  WWneW = 0;
729  NWneW = 0;
730  }
731 
732  idx = WWneW << 7 |
733  NWneW << 6 |
734  (N != NE) << 5 |
735  (NW != N) << 4 |
736  (NWW != NW) << 3 |
737  (NNE != NE) << 2 |
738  (NN != N) << 1 |
739  (NNW != NW);
740  WWneW = ff_els_decode_bit(&dc->els_ctx, &dc->W_ctx_rung[idx]);
741  } while (!WWneW);
742 
743  dc->next_run_pos = x + *pRun;
744  return got_pixel;
745 }
746 
747 static int epic_predict_pixel2(ePICContext *dc, uint8_t *rung,
748  uint32_t *pPix, uint32_t pix)
749 {
750  if (ff_els_decode_bit(&dc->els_ctx, rung)) {
751  *pPix = pix;
752  return 1;
753  }
754  dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = pix;
755  return 0;
756 }
757 
758 static int epic_predict_from_NW_NE(ePICContext *dc, int x, int y, int run,
759  int tile_width, const uint32_t *curr_row,
760  const uint32_t *above_row, uint32_t *pPix)
761 {
762  int pos;
763 
764  /* try to reuse the NW pixel first */
765  if (x && y) {
766  uint32_t NW = above_row[x - 1];
767  if (NW != curr_row[x - 1] && NW != above_row[x] && !is_pixel_on_stack(dc, NW)) {
768  if (epic_predict_pixel2(dc, &dc->nw_pred_rung[NW & 0xFF], pPix, NW))
769  return 1;
770  }
771  }
772 
773  /* try to reuse the NE[x + run, y] pixel */
774  pos = x + run - 1;
775  if (pos < tile_width - 1 && y) {
776  uint32_t NE = above_row[pos + 1];
777  if (NE != above_row[pos] && !is_pixel_on_stack(dc, NE)) {
778  if (epic_predict_pixel2(dc, &dc->ne_pred_rung[NE & 0xFF], pPix, NE))
779  return 1;
780  }
781  }
782 
783  return 0;
784 }
785 
786 static int epic_decode_from_cache(ePICContext *dc, uint32_t W, uint32_t *pPix)
787 {
788  ePICPixListElem *list, *prev = NULL;
789  ePICPixHashElem *hash_elem = epic_hash_find(&dc->hash, W);
790 
791  if (!hash_elem || !hash_elem->list)
792  return 0;
793 
794  list = hash_elem->list;
795  while (list) {
796  if (!is_pixel_on_stack(dc, list->pixel)) {
797  if (ff_els_decode_bit(&dc->els_ctx, &list->rung)) {
798  *pPix = list->pixel;
799  if (list != hash_elem->list) {
800  prev->next = list->next;
801  list->next = hash_elem->list;
802  hash_elem->list = list;
803  }
804  return 1;
805  }
806  dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = list->pixel;
807  }
808  prev = list;
809  list = list->next;
810  }
811 
812  return 0;
813 }
814 
815 static int epic_decode_tile(ePICContext *dc, uint8_t *out, int tile_height,
816  int tile_width, int stride)
817 {
818  int x, y;
819  uint32_t pix;
820  uint32_t *curr_row = NULL, *above_row = NULL, *above2_row;
821 
822  for (y = 0; y < tile_height; y++, out += stride) {
823  above2_row = above_row;
824  above_row = curr_row;
825  curr_row = (uint32_t *) out;
826 
827  for (x = 0, dc->next_run_pos = 0; x < tile_width;) {
828  if (dc->els_ctx.err)
829  return AVERROR_INVALIDDATA; // bail out in the case of ELS overflow
830 
831  pix = curr_row[x - 1]; // get W pixel
832 
833  if (y >= 1 && x >= 2 &&
834  pix != curr_row[x - 2] && pix != above_row[x - 1] &&
835  pix != above_row[x - 2] && pix != above_row[x] &&
836  !epic_cache_entries_for_pixel(&dc->hash, pix)) {
837  curr_row[x] = epic_decode_pixel_pred(dc, x, y, curr_row, above_row);
838  x++;
839  } else {
840  int got_pixel, run;
841  dc->stack_pos = 0; // empty stack
842 
843  if (y < 2 || x < 2 || x == tile_width - 1) {
844  run = 1;
845  got_pixel = epic_handle_edges(dc, x, y, curr_row, above_row, &pix);
846  } else {
847  got_pixel = epic_decode_run_length(dc, x, y, tile_width,
848  curr_row, above_row,
849  above2_row, &pix, &run);
850  if (got_pixel < 0)
851  return got_pixel;
852  }
853 
854  if (!got_pixel && !epic_predict_from_NW_NE(dc, x, y, run,
855  tile_width, curr_row,
856  above_row, &pix)) {
857  uint32_t ref_pix = curr_row[x - 1];
858  if (!x || !epic_decode_from_cache(dc, ref_pix, &pix)) {
859  pix = epic_decode_pixel_pred(dc, x, y, curr_row, above_row);
860  if (is_pixel_on_stack(dc, pix))
861  return AVERROR_INVALIDDATA;
862 
863  if (x) {
864  int ret = epic_add_pixel_to_cache(&dc->hash,
865  ref_pix,
866  pix);
867  if (ret)
868  return ret;
869  }
870  }
871  }
872  for (; run > 0; x++, run--)
873  curr_row[x] = pix;
874  }
875  }
876  }
877 
878  return 0;
879 }
880 
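/* An ePIC tile starts with a variable-length integer giving the size of the
 * ELS-coded partition, followed by the ELS data itself and, if any bytes
 * remain, by JPEG data refining the 8x8 blocks that contain the transparent
 * colour decoded below. */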
881 static int epic_jb_decode_tile(G2MContext *c, int tile_x, int tile_y,
882  const uint8_t *src, size_t src_size,
883  AVCodecContext *avctx)
884 {
885  uint8_t prefix, mask = 0x80;
886  int extrabytes, tile_width, tile_height, awidth, aheight;
887  size_t els_dsize;
888  uint8_t *dst;
889 
890  if (!src_size)
891  return 0;
892 
893  /* get data size of the ELS partition as unsigned variable-length integer */
894  prefix = *src++;
895  src_size--;
896  for (extrabytes = 0; (prefix & mask) && (extrabytes < 7); extrabytes++)
897  mask >>= 1;
898  if (extrabytes > 3 || src_size < extrabytes) {
899  av_log(avctx, AV_LOG_ERROR, "ePIC: invalid data size VLI\n");
900  return AVERROR_INVALIDDATA;
901  }
902 
903  els_dsize = prefix & ((0x80 >> extrabytes) - 1); // mask out the length prefix
904  while (extrabytes-- > 0) {
905  els_dsize = (els_dsize << 8) | *src++;
906  src_size--;
907  }
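 /* Illustrative example: a prefix byte of 0x85 has one leading 1 bit, so
  * one extra byte follows; with a following byte of 0x20 this yields
  * els_dsize = (0x85 & 0x3F) << 8 | 0x20 = 0x0520. */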
908 
909  if (src_size < els_dsize) {
910  av_log(avctx, AV_LOG_ERROR, "ePIC: data too short, needed %"SIZE_SPECIFIER", got %"SIZE_SPECIFIER"\n",
911  els_dsize, src_size);
912  return AVERROR_INVALIDDATA;
913  }
914 
915  tile_width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width);
916  tile_height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);
917  awidth = FFALIGN(tile_width, 16);
918  aheight = FFALIGN(tile_height, 16);
919 
920  if (els_dsize) {
921  int ret, i, j, k;
922  uint8_t tr_r, tr_g, tr_b, *buf;
923  uint32_t *in;
924  /* ELS decoder initializations */
925  memset(&c->ec, 0, sizeof(c->ec));
926  ff_els_decoder_init(&c->ec.els_ctx, src, els_dsize);
927  epic_hash_init(&c->ec.hash);
928 
929  /* decode transparent pixel value */
930  tr_r = ff_els_decode_unsigned(&c->ec.els_ctx, &c->ec.unsigned_rung);
931  tr_g = ff_els_decode_unsigned(&c->ec.els_ctx, &c->ec.unsigned_rung);
932  tr_b = ff_els_decode_unsigned(&c->ec.els_ctx, &c->ec.unsigned_rung);
933  if (c->ec.els_ctx.err != 0) {
934  av_log(avctx, AV_LOG_ERROR,
935  "ePIC: couldn't decode transparency pixel!\n");
936  ff_els_decoder_uninit(&c->ec.unsigned_rung);
937  return AVERROR_INVALIDDATA;
938  }
939 
940  ret = epic_decode_tile(&c->ec, c->epic_buf, tile_height, tile_width,
941  c->epic_buf_stride);
942 
943  epic_free_pixel_cache(&c->ec.hash);
944  ff_els_decoder_uninit(&c->ec.unsigned_rung);
945 
946  if (ret) {
947  av_log(avctx, AV_LOG_ERROR,
948  "ePIC: tile decoding failed, frame=%d, tile_x=%d, tile_y=%d\n",
949  avctx->frame_number, tile_x, tile_y);
950  return AVERROR_INVALIDDATA;
951  }
952 
953  buf = c->epic_buf;
954  dst = c->framebuf + tile_x * c->tile_width * 3 +
955  tile_y * c->tile_height * c->framebuf_stride;
956 
957  for (j = 0; j < tile_height; j++) {
958  uint8_t *out = dst;
959  in = (uint32_t *) buf;
960  for (i = 0; i < tile_width; i++) {
961  out[0] = (in[i] >> R_shift) & 0xFF;
962  out[1] = (in[i] >> G_shift) & 0xFF;
963  out[2] = (in[i] >> B_shift) & 0xFF;
964  out += 3;
965  }
966  buf += c->epic_buf_stride;
967  dst += c->framebuf_stride;
968  }
969 
970  if (src_size > els_dsize) {
971  uint8_t *jpg;
972  uint32_t tr;
973  int bstride = FFALIGN(tile_width, 16) >> 3;
974  int nblocks = 0;
975  int estride = c->epic_buf_stride >> 2;
976 
977  src += els_dsize;
978  src_size -= els_dsize;
979 
980  in = (uint32_t *) c->epic_buf;
981  tr = (tr_r << R_shift) | (tr_g << G_shift) | (tr_b << B_shift);
982 
983  memset(c->kempf_flags, 0,
984  (aheight >> 3) * bstride * sizeof(*c->kempf_flags));
985  for (j = 0; j < tile_height; j += 8) {
986  for (i = 0; i < tile_width; i += 8) {
987  c->kempf_flags[(i >> 3) + (j >> 3) * bstride] = 0;
988  for (k = 0; k < 8 * 8; k++) {
989  if (in[i + (k & 7) + (k >> 3) * estride] == tr) {
990  c->kempf_flags[(i >> 3) + (j >> 3) * bstride] = 1;
991  nblocks++;
992  break;
993  }
994  }
995  }
996  in += 8 * estride;
997  }
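 /* kempf_flags now marks every 8x8 block that contains at least one
  * transparent pixel; only those nblocks blocks carry JPEG data, which is
  * composited back under the transparent pixels below. */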
998 
999  memset(c->jpeg_tile, 0, c->tile_stride * aheight);
1000  jpg_decode_data(&c->jc, awidth, aheight, src, src_size,
1001  c->jpeg_tile, c->tile_stride,
1002  c->kempf_flags, bstride, nblocks, c->swapuv);
1003 
1004  in = (uint32_t *) c->epic_buf;
1005  dst = c->framebuf + tile_x * c->tile_width * 3 +
1006  tile_y * c->tile_height * c->framebuf_stride;
1007  jpg = c->jpeg_tile;
1008  for (j = 0; j < tile_height; j++) {
1009  for (i = 0; i < tile_width; i++)
1010  if (in[i] == tr)
1011  memcpy(dst + i * 3, jpg + i * 3, 3);
1012  in += c->epic_buf_stride >> 2;
1013  dst += c->framebuf_stride;
1014  jpg += c->tile_stride;
1015  }
1016  }
1017  } else {
1018  dst = c->framebuf + tile_x * c->tile_width * 3 +
1019  tile_y * c->tile_height * c->framebuf_stride;
1020  return jpg_decode_data(&c->jc, tile_width, tile_height, src, src_size,
1021  dst, c->framebuf_stride, NULL, 0, 0, c->swapuv);
1022  }
1023 
1024  return 0;
1025 }
1026 
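/* kempf_restore_buf() expands the zlib-decompressed tile: each row starts
 * with a flag byte (non-zero means the row is skipped), followed by packed
 * palette indices of 1, 2, 4 or 8 bits; the index reserved for the
 * transparent colour is filled from the JPEG tile instead. */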
1027 static int kempf_restore_buf(const uint8_t *src, int len,
1028  uint8_t *dst, int stride,
1029  const uint8_t *jpeg_tile, int tile_stride,
1030  int width, int height,
1031  const uint8_t *pal, int npal, int tidx)
1032 {
1033  GetBitContext gb;
1034  int i, j, nb, col;
1035  int ret;
1036  int align_width = FFALIGN(width, 16);
1037 
1038  if ((ret = init_get_bits8(&gb, src, len)) < 0)
1039  return ret;
1040 
1041  if (npal <= 2) nb = 1;
1042  else if (npal <= 4) nb = 2;
1043  else if (npal <= 16) nb = 4;
1044  else nb = 8;
1045 
1046  for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) {
1047  if (get_bits(&gb, 8))
1048  continue;
1049  for (i = 0; i < width; i++) {
1050  col = get_bits(&gb, nb);
1051  if (col != tidx)
1052  memcpy(dst + i * 3, pal + col * 3, 3);
1053  else
1054  memcpy(dst + i * 3, jpeg_tile + i * 3, 3);
1055  }
1056  skip_bits_long(&gb, nb * (align_width - width));
1057  }
1058 
1059  return 0;
1060 }
1061 
1062 static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
1063  const uint8_t *src, int src_size)
1064 {
1065  int width, height;
1066  int hdr, zsize, npal, tidx = -1, ret;
1067  int i, j;
1068  const uint8_t *src_end = src + src_size;
1069  uint8_t pal[768], transp[3];
1070  uLongf dlen = (c->tile_width + 1) * c->tile_height;
1071  int sub_type;
1072  int nblocks, cblocks, bstride;
1073  int bits, bitbuf, coded;
1074  uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 +
1075  tile_y * c->tile_height * c->framebuf_stride;
1076 
1077  if (src_size < 2)
1078  return AVERROR_INVALIDDATA;
1079 
1080  width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width);
1081  height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);
1082 
1083  hdr = *src++;
1084  sub_type = hdr >> 5;
1085  if (sub_type == 0) {
1086  int j;
1087  memcpy(transp, src, 3);
1088  src += 3;
1089  for (j = 0; j < height; j++, dst += c->framebuf_stride)
1090  for (i = 0; i < width; i++)
1091  memcpy(dst + i * 3, transp, 3);
1092  return 0;
1093  } else if (sub_type == 1) {
1094  return jpg_decode_data(&c->jc, width, height, src, src_end - src,
1095  dst, c->framebuf_stride, NULL, 0, 0, 0);
1096  }
1097 
1098  if (sub_type != 2) {
1099  memcpy(transp, src, 3);
1100  src += 3;
1101  }
1102  npal = *src++ + 1;
1103  if (src_end - src < npal * 3)
1104  return AVERROR_INVALIDDATA;
1105  memcpy(pal, src, npal * 3);
1106  src += npal * 3;
1107  if (sub_type != 2) {
1108  for (i = 0; i < npal; i++) {
1109  if (!memcmp(pal + i * 3, transp, 3)) {
1110  tidx = i;
1111  break;
1112  }
1113  }
1114  }
1115 
1116  if (src_end - src < 2)
1117  return 0;
1118  zsize = (src[0] << 8) | src[1];
1119  src += 2;
1120 
1121  if (src_end - src < zsize + (sub_type != 2))
1122  return AVERROR_INVALIDDATA;
1123 
1124  ret = uncompress(c->kempf_buf, &dlen, src, zsize);
1125  if (ret)
1126  return AVERROR_INVALIDDATA;
1127  src += zsize;
1128 
1129  if (sub_type == 2) {
1130  kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
1131  NULL, 0, width, height, pal, npal, tidx);
1132  return 0;
1133  }
1134 
1135  nblocks = *src++ + 1;
1136  cblocks = 0;
1137  bstride = FFALIGN(width, 16) >> 3;
1138  // blocks are coded LSB and we need normal bitreader for JPEG data
1139  bits = 0;
1140  for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
1141  for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
1142  if (!bits) {
1143  if (src >= src_end)
1144  return AVERROR_INVALIDDATA;
1145  bitbuf = *src++;
1146  bits = 8;
1147  }
1148  coded = bitbuf & 1;
1149  bits--;
1150  bitbuf >>= 1;
1151  cblocks += coded;
1152  if (cblocks > nblocks)
1153  return AVERROR_INVALIDDATA;
1154  c->kempf_flags[j * 2 + i * 2 * bstride] =
1155  c->kempf_flags[j * 2 + 1 + i * 2 * bstride] =
1156  c->kempf_flags[j * 2 + (i * 2 + 1) * bstride] =
1157  c->kempf_flags[j * 2 + 1 + (i * 2 + 1) * bstride] = coded;
1158  }
1159  }
1160 
1161  memset(c->jpeg_tile, 0, c->tile_stride * height);
1162  jpg_decode_data(&c->jc, width, height, src, src_end - src,
1163  c->jpeg_tile, c->tile_stride,
1164  c->kempf_flags, bstride, nblocks * 4, 0);
1165 
1166  kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
1167  c->jpeg_tile, c->tile_stride,
1168  width, height, pal, npal, tidx);
1169 
1170  return 0;
1171 }
1172 
1173 static int g2m_init_buffers(G2MContext *c)
1174 {
1175  int aligned_height;
1176 
1177  if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) {
1178  c->framebuf_stride = FFALIGN(c->width + 15, 16) * 3;
1179  aligned_height = c->height + 15;
1180  av_free(c->framebuf);
1181  c->framebuf = av_mallocz_array(c->framebuf_stride, aligned_height);
1182  if (!c->framebuf)
1183  return AVERROR(ENOMEM);
1184  }
1185  if (!c->synth_tile || !c->jpeg_tile ||
1186  (c->compression == 2 && !c->epic_buf_base) ||
1187  c->old_tile_w < c->tile_width ||
1188  c->old_tile_h < c->tile_height) {
1189  c->tile_stride = FFALIGN(c->tile_width, 16) * 3;
1190  c->epic_buf_stride = FFALIGN(c->tile_width * 4, 16);
1191  aligned_height = FFALIGN(c->tile_height, 16);
1192  av_freep(&c->synth_tile);
1193  av_freep(&c->jpeg_tile);
1194  av_freep(&c->kempf_buf);
1195  av_freep(&c->kempf_flags);
1196  av_freep(&c->epic_buf_base);
1197  c->epic_buf = NULL;
1198  c->synth_tile = av_mallocz(c->tile_stride * aligned_height);
1199  c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height);
1200  c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height +
1201  AV_INPUT_BUFFER_PADDING_SIZE);
1202  c->kempf_flags = av_mallocz(c->tile_width * aligned_height);
1203  if (!c->synth_tile || !c->jpeg_tile ||
1204  !c->kempf_buf || !c->kempf_flags)
1205  return AVERROR(ENOMEM);
1206  if (c->compression == 2) {
1207  c->epic_buf_base = av_mallocz(c->epic_buf_stride * aligned_height + 4);
1208  if (!c->epic_buf_base)
1209  return AVERROR(ENOMEM);
1210  c->epic_buf = c->epic_buf_base + 4;
1211  }
1212  }
1213 
1214  return 0;
1215 }
1216 
1217 static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c,
1218  GetByteContext *gb)
1219 {
1220  int i, j, k;
1221  uint8_t *dst;
1222  uint32_t bits;
1223  uint32_t cur_size, cursor_w, cursor_h, cursor_stride;
1224  uint32_t cursor_hot_x, cursor_hot_y;
1225  int cursor_fmt, err;
1226 
1227  cur_size = bytestream2_get_be32(gb);
1228  cursor_w = bytestream2_get_byte(gb);
1229  cursor_h = bytestream2_get_byte(gb);
1230  cursor_hot_x = bytestream2_get_byte(gb);
1231  cursor_hot_y = bytestream2_get_byte(gb);
1232  cursor_fmt = bytestream2_get_byte(gb);
1233 
1234  cursor_stride = FFALIGN(cursor_w, cursor_fmt==1 ? 32 : 1) * 4;
1235 
1236  if (cursor_w < 1 || cursor_w > 256 ||
1237  cursor_h < 1 || cursor_h > 256) {
1238  av_log(avctx, AV_LOG_ERROR, "Invalid cursor dimensions %"PRIu32"x%"PRIu32"\n",
1239  cursor_w, cursor_h);
1240  return AVERROR_INVALIDDATA;
1241  }
1242  if (cursor_hot_x > cursor_w || cursor_hot_y > cursor_h) {
1243  av_log(avctx, AV_LOG_WARNING, "Invalid hotspot position %"PRIu32",%"PRIu32"\n",
1244  cursor_hot_x, cursor_hot_y);
1245  cursor_hot_x = FFMIN(cursor_hot_x, cursor_w - 1);
1246  cursor_hot_y = FFMIN(cursor_hot_y, cursor_h - 1);
1247  }
1248  if (cur_size - 9 > bytestream2_get_bytes_left(gb) ||
1249  c->cursor_w * c->cursor_h / 4 > cur_size) {
1250  av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %"PRIu32"/%u\n",
1251  cur_size, bytestream2_get_bytes_left(gb));
1252  return AVERROR_INVALIDDATA;
1253  }
1254  if (cursor_fmt != 1 && cursor_fmt != 32) {
1255  avpriv_report_missing_feature(avctx, "Cursor format %d",
1256  cursor_fmt);
1257  return AVERROR_PATCHWELCOME;
1258  }
1259 
1260  if ((err = av_reallocp(&c->cursor, cursor_stride * cursor_h)) < 0) {
1261  av_log(avctx, AV_LOG_ERROR, "Cannot allocate cursor buffer\n");
1262  return err;
1263  }
1264 
1265  c->cursor_w = cursor_w;
1266  c->cursor_h = cursor_h;
1267  c->cursor_hot_x = cursor_hot_x;
1268  c->cursor_hot_y = cursor_hot_y;
1269  c->cursor_fmt = cursor_fmt;
1270  c->cursor_stride = cursor_stride;
1271 
1272  dst = c->cursor;
1273  switch (c->cursor_fmt) {
1274  case 1: // old monochrome
1275  for (j = 0; j < c->cursor_h; j++) {
1276  for (i = 0; i < c->cursor_w; i += 32) {
1277  bits = bytestream2_get_be32(gb);
1278  for (k = 0; k < 32; k++) {
1279  dst[0] = !!(bits & 0x80000000);
1280  dst += 4;
1281  bits <<= 1;
1282  }
1283  }
1284  }
1285 
1286  dst = c->cursor;
1287  for (j = 0; j < c->cursor_h; j++) {
1288  for (i = 0; i < c->cursor_w; i += 32) {
1289  bits = bytestream2_get_be32(gb);
1290  for (k = 0; k < 32; k++) {
1291  int mask_bit = !!(bits & 0x80000000);
1292  switch (dst[0] * 2 + mask_bit) {
1293  case 0:
1294  dst[0] = 0xFF;
1295  dst[1] = 0x00;
1296  dst[2] = 0x00;
1297  dst[3] = 0x00;
1298  break;
1299  case 1:
1300  dst[0] = 0xFF;
1301  dst[1] = 0xFF;
1302  dst[2] = 0xFF;
1303  dst[3] = 0xFF;
1304  break;
1305  default:
1306  dst[0] = 0x00;
1307  dst[1] = 0x00;
1308  dst[2] = 0x00;
1309  dst[3] = 0x00;
1310  }
1311  dst += 4;
1312  bits <<= 1;
1313  }
1314  }
1315  }
1316  break;
1317  case 32: // full colour
1318  /* skip monochrome version of the cursor and decode RGBA instead */
1319  bytestream2_skip(gb, c->cursor_h * (FFALIGN(c->cursor_w, 32) >> 3));
1320  for (j = 0; j < c->cursor_h; j++) {
1321  for (i = 0; i < c->cursor_w; i++) {
1322  int val = bytestream2_get_be32(gb);
1323  *dst++ = val >> 0;
1324  *dst++ = val >> 8;
1325  *dst++ = val >> 16;
1326  *dst++ = val >> 24;
1327  }
1328  }
1329  break;
1330  default:
1331  return AVERROR_PATCHWELCOME;
1332  }
1333  return 0;
1334 }
1335 
1336 #define APPLY_ALPHA(src, new, alpha) \
1337  src = (src * (256 - alpha) + new * alpha) >> 8
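/* Linear blend of a framebuffer byte with a cursor byte; the 8-bit alpha is
 * treated as a fraction of 256, so 0xFF is almost, but not exactly, fully
 * opaque. */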
1338 
1339 static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride)
1340 {
1341  int i, j;
1342  int x, y, w, h;
1343  const uint8_t *cursor;
1344 
1345  if (!c->cursor)
1346  return;
1347 
1348  x = c->cursor_x - c->cursor_hot_x;
1349  y = c->cursor_y - c->cursor_hot_y;
1350 
1351  cursor = c->cursor;
1352  w = c->cursor_w;
1353  h = c->cursor_h;
1354 
1355  if (x + w > c->width)
1356  w = c->width - x;
1357  if (y + h > c->height)
1358  h = c->height - y;
1359  if (x < 0) {
1360  w += x;
1361  cursor += -x * 4;
1362  } else {
1363  dst += x * 3;
1364  }
1365 
1366  if (y < 0)
1367  h += y;
1368  if (w < 0 || h < 0)
1369  return;
1370  if (y < 0) {
1371  cursor += -y * c->cursor_stride;
1372  } else {
1373  dst += y * stride;
1374  }
1375 
1376  for (j = 0; j < h; j++) {
1377  for (i = 0; i < w; i++) {
1378  uint8_t alpha = cursor[i * 4];
1379  APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha);
1380  APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha);
1381  APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha);
1382  }
1383  dst += stride;
1384  cursor += c->cursor_stride;
1385  }
1386 }
1387 
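/* A G2M frame is a sequence of chunks, each prefixed with a 32-bit size and
 * a one-byte type: DISPLAY_INFO (re)initialises geometry and buffers,
 * TILE_DATA carries one compressed tile, and CURSOR_POS / CURSOR_SHAPE
 * update the software cursor painted over the output frame. */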
1388 static int g2m_decode_frame(AVCodecContext *avctx, void *data,
1389  int *got_picture_ptr, AVPacket *avpkt)
1390 {
1391  const uint8_t *buf = avpkt->data;
1392  int buf_size = avpkt->size;
1393  G2MContext *c = avctx->priv_data;
1394  AVFrame *pic = data;
1395  GetByteContext bc, tbc;
1396  int magic;
1397  int got_header = 0;
1398  uint32_t chunk_size, r_mask, g_mask, b_mask;
1399  int chunk_type, chunk_start;
1400  int i;
1401  int ret;
1402 
1403  if (buf_size < 12) {
1404  av_log(avctx, AV_LOG_ERROR,
1405  "Frame should have at least 12 bytes, got %d instead\n",
1406  buf_size);
1407  return AVERROR_INVALIDDATA;
1408  }
1409 
1410  bytestream2_init(&bc, buf, buf_size);
1411 
1412  magic = bytestream2_get_be32(&bc);
1413  if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') ||
1414  (magic & 0xF) < 2 || (magic & 0xF) > 5) {
1415  av_log(avctx, AV_LOG_ERROR, "Wrong magic %08X\n", magic);
1416  return AVERROR_INVALIDDATA;
1417  }
1418 
1419  c->swapuv = magic == MKBETAG('G', '2', 'M', '2');
1420 
1421  while (bytestream2_get_bytes_left(&bc) > 5) {
1422  chunk_size = bytestream2_get_le32(&bc) - 1;
1423  chunk_type = bytestream2_get_byte(&bc);
1424  chunk_start = bytestream2_tell(&bc);
1425  if (chunk_size > bytestream2_get_bytes_left(&bc)) {
1426  av_log(avctx, AV_LOG_ERROR, "Invalid chunk size %"PRIu32" type %02X\n",
1427  chunk_size, chunk_type);
1428  break;
1429  }
1430  switch (chunk_type) {
1431  case DISPLAY_INFO:
1432  got_header =
1433  c->got_header = 0;
1434  if (chunk_size < 21) {
1435  av_log(avctx, AV_LOG_ERROR, "Invalid display info size %"PRIu32"\n",
1436  chunk_size);
1437  break;
1438  }
1439  c->width = bytestream2_get_be32(&bc);
1440  c->height = bytestream2_get_be32(&bc);
1441  if (c->width < 16 || c->height < 16) {
1442  av_log(avctx, AV_LOG_ERROR,
1443  "Invalid frame dimensions %dx%d\n",
1444  c->width, c->height);
1445  ret = AVERROR_INVALIDDATA;
1446  goto header_fail;
1447  }
1448  if (c->width != avctx->width || c->height != avctx->height) {
1449  ret = ff_set_dimensions(avctx, c->width, c->height);
1450  if (ret < 0)
1451  goto header_fail;
1452  }
1453  c->compression = bytestream2_get_be32(&bc);
1454  if (c->compression != 2 && c->compression != 3) {
1455  avpriv_report_missing_feature(avctx, "Compression method %d",
1456  c->compression);
1457  ret = AVERROR_PATCHWELCOME;
1458  goto header_fail;
1459  }
1460  c->tile_width = bytestream2_get_be32(&bc);
1461  c->tile_height = bytestream2_get_be32(&bc);
1462  if (c->tile_width <= 0 || c->tile_height <= 0 ||
1463  ((c->tile_width | c->tile_height) & 0xF) ||
1464  c->tile_width * (uint64_t)c->tile_height >= INT_MAX / 4 ||
1465  av_image_check_size2(c->tile_width, c->tile_height, avctx->max_pixels, avctx->pix_fmt, 0, avctx) < 0
1466  ) {
1467  av_log(avctx, AV_LOG_ERROR,
1468  "Invalid tile dimensions %dx%d\n",
1469  c->tile_width, c->tile_height);
1470  ret = AVERROR_INVALIDDATA;
1471  goto header_fail;
1472  }
1473  c->tiles_x = (c->width + c->tile_width - 1) / c->tile_width;
1474  c->tiles_y = (c->height + c->tile_height - 1) / c->tile_height;
1475  c->bpp = bytestream2_get_byte(&bc);
1476  if (c->bpp == 32) {
1477  if (bytestream2_get_bytes_left(&bc) < 16 ||
1478  (chunk_size - 21) < 16) {
1479  av_log(avctx, AV_LOG_ERROR,
1480  "Display info: missing bitmasks!\n");
1481  ret = AVERROR_INVALIDDATA;
1482  goto header_fail;
1483  }
1484  r_mask = bytestream2_get_be32(&bc);
1485  g_mask = bytestream2_get_be32(&bc);
1486  b_mask = bytestream2_get_be32(&bc);
1487  if (r_mask != 0xFF0000 || g_mask != 0xFF00 || b_mask != 0xFF) {
1488  avpriv_report_missing_feature(avctx,
1489  "Bitmasks: R=%"PRIX32", G=%"PRIX32", B=%"PRIX32,
1490  r_mask, g_mask, b_mask);
1491  ret = AVERROR_PATCHWELCOME;
1492  goto header_fail;
1493  }
1494  } else {
1495  avpriv_request_sample(avctx, "bpp=%d", c->bpp);
1496  ret = AVERROR_PATCHWELCOME;
1497  goto header_fail;
1498  }
1499  if (g2m_init_buffers(c)) {
1500  ret = AVERROR(ENOMEM);
1501  goto header_fail;
1502  }
1503  got_header = 1;
1504  break;
1505  case TILE_DATA:
1506  if (!c->tiles_x || !c->tiles_y) {
1507  av_log(avctx, AV_LOG_WARNING,
1508  "No display info - skipping tile\n");
1509  break;
1510  }
1511  if (chunk_size < 2) {
1512  av_log(avctx, AV_LOG_ERROR, "Invalid tile data size %"PRIu32"\n",
1513  chunk_size);
1514  break;
1515  }
1516  c->tile_x = bytestream2_get_byte(&bc);
1517  c->tile_y = bytestream2_get_byte(&bc);
1518  if (c->tile_x >= c->tiles_x || c->tile_y >= c->tiles_y) {
1519  av_log(avctx, AV_LOG_ERROR,
1520  "Invalid tile pos %d,%d (in %dx%d grid)\n",
1521  c->tile_x, c->tile_y, c->tiles_x, c->tiles_y);
1522  break;
1523  }
1524  ret = 0;
1525  switch (c->compression) {
1526  case COMPR_EPIC_J_B:
1527  ret = epic_jb_decode_tile(c, c->tile_x, c->tile_y,
1528  buf + bytestream2_tell(&bc),
1529  chunk_size - 2, avctx);
1530  break;
1531  case COMPR_KEMPF_J_B:
1532  ret = kempf_decode_tile(c, c->tile_x, c->tile_y,
1533  buf + bytestream2_tell(&bc),
1534  chunk_size - 2);
1535  break;
1536  }
1537  if (ret && c->framebuf)
1538  av_log(avctx, AV_LOG_ERROR, "Error decoding tile %d,%d\n",
1539  c->tile_x, c->tile_y);
1540  break;
1541  case CURSOR_POS:
1542  if (chunk_size < 5) {
1543  av_log(avctx, AV_LOG_ERROR, "Invalid cursor pos size %"PRIu32"\n",
1544  chunk_size);
1545  break;
1546  }
1547  c->cursor_x = bytestream2_get_be16(&bc);
1548  c->cursor_y = bytestream2_get_be16(&bc);
1549  break;
1550  case CURSOR_SHAPE:
1551  if (chunk_size < 8) {
1552  av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %"PRIu32"\n",
1553  chunk_size);
1554  break;
1555  }
1556  bytestream2_init(&tbc, buf + bytestream2_tell(&bc),
1557  chunk_size - 4);
1558  g2m_load_cursor(avctx, c, &tbc);
1559  break;
1560  case CHUNK_CC:
1561  case CHUNK_CD:
1562  break;
1563  default:
1564  av_log(avctx, AV_LOG_WARNING, "Skipping chunk type %02d\n",
1565  chunk_type);
1566  }
1567 
1568  /* navigate to next chunk */
1569  bytestream2_skip(&bc, chunk_start + chunk_size - bytestream2_tell(&bc));
1570  }
1571  if (got_header)
1572  c->got_header = 1;
1573 
1574  if (c->width && c->height && c->framebuf) {
1575  if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
1576  return ret;
1577 
1578  pic->key_frame = got_header;
1579  pic->pict_type = got_header ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1580 
1581  for (i = 0; i < avctx->height; i++)
1582  memcpy(pic->data[0] + i * pic->linesize[0],
1583  c->framebuf + i * c->framebuf_stride,
1584  c->width * 3);
1585  g2m_paint_cursor(c, pic->data[0], pic->linesize[0]);
1586 
1587  *got_picture_ptr = 1;
1588  }
1589 
1590  return buf_size;
1591 
1592 header_fail:
1593  c->width =
1594  c->height = 0;
1595  c->tiles_x =
1596  c->tiles_y = 0;
1597  c->tile_width =
1598  c->tile_height = 0;
1599  return ret;
1600 }
1601 
1602 static av_cold int g2m_decode_init(AVCodecContext *avctx)
1603 {
1604  G2MContext *const c = avctx->priv_data;
1605  int ret;
1606 
1607  if ((ret = jpg_init(avctx, &c->jc)) != 0) {
1608  av_log(avctx, AV_LOG_ERROR, "Cannot initialise VLCs\n");
1609  jpg_free_context(&c->jc);
1610  return AVERROR(ENOMEM);
1611  }
1612 
1613  avctx->pix_fmt = AV_PIX_FMT_RGB24;
1614 
1615  // store original sizes and check against those if resize happens
1616  c->orig_width = avctx->width;
1617  c->orig_height = avctx->height;
1618 
1619  return 0;
1620 }
1621 
1622 static av_cold int g2m_decode_end(AVCodecContext *avctx)
1623 {
1624  G2MContext *const c = avctx->priv_data;
1625 
1626  jpg_free_context(&c->jc);
1627 
1628  av_freep(&c->epic_buf_base);
1629  c->epic_buf = NULL;
1630  av_freep(&c->kempf_buf);
1631  av_freep(&c->kempf_flags);
1632  av_freep(&c->synth_tile);
1633  av_freep(&c->jpeg_tile);
1634  av_freep(&c->cursor);
1635  av_freep(&c->framebuf);
1636 
1637  return 0;
1638 }
1639 
1640 AVCodec ff_g2m_decoder = {
1641  .name = "g2m",
1642  .long_name = NULL_IF_CONFIG_SMALL("Go2Meeting"),
1643  .type = AVMEDIA_TYPE_VIDEO,
1644  .id = AV_CODEC_ID_G2M,
1645  .priv_data_size = sizeof(G2MContext),
1646  .init = g2m_decode_init,
1647  .close = g2m_decode_end,
1648  .decode = g2m_decode_frame,
1649  .capabilities = AV_CODEC_CAP_DR1,
1650  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
1651 };
#define LOAD_NEIGHBOURS(x)
Definition: g2meet.c:370
static int epic_predict_from_NW_NE(ePICContext *dc, int x, int y, int run, int tile_width, const uint32_t *curr_row, const uint32_t *above_row, uint32_t *pPix)
Definition: g2meet.c:758
int plane
Definition: avisynth_c.h:384
int tiles_y
Definition: g2meet.c:141
int cursor_hot_y
Definition: g2meet.c:158
#define NULL
Definition: coverity.c:32
const char const char void * val
Definition: avisynth_c.h:863
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int epic_handle_edges(ePICContext *dc, int x, int y, const uint32_t *curr_row, const uint32_t *above_row, uint32_t *pPix)
Definition: g2meet.c:581
static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y, const uint8_t *src, int src_size)
Definition: g2meet.c:1062
static int epic_add_pixel_to_cache(ePICPixHash *hash, uint32_t key, uint32_t pix)
Definition: g2meet.c:447
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
uint8_t * kempf_flags
Definition: g2meet.c:152
void * av_realloc(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory.
Definition: mem.c:135
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
int height
Definition: g2meet.c:138
ChunkType
Definition: g2meet.c:47
static av_cold void jpg_free_context(JPGContext *ctx)
Definition: g2meet.c:211
uint8_t * epic_buf
Definition: g2meet.c:148
uint8_t * kempf_buf
Definition: g2meet.c:152
misc image utilities
static int chunk_start(AVFormatContext *s)
Definition: webm_chunk.c:164
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
int width
Definition: g2meet.c:138
uint32_t pixel
Definition: g2meet.c:85
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
static int epic_predict_pixel(ePICContext *dc, uint8_t *rung, uint32_t *pPix, uint32_t pix)
Definition: g2meet.c:570
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
int got_header
Definition: g2meet.c:143
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
int cursor_fmt
Definition: g2meet.c:156
uint8_t nw_pred_rung[256]
Definition: g2meet.c:109
Entropy Logarithmic-Scale binary arithmetic coder.
void(* clear_block)(int16_t *block)
Definition: blockdsp.h:36
#define avpriv_request_sample(...)
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:280
static int epic_decode_tile(ePICContext *dc, uint8_t *out, int tile_height, int tile_width, int stride)
Definition: g2meet.c:815
uint32_t stack[EPIC_PIX_STACK_SIZE]
Definition: g2meet.c:115
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:273
Scantable.
Definition: idctdsp.h:31
int size
Definition: avcodec.h:1483
int next_run_pos
Definition: g2meet.c:103
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:60
#define EPIC_HASH_SIZE
Definition: g2meet.c:94
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1780
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
const char * key
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
VLC dc_vlc[2]
Definition: g2meet.c:124
uint8_t permutated[64]
Definition: idctdsp.h:33
uint8_t run
Definition: svq3.c:206
#define R_shift
Definition: g2meet.c:390
#define src
Definition: vp8dsp.c:254
static int g2m_init_buffers(G2MContext *c)
Definition: g2meet.c:1173
int swapuv
Definition: g2meet.c:150
AVCodec.
Definition: avcodec.h:3494
int16_t block[6][64]
Definition: g2meet.c:126
MJPEG encoder and decoder.
Definition: vf_addroi.c:26
static void jpg_unescape(const uint8_t *src, int src_size, uint8_t *dst, int *dst_size)
Definition: g2meet.c:223
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
#define B_shift
Definition: g2meet.c:392
#define N
Definition: af_mcompand.c:54
int tile_width
Definition: g2meet.c:140
uint8_t rung
Definition: g2meet.c:86
int bpp
Definition: g2meet.c:138
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
void ff_mjpeg_build_huffman_codes(uint8_t *huff_size, uint16_t *huff_code, const uint8_t *bits_table, const uint8_t *val_table)
Definition: jpegtables.c:127
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:82
int tile_x
Definition: g2meet.c:141
static int epic_cache_entries_for_pixel(const ePICPixHash *hash, uint32_t pix)
Definition: g2meet.c:468
float delta
static ePICPixHashElem * epic_hash_find(const ePICPixHash *hash, uint32_t key)
Definition: g2meet.c:412
int cursor_h
Definition: g2meet.c:157
static int epic_decode_component_pred(ePICContext *dc, int N, int W, int NW)
Definition: g2meet.c:511
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
static int is_pixel_on_stack(const ePICContext *dc, uint32_t pix)
Definition: g2meet.c:498
ePICPixHash hash
Definition: g2meet.c:116
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
Definition: jpegtables.c:65
int framebuf_stride
Definition: g2meet.c:146
struct ePICPixListElem * list
Definition: g2meet.c:91
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
Definition: mem.h:112
#define height
uint8_t N_ctx_rung[512]
Definition: g2meet.c:108
uint8_t * data
Definition: avcodec.h:1482
bitstream reader API header.
uint8_t * framebuf
Definition: g2meet.c:145
static void epic_free_pixel_cache(ePICPixHash *hash)
Definition: g2meet.c:479
int version
Definition: g2meet.c:135
ScanTable scantable
Definition: g2meet.c:122
#define G_shift
Definition: g2meet.c:391
static av_cold int g2m_decode_init(AVCodecContext *avctx)
Definition: g2meet.c:1602
uint8_t W_ctx_rung[256]
Definition: g2meet.c:107
#define FFALIGN(x, a)
Definition: macros.h:48
VLC ac_vlc[2]
Definition: g2meet.c:124
#define av_log(a,...)
static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
Definition: g2meet.c:182
uint8_t hash[HASH_SIZE]
Definition: movenc.c:57
#define U(x)
Definition: vp56_arith.h:37
int err
Definition: elsdec.h:40
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
uint8_t * buf
Definition: g2meet.c:128
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define R
Definition: huffyuvdsp.h:34
ElsUnsignedRung unsigned_rung
Definition: g2meet.c:104
static const uint16_t mask[17]
Definition: lzw.c:38
static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c, GetByteContext *gb)
Definition: g2meet.c:1217
static av_cold int g2m_decode_end(AVCodecContext *avctx)
Definition: g2meet.c:1622
ePICContext ec
Definition: g2meet.c:132
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:164
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
Definition: jpegtables.c:70
void(* clear_blocks)(int16_t *blocks)
Definition: blockdsp.h:37
uint8_t runlen_zeroes[14]
Definition: g2meet.c:112
#define B
Definition: huffyuvdsp.h:32
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
int old_tile_h
Definition: g2meet.c:149
int cursor_stride
Definition: g2meet.c:155
int orig_height
Definition: g2meet.c:139
static int epic_jb_decode_tile(G2MContext *c, int tile_x, int tile_y, const uint8_t *src, size_t src_size, AVCodecContext *avctx)
Definition: g2meet.c:881
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
const char * name
Name of the codec implementation.
Definition: avcodec.h:3501
void ff_els_decoder_init(ElsDecCtx *ctx, const uint8_t *in, size_t data_size)
Definition: elsdec.c:247
static ePICPixHashElem * epic_hash_add(ePICPixHash *hash, uint32_t key)
Definition: g2meet.c:424
uint8_t bits
Definition: vp3data.h:202
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:3297
int orig_width
Definition: g2meet.c:139
Definition: vlc.h:26
static int jpg_decode_block(JPGContext *c, GetBitContext *gb, int plane, int16_t *block)
Definition: g2meet.c:240
uint8_t prev_row_rung[14]
Definition: g2meet.c:111
uint8_t * cursor
Definition: g2meet.c:154
int prev_dc[3]
Definition: g2meet.c:125
static FFFrameBucket * bucket(FFFrameQueue *fq, size_t idx)
Definition: framequeue.c:25
Compression
Definition: g2meet.c:56
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
#define Y
Definition: boxblur.h:38
static int jpg_decode_data(JPGContext *c, int width, int height, const uint8_t *src, int src_size, uint8_t *dst, int dst_stride, const uint8_t *mask, int mask_stride, int num_mbs, int swapuv)
Definition: g2meet.c:287
#define FFMIN(a, b)
Definition: common.h:96
static int g2m_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt)
Definition: g2meet.c:1388
uint8_t * synth_tile
Definition: g2meet.c:148
#define width
int width
picture width / height.
Definition: avcodec.h:1743
uint8_t w
Definition: llviddspenc.c:38
AVFormatContext * ctx
Definition: movenc.c:48
int cursor_y
Definition: g2meet.c:157
static int epic_predict_pixel2(ePICContext *dc, uint8_t *rung, uint32_t *pPix, uint32_t pix)
Definition: g2meet.c:747
static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride)
Definition: g2meet.c:1339
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
int tile_y
Definition: g2meet.c:141
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:96
int old_tile_w
Definition: g2meet.c:149
uint8_t ne_pred_rung[256]
Definition: g2meet.c:110
if(ret)
static const float pred[4]
Definition: siprdata.h:259
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
ElsDecCtx els_ctx
Definition: g2meet.c:102
static uint32_t epic_decode_pixel_pred(ePICContext *dc, int x, int y, const uint32_t *curr_row, const uint32_t *above_row)
Definition: g2meet.c:518
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
#define TOSIGNED(val)
Definition: g2meet.c:509
#define EPIC_PIX_STACK_MAX
Definition: g2meet.c:45
AVCodec ff_g2m_decoder
Definition: g2meet.c:1640
int av_reallocp(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory through a pointer to a pointer.
Definition: mem.c:163
Libavcodec external API header.
static int kempf_restore_buf(const uint8_t *src, int len, uint8_t *dst, int stride, const uint8_t *jpeg_tile, int tile_stride, int width, int height, const uint8_t *pal, int npal, int tidx)
Definition: g2meet.c:1027
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
#define APPLY_ALPHA(src, new, alpha)
Definition: g2meet.c:1336
int tile_stride
Definition: g2meet.c:149
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
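Usage note: a minimal sketch of the usual pattern around init_get_bits8() — wrap a byte buffer in a GetBitContext, check the return value, then read bit fields; the 4-bit read is an illustrative assumption.

    GetBitContext gb;
    int ret = init_get_bits8(&gb, src, src_size);
    if (ret < 0)
        return ret;
    /* read an illustrative 4-bit field */
    unsigned field = get_bits(&gb, 4);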
int bucket_size[EPIC_HASH_SIZE]
Definition: g2meet.c:97
static const int16_t alpha[]
Definition: ilbcdata.h:55
main external API structure.
Definition: avcodec.h:1570
static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int is_ac)
Definition: g2meet.c:161
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
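Usage note: a hedged sketch of JPEG/MPEG-1 style DC decoding with this helper — the Huffman symbol gives the magnitude category and get_xbits() then reads that many bits, treating a leading 0 bit as a negative value; the variable names are illustrative and not copied from jpg_decode_block().

    int nbits = get_vlc2(gb, dc_vlc->table, 9, 2);  /* magnitude category */
    if (nbits < 0)
        return AVERROR_INVALIDDATA;
    int diff = nbits ? get_xbits(gb, nbits) : 0;    /* signed DC difference */
    dc_pred += diff;                                /* DC is coded differentially */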
JPGContext jc
Definition: g2meet.c:133
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1968
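Usage note: a hedged sketch of how a decoder typically obtains its output frame — request a buffer from the caller through ff_get_buffer() and then fill in the frame properties; passing 0 for flags is the common case when no long-lived reference is kept, and the property values below are illustrative.

    int ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0)
        return ret;
    frame->key_frame = 1;                    /* e.g. after a full repaint */
    frame->pict_type = AV_PICTURE_TYPE_I;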
void * buf
Definition: avisynth_c.h:766
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
int compression
Definition: g2meet.c:137
static int djb2_hash(uint32_t key)
Definition: g2meet.c:395
int epic_buf_stride
Definition: g2meet.c:149
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
#define mid_pred
Definition: mathops.h:97
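Usage note: mid_pred() evaluates to the median of its three arguments; a generic, hedged example is the classic gradient (MED) predictor from the west, north and north-west neighbours — shown per component for illustration, not claimed to be the exact formula used by epic_decode_pixel_pred().

    /* median-of-three gradient prediction (illustrative only) */
    int prediction = mid_pred(W, N, W + N - NW);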
int cursor_w
Definition: g2meet.c:157
ePICPixHashElem * bucket[EPIC_HASH_SIZE]
Definition: g2meet.c:96
uint8_t runlen_one
Definition: g2meet.c:113
int cursor_hot_x
Definition: g2meet.c:158
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
unsigned ff_els_decode_unsigned(ElsDecCtx *ctx, ElsUnsignedRung *ur)
Definition: elsdec.c:350
static const uint8_t chroma_quant[64]
Definition: g2meet.c:72
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
static int epic_decode_from_cache(ePICContext *dc, uint32_t W, uint32_t *pPix)
Definition: g2meet.c:786
void avpriv_report_missing_feature(void *avc, const char *msg, ...)
Log a generic warning message about a missing feature.
#define SIZE_SPECIFIER
Definition: internal.h:262
struct ePICPixListElem * next
Definition: g2meet.c:84
int bucket_fill[EPIC_HASH_SIZE]
Definition: g2meet.c:98
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
int tiles_x
Definition: g2meet.c:141
uint8_t W_flag_rung
Definition: g2meet.c:105
uint8_t * epic_buf_base
Definition: g2meet.c:148
static void epic_hash_init(ePICPixHash *hash)
Definition: g2meet.c:407
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
common internal api header.
uint32_t pix_id
Definition: g2meet.c:90
uint8_t N_flag_rung
Definition: g2meet.c:106
#define G
Definition: huffyuvdsp.h:33
#define flag(name)
Definition: cbs_av1.c:553
BlockDSPContext bdsp
Definition: g2meet.c:120
int cursor_x
Definition: g2meet.c:157
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:253
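Usage note: a hedged sketch of validating incoming dimensions before (re)allocating buffers; AV_PIX_FMT_NONE can be passed when only the generic size and max_pixels checks are wanted, and width/height here are illustrative locals.

    int ret = av_image_check_size2(width, height, avctx->max_pixels,
                                   AV_PIX_FMT_NONE, 0, avctx);
    if (ret < 0)
        return ret;   /* dimensions rejected before any allocation */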
IDCTDSPContext idsp
Definition: g2meet.c:121
static const uint8_t luma_quant[64]
Definition: g2meet.c:61
#define MKBETAG(a, b, c, d)
Definition: common.h:367
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:795
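Usage note: a hedged sketch of the padding convention — buffers handed to bitstream readers should be over-allocated by AV_INPUT_BUFFER_PADDING_SIZE zeroed bytes so optimized readers may safely over-read; assumes the usual libavutil/mem.h and string.h includes, and buf/src/size are illustrative names.

    uint8_t *buf = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return AVERROR(ENOMEM);
    memcpy(buf, src, size);   /* the trailing padding bytes stay zeroed */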
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
void * priv_data
Definition: avcodec.h:1597
int tile_height
Definition: g2meet.c:140
#define av_free(p)
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
int stack_pos
Definition: g2meet.c:114
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
int len
#define UPDATE_NEIGHBOURS(x)
Definition: g2meet.c:381
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
#define EPIC_PIX_STACK_SIZE
Definition: g2meet.c:44
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
void ff_els_decoder_uninit(ElsUnsignedRung *rung)
Definition: elsdec.c:272
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:373
int ff_els_decode_bit(ElsDecCtx *ctx, uint8_t *rung)
Definition: elsdec.c:291
int old_width
Definition: g2meet.c:146
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2261
FILE * out
Definition: movenc.c:54
static int epic_decode_run_length(ePICContext *dc, int x, int y, int tile_width, const uint32_t *curr_row, const uint32_t *above_row, const uint32_t *above2_row, uint32_t *pPix, int *pRun)
Definition: g2meet.c:612
#define av_freep(p)
#define stride
void(* idct)(int16_t *block)
Definition: idctdsp.h:65
uint8_t * jpeg_tile
Definition: g2meet.c:148
int old_height
Definition: g2meet.c:146
This structure stores compressed data.
Definition: avcodec.h:1459
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:359
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:986
for(j=16;j >0;--j)
Predicted.
Definition: avutil.h:275
#define V
Definition: avdct.c:30
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:191
static uint8_t tmp[11]
Definition: aes_ctr.c:26