FFmpeg
rasc.c
Go to the documentation of this file.
1 /*
2  * RemotelyAnywhere Screen Capture decoder
3  *
4  * Copyright (c) 2018 Paul B Mahol
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 
27 #include "libavutil/imgutils.h"
28 #include "libavutil/opt.h"
29 
30 #include "avcodec.h"
31 #include "bytestream.h"
32 #include "internal.h"
33 
34 #include <zlib.h>
35 
36 #define KBND MKTAG('K', 'B', 'N', 'D')
37 #define FINT MKTAG('F', 'I', 'N', 'T')
38 #define INIT MKTAG('I', 'N', 'I', 'T')
39 #define BNDL MKTAG('B', 'N', 'D', 'L')
40 #define KFRM MKTAG('K', 'F', 'R', 'M')
41 #define DLTA MKTAG('D', 'L', 'T', 'A')
42 #define MOUS MKTAG('M', 'O', 'U', 'S')
43 #define MPOS MKTAG('M', 'P', 'O', 'S')
44 #define MOVE MKTAG('M', 'O', 'V', 'E')
45 #define EMPT MKTAG('E', 'M', 'P', 'T')
46 
/* Decoder private context.  Restored the members that the rest of the
 * file references (s->gb, s->skip_cursor, s->delta_size, s->cursor_size,
 * s->frame, s->frame1, s->frame2) but which were missing here. */
typedef struct RASCContext {
    AVClass        *class;
    int             skip_cursor;  // user option: do not composite the mouse cursor
    GetByteContext  gb;           // cursor over the current packet
    uint8_t        *delta;        // scratch buffer for inflated chunk payloads
    int             delta_size;
    uint8_t        *cursor;       // RGB24 cursor image, bottom-up
    int             cursor_size;
    unsigned        cursor_w;
    unsigned        cursor_h;
    unsigned        cursor_x;     // cursor position, top-left origin
    unsigned        cursor_y;
    int             stride;       // bytes per row of active picture data
    int             bpp;          // bytes per pixel (1, 2 or 4)
    z_stream        zstream;      // shared zlib state, reset per chunk
    AVFrame        *frame;        // output frame for the current packet
    AVFrame        *frame1;       // reference plane 1 (swap buffer)
    AVFrame        *frame2;       // reference plane 2 (current picture)
} RASCContext;
66 
67 static void clear_plane(AVCodecContext *avctx, AVFrame *frame)
68 {
69  RASCContext *s = avctx->priv_data;
70  uint8_t *dst = frame->data[0];
71 
72  if (!dst)
73  return;
74 
75  for (int y = 0; y < avctx->height; y++) {
76  memset(dst, 0, avctx->width * s->bpp);
77  dst += frame->linesize[0];
78  }
79 }
80 
81 static void copy_plane(AVCodecContext *avctx, AVFrame *src, AVFrame *dst)
82 {
83  RASCContext *s = avctx->priv_data;
84  uint8_t *srcp = src->data[0];
85  uint8_t *dstp = dst->data[0];
86 
87  for (int y = 0; y < avctx->height; y++) {
88  memcpy(dstp, srcp, s->stride);
89  srcp += src->linesize[0];
90  dstp += dst->linesize[0];
91  }
92 }
93 
94 static int init_frames(AVCodecContext *avctx)
95 {
96  RASCContext *s = avctx->priv_data;
97  int ret;
98 
99  av_frame_unref(s->frame1);
100  av_frame_unref(s->frame2);
101  if ((ret = ff_get_buffer(avctx, s->frame1, 0)) < 0)
102  return ret;
103 
104  if ((ret = ff_get_buffer(avctx, s->frame2, 0)) < 0)
105  return ret;
106 
107  clear_plane(avctx, s->frame2);
108  clear_plane(avctx, s->frame1);
109 
110  return 0;
111 }
112 
/* Parse a FINT/INIT chunk: screen geometry and pixel depth, plus the
 * 256-entry palette for 8 bpp streams.  A chunk that does not start
 * with the 0x65 magic carries no header; it only clears both
 * reference frames (which must already be allocated).
 * Returns 0 on success or a negative AVERROR code. */
static int decode_fint(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned w, h, fmt;
    int ret;

    if (bytestream2_peek_le32(gb) != 0x65) {
        if (!s->frame2->data[0] || !s->frame1->data[0])
            return AVERROR_INVALIDDATA;

        clear_plane(avctx, s->frame2);
        clear_plane(avctx, s->frame1);
        return 0;
    }
    /* 72 bytes = fixed header up to and including the 24-byte tail skip */
    if (bytestream2_get_bytes_left(gb) < 72)
        return AVERROR_INVALIDDATA;

    bytestream2_skip(gb, 8);
    w = bytestream2_get_le32(gb);
    h = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 30);
    fmt = bytestream2_get_le16(gb);
    bytestream2_skip(gb, 24);

    /* Map the stream's bit depth to a pixel format; 8 bpp rows are
     * padded to a 4-byte boundary. */
    switch (fmt) {
    case 8: s->stride = FFALIGN(w, 4);
            s->bpp    = 1;
            fmt       = AV_PIX_FMT_PAL8; break;
    case 16: s->stride = w * 2;
             s->bpp    = 2;
             fmt       = AV_PIX_FMT_RGB555LE; break;
    case 32: s->stride = w * 4;
             s->bpp    = 4;
             fmt       = AV_PIX_FMT_BGR0; break;
    default: return AVERROR_INVALIDDATA;
    }

    ret = ff_set_dimensions(avctx, w, h);
    if (ret < 0)
        return ret;
    avctx->width  = w;
    avctx->height = h;
    avctx->pix_fmt = fmt;

    ret = init_frames(avctx);
    if (ret < 0)
        return ret;

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        uint32_t *pal = (uint32_t *)s->frame2->data[1];

        /* Palette entries are stored as LE32; force alpha opaque.
         * bytestream2_get_le32() is bounds-checked and yields zeros
         * past the end of the chunk. */
        for (int i = 0; i < 256; i++)
            pal[i] = bytestream2_get_le32(gb) | 0xFF000000u;
    }

    return 0;
}
172 
/* Inflate at most `size` bytes from the current packet position into
 * s->delta, which is grown to hold `uncompressed_size` bytes.  The
 * whole payload must decompress in one Z_FINISH pass.
 * Returns 0 on success or a negative AVERROR code. */
static int decode_zlib(AVCodecContext *avctx, const AVPacket *avpkt,
                       unsigned size, unsigned uncompressed_size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    int zres;

    zres = inflateReset(&s->zstream);
    if (zres != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zres);
        return AVERROR_EXTERNAL;
    }

    av_fast_padded_malloc(&s->delta, &s->delta_size, uncompressed_size);
    if (!s->delta)
        return AVERROR(ENOMEM);

    s->zstream.next_in   = avpkt->data + bytestream2_tell(gb);
    s->zstream.avail_in  = FFMIN(size, bytestream2_get_bytes_left(gb));
    s->zstream.next_out  = s->delta;
    s->zstream.avail_out = s->delta_size;

    zres = inflate(&s->zstream, Z_FINISH);
    if (zres != Z_STREAM_END) {
        av_log(avctx, AV_LOG_ERROR,
               "Inflate failed with return code: %d.\n", zres);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
205 
206 static int decode_move(AVCodecContext *avctx,
207  const AVPacket *avpkt, unsigned size)
208 {
209  RASCContext *s = avctx->priv_data;
210  GetByteContext *gb = &s->gb;
212  unsigned pos, compression, nb_moves;
213  unsigned uncompressed_size;
214  int ret;
215 
216  pos = bytestream2_tell(gb);
217  bytestream2_skip(gb, 8);
218  nb_moves = bytestream2_get_le32(gb);
219  bytestream2_skip(gb, 8);
220  compression = bytestream2_get_le32(gb);
221 
222  if (nb_moves > INT32_MAX / 16 || nb_moves > avctx->width * avctx->height)
223  return AVERROR_INVALIDDATA;
224 
225  uncompressed_size = 16 * nb_moves;
226 
227  if (compression == 1) {
228  ret = decode_zlib(avctx, avpkt,
229  size - (bytestream2_tell(gb) - pos),
230  uncompressed_size);
231  if (ret < 0)
232  return ret;
233  bytestream2_init(&mc, s->delta, uncompressed_size);
234  } else if (compression == 0) {
235  bytestream2_init(&mc, avpkt->data + bytestream2_tell(gb),
237  } else if (compression == 2) {
238  avpriv_request_sample(avctx, "compression %d", compression);
239  return AVERROR_PATCHWELCOME;
240  } else {
241  return AVERROR_INVALIDDATA;
242  }
243 
244  if (bytestream2_get_bytes_left(&mc) < uncompressed_size)
245  return AVERROR_INVALIDDATA;
246 
247  for (int i = 0; i < nb_moves; i++) {
248  int type, start_x, start_y, end_x, end_y, mov_x, mov_y;
249  uint8_t *e2, *b1, *b2;
250  int w, h;
251 
252  type = bytestream2_get_le16(&mc);
253  start_x = bytestream2_get_le16(&mc);
254  start_y = bytestream2_get_le16(&mc);
255  end_x = bytestream2_get_le16(&mc);
256  end_y = bytestream2_get_le16(&mc);
257  mov_x = bytestream2_get_le16(&mc);
258  mov_y = bytestream2_get_le16(&mc);
259  bytestream2_skip(&mc, 2);
260 
261  if (start_x >= avctx->width || start_y >= avctx->height ||
262  end_x >= avctx->width || end_y >= avctx->height ||
263  mov_x >= avctx->width || mov_y >= avctx->height) {
264  continue;
265  }
266 
267  if (start_x >= end_x || start_y >= end_y)
268  continue;
269 
270  w = end_x - start_x;
271  h = end_y - start_y;
272 
273  if (mov_x + w > avctx->width || mov_y + h > avctx->height)
274  continue;
275 
276  if (!s->frame2->data[0] || !s->frame1->data[0])
277  return AVERROR_INVALIDDATA;
278 
279  b1 = s->frame1->data[0] + s->frame1->linesize[0] * (start_y + h - 1) + start_x * s->bpp;
280  b2 = s->frame2->data[0] + s->frame2->linesize[0] * (start_y + h - 1) + start_x * s->bpp;
281  e2 = s->frame2->data[0] + s->frame2->linesize[0] * (mov_y + h - 1) + mov_x * s->bpp;
282 
283  if (type == 2) {
284  for (int j = 0; j < h; j++) {
285  memcpy(b1, b2, w * s->bpp);
286  b1 -= s->frame1->linesize[0];
287  b2 -= s->frame2->linesize[0];
288  }
289  } else if (type == 1) {
290  for (int j = 0; j < h; j++) {
291  memset(b2, 0, w * s->bpp);
292  b2 -= s->frame2->linesize[0];
293  }
294  } else if (type == 0) {
295  uint8_t *buffer;
296 
297  av_fast_padded_malloc(&s->delta, &s->delta_size, w * h * s->bpp);
298  buffer = s->delta;
299  if (!buffer)
300  return AVERROR(ENOMEM);
301 
302  for (int j = 0; j < h; j++) {
303  memcpy(buffer + j * w * s->bpp, e2, w * s->bpp);
304  e2 -= s->frame2->linesize[0];
305  }
306 
307  for (int j = 0; j < h; j++) {
308  memcpy(b2, buffer + j * w * s->bpp, w * s->bpp);
309  b2 -= s->frame2->linesize[0];
310  }
311  } else {
312  return AVERROR_INVALIDDATA;
313  }
314  }
315 
316  bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));
317 
318  return 0;
319 }
320 
/* Advance the DLTA run-length cursor: once cx has covered a full row,
 * wrap to the start of the row above (rows are stored bottom-up) and
 * consume one unit of the current run.  Expects cx, cy, len, b1, b2,
 * w and s in the enclosing scope. */
#define NEXT_LINE \
    if (cx >= w * s->bpp) { \
        cx = 0; \
        cy--; \
        b1 -= s->frame1->linesize[0]; \
        b2 -= s->frame2->linesize[0]; \
    } \
    len--;
329 
330 static int decode_dlta(AVCodecContext *avctx,
331  const AVPacket *avpkt, unsigned size)
332 {
333  RASCContext *s = avctx->priv_data;
334  GetByteContext *gb = &s->gb;
336  unsigned uncompressed_size, pos;
337  unsigned x, y, w, h;
338  int ret, cx, cy, compression;
339  uint8_t *b1, *b2;
340 
341  pos = bytestream2_tell(gb);
342  bytestream2_skip(gb, 12);
343  uncompressed_size = bytestream2_get_le32(gb);
344  x = bytestream2_get_le32(gb);
345  y = bytestream2_get_le32(gb);
346  w = bytestream2_get_le32(gb);
347  h = bytestream2_get_le32(gb);
348 
349  if (x >= avctx->width || y >= avctx->height ||
350  w > avctx->width || h > avctx->height)
351  return AVERROR_INVALIDDATA;
352 
353  if (x + w > avctx->width || y + h > avctx->height)
354  return AVERROR_INVALIDDATA;
355 
356  bytestream2_skip(gb, 4);
357  compression = bytestream2_get_le32(gb);
358 
359  if (compression == 1) {
360  if (w * h * s->bpp * 3 < uncompressed_size)
361  return AVERROR_INVALIDDATA;
362  ret = decode_zlib(avctx, avpkt, size, uncompressed_size);
363  if (ret < 0)
364  return ret;
365  bytestream2_init(&dc, s->delta, uncompressed_size);
366  } else if (compression == 0) {
367  if (bytestream2_get_bytes_left(gb) < uncompressed_size)
368  return AVERROR_INVALIDDATA;
369  bytestream2_init(&dc, avpkt->data + bytestream2_tell(gb),
370  uncompressed_size);
371  } else if (compression == 2) {
372  avpriv_request_sample(avctx, "compression %d", compression);
373  return AVERROR_PATCHWELCOME;
374  } else {
375  return AVERROR_INVALIDDATA;
376  }
377 
378  if (!s->frame2->data[0] || !s->frame1->data[0])
379  return AVERROR_INVALIDDATA;
380 
381  b1 = s->frame1->data[0] + s->frame1->linesize[0] * (y + h - 1) + x * s->bpp;
382  b2 = s->frame2->data[0] + s->frame2->linesize[0] * (y + h - 1) + x * s->bpp;
383  cx = 0, cy = h;
384  while (bytestream2_get_bytes_left(&dc) > 0) {
385  int type = bytestream2_get_byte(&dc);
386  int len = bytestream2_get_byte(&dc);
387  unsigned fill;
388 
389  switch (type) {
390  case 1:
391  while (len > 0 && cy > 0) {
392  cx++;
393  NEXT_LINE
394  }
395  break;
396  case 2:
397  while (len > 0 && cy > 0) {
398  int v0 = b1[cx];
399  int v1 = b2[cx];
400 
401  b2[cx] = v0;
402  b1[cx] = v1;
403  cx++;
404  NEXT_LINE
405  }
406  break;
407  case 3:
408  while (len > 0 && cy > 0) {
409  fill = bytestream2_get_byte(&dc);
410  b1[cx] = b2[cx];
411  b2[cx] = fill;
412  cx++;
413  NEXT_LINE
414  }
415  break;
416  case 4:
417  fill = bytestream2_get_byte(&dc);
418  while (len > 0 && cy > 0) {
419  AV_WL32(b1 + cx, AV_RL32(b2 + cx));
420  AV_WL32(b2 + cx, fill);
421  cx++;
422  NEXT_LINE
423  }
424  break;
425  case 7:
426  fill = bytestream2_get_le32(&dc);
427  while (len > 0 && cy > 0) {
428  AV_WL32(b1 + cx, AV_RL32(b2 + cx));
429  AV_WL32(b2 + cx, fill);
430  cx += 4;
431  NEXT_LINE
432  }
433  break;
434  case 10:
435  while (len > 0 && cy > 0) {
436  cx += 4;
437  NEXT_LINE
438  }
439  break;
440  case 12:
441  while (len > 0 && cy > 0) {
442  unsigned v0, v1;
443 
444  v0 = AV_RL32(b2 + cx);
445  v1 = AV_RL32(b1 + cx);
446  AV_WL32(b2 + cx, v1);
447  AV_WL32(b1 + cx, v0);
448  cx += 4;
449  NEXT_LINE
450  }
451  break;
452  case 13:
453  while (len > 0 && cy > 0) {
454  fill = bytestream2_get_le32(&dc);
455  AV_WL32(b1 + cx, AV_RL32(b2 + cx));
456  AV_WL32(b2 + cx, fill);
457  cx += 4;
458  NEXT_LINE
459  }
460  break;
461  default:
462  avpriv_request_sample(avctx, "runlen %d", type);
463  return AVERROR_INVALIDDATA;
464  }
465  }
466 
467  bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));
468 
469  return 0;
470 }
471 
/* Handle a KFRM chunk: a keyframe carrying both planes as one zlib
 * stream.  frame2 rows are inflated first, then frame1 rows, each
 * bottom-up, continuing the same inflate state across both passes.
 * Returns 0 on success or a negative AVERROR code. */
static int decode_kfrm(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    uint8_t *dst;
    unsigned pos;
    int zret, ret;

    pos = bytestream2_tell(gb);
    /* a KFRM may embed a FINT header (0x65 magic) that (re)configures
     * dimensions and pixel format before the picture data */
    if (bytestream2_peek_le32(gb) == 0x65) {
        ret = decode_fint(avctx, avpkt, size);
        if (ret < 0)
            return ret;
    }

    if (!s->frame2->data[0])
        return AVERROR_INVALIDDATA;

    zret = inflateReset(&s->zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
        return AVERROR_EXTERNAL;
    }

    s->zstream.next_in  = avpkt->data + bytestream2_tell(gb);
    s->zstream.avail_in = bytestream2_get_bytes_left(gb);

    /* first pass: frame2, bottom row first */
    dst = s->frame2->data[0] + (avctx->height - 1) * s->frame2->linesize[0];
    for (int i = 0; i < avctx->height; i++) {
        s->zstream.next_out  = dst;
        s->zstream.avail_out = s->stride;

        zret = inflate(&s->zstream, Z_SYNC_FLUSH);
        if (zret != Z_OK && zret != Z_STREAM_END) {
            av_log(avctx, AV_LOG_ERROR,
                   "Inflate failed with return code: %d.\n", zret);
            return AVERROR_INVALIDDATA;
        }

        dst -= s->frame2->linesize[0];
    }

    /* second pass: frame1, continuing the same compressed stream */
    dst = s->frame1->data[0] + (avctx->height - 1) * s->frame1->linesize[0];
    for (int i = 0; i < avctx->height; i++) {
        s->zstream.next_out  = dst;
        s->zstream.avail_out = s->stride;

        zret = inflate(&s->zstream, Z_SYNC_FLUSH);
        if (zret != Z_OK && zret != Z_STREAM_END) {
            av_log(avctx, AV_LOG_ERROR,
                   "Inflate failed with return code: %d.\n", zret);
            return AVERROR_INVALIDDATA;
        }

        dst -= s->frame1->linesize[0];
    }

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}
534 
/* Handle a MOUS chunk: a zlib-compressed RGB24 cursor bitmap that is
 * stored for later compositing by draw_cursor().
 * Returns 0 on success or a negative AVERROR code. */
static int decode_mous(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned w, h, pos, uncompressed_size;
    int ret;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 8);
    w = bytestream2_get_le32(gb);
    h = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 12);
    uncompressed_size = bytestream2_get_le32(gb);

    if (w > avctx->width || h > avctx->height)
        return AVERROR_INVALIDDATA;

    /* the payload must be exactly one RGB24 bitmap of w x h pixels */
    if (uncompressed_size != 3 * w * h)
        return AVERROR_INVALIDDATA;

    av_fast_padded_malloc(&s->cursor, &s->cursor_size, uncompressed_size);
    if (!s->cursor)
        return AVERROR(ENOMEM);

    ret = decode_zlib(avctx, avpkt,
                      size - (bytestream2_tell(gb) - pos),
                      uncompressed_size);
    if (ret < 0)
        return ret;
    /* decode_zlib() inflates into s->delta; keep a private copy since
     * s->delta is reused as scratch by other chunk handlers */
    memcpy(s->cursor, s->delta, uncompressed_size);

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    s->cursor_w = w;
    s->cursor_h = h;

    return 0;
}
574 
575 static int decode_mpos(AVCodecContext *avctx,
576  const AVPacket *avpkt, unsigned size)
577 {
578  RASCContext *s = avctx->priv_data;
579  GetByteContext *gb = &s->gb;
580  unsigned pos;
581 
582  pos = bytestream2_tell(gb);
583  bytestream2_skip(gb, 8);
584  s->cursor_x = bytestream2_get_le32(gb);
585  s->cursor_y = bytestream2_get_le32(gb);
586 
587  bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));
588 
589  return 0;
590 }
591 
/* Composite the stored RGB24 cursor bitmap onto the output frame at
 * (cursor_x, cursor_y).  The cursor image is stored bottom-up; the
 * pixel at cursor[0..2] (bottom-left corner) is treated as the
 * transparent colour key.  Cursors that would extend past the frame
 * edge are skipped entirely. */
static void draw_cursor(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;
    uint8_t *dst, *pal;

    if (!s->cursor)
        return;

    if (s->cursor_x >= avctx->width || s->cursor_y >= avctx->height)
        return;

    if (s->cursor_x + s->cursor_w > avctx->width ||
        s->cursor_y + s->cursor_h > avctx->height)
        return;

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        /* map each cursor pixel to the nearest palette entry by
         * L1 distance in RGB space */
        pal = s->frame->data[1];
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];
                int best = INT_MAX;
                int index = 0;
                int dist;

                /* colour-key transparency */
                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + (s->cursor_x + j);
                for (int k = 0; k < 256; k++) {
                    int pr = pal[k * 4 + 0];
                    int pg = pal[k * 4 + 1];
                    int pb = pal[k * 4 + 2];

                    dist = FFABS(cr - pr) + FFABS(cg - pg) + FFABS(cb - pb);
                    if (dist < best) {
                        best = dist;
                        index = k;
                    }
                }
                dst[0] = index;
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB555LE) {
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                /* quantize 8-bit components down to 5 bits */
                cr >>= 3; cg >>=3; cb >>= 3;
                dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + 2 * (s->cursor_x + j);
                AV_WL16(dst, cr | cg << 5 | cb << 10);
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_BGR0) {
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + 4 * (s->cursor_x + j);
                dst[0] = cb;
                dst[1] = cg;
                dst[2] = cr;
            }
        }
    }
}
669 
670 static int decode_frame(AVCodecContext *avctx,
671  void *data, int *got_frame,
672  AVPacket *avpkt)
673 {
674  RASCContext *s = avctx->priv_data;
675  GetByteContext *gb = &s->gb;
676  int ret, intra = 0;
677  AVFrame *frame = data;
678 
679  bytestream2_init(gb, avpkt->data, avpkt->size);
680 
681  if (bytestream2_peek_le32(gb) == EMPT)
682  return avpkt->size;
683 
684  s->frame = frame;
685 
686  while (bytestream2_get_bytes_left(gb) > 0) {
687  unsigned type, size = 0;
688 
689  if (bytestream2_get_bytes_left(gb) < 8)
690  return AVERROR_INVALIDDATA;
691 
692  type = bytestream2_get_le32(gb);
693  if (type == KBND || type == BNDL) {
694  intra = type == KBND;
695  type = bytestream2_get_le32(gb);
696  }
697 
698  size = bytestream2_get_le32(gb);
700  return AVERROR_INVALIDDATA;
701 
702  switch (type) {
703  case FINT:
704  case INIT:
705  ret = decode_fint(avctx, avpkt, size);
706  break;
707  case KFRM:
708  ret = decode_kfrm(avctx, avpkt, size);
709  break;
710  case DLTA:
711  ret = decode_dlta(avctx, avpkt, size);
712  break;
713  case MOVE:
714  ret = decode_move(avctx, avpkt, size);
715  break;
716  case MOUS:
717  ret = decode_mous(avctx, avpkt, size);
718  break;
719  case MPOS:
720  ret = decode_mpos(avctx, avpkt, size);
721  break;
722  default:
723  bytestream2_skip(gb, size);
724  ret = 0;
725  }
726 
727  if (ret < 0)
728  return ret;
729  }
730 
731  if (!s->frame2->data[0] || !s->frame1->data[0])
732  return AVERROR_INVALIDDATA;
733 
734  if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0)
735  return ret;
736 
737  copy_plane(avctx, s->frame2, s->frame);
738  if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
739  memcpy(s->frame->data[1], s->frame2->data[1], 1024);
740  if (!s->skip_cursor)
741  draw_cursor(avctx);
742 
743  s->frame->key_frame = intra;
744  s->frame->pict_type = intra ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
745 
746  *got_frame = 1;
747 
748  return avpkt->size;
749 }
750 
/* Decoder init: set up the shared zlib inflate state and allocate the
 * two reference frames.  Restored the function signature line that
 * was missing from the extracted source.
 * Returns 0 on success or a negative AVERROR code. */
static av_cold int decode_init(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;
    int zret;

    s->zstream.zalloc = Z_NULL;
    s->zstream.zfree  = Z_NULL;
    s->zstream.opaque = Z_NULL;
    zret = inflateInit(&s->zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
        return AVERROR_EXTERNAL;
    }

    s->frame1 = av_frame_alloc();
    s->frame2 = av_frame_alloc();
    /* cleanup of a partial allocation is handled by decode_close()
     * via FF_CODEC_CAP_INIT_CLEANUP */
    if (!s->frame1 || !s->frame2)
        return AVERROR(ENOMEM);

    return 0;
}
772 
/* Decoder teardown: free scratch buffers, reference frames and the
 * zlib state.  Restored the function signature line that was missing
 * from the extracted source. */
static av_cold int decode_close(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;

    av_freep(&s->cursor);
    s->cursor_size = 0;
    av_freep(&s->delta);
    s->delta_size = 0;
    av_frame_free(&s->frame1);
    av_frame_free(&s->frame2);
    inflateEnd(&s->zstream);

    return 0;
}
787 
/* Flush on seek: zero both reference planes so stale picture data
 * cannot bleed into frames decoded after the seek point. */
static void decode_flush(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;

    clear_plane(avctx, s->frame1);
    clear_plane(avctx, s->frame2);
}
795 
/* Decoder private options: "skip_cursor" disables compositing the
 * MOUS/MPOS cursor overlay onto the output frame (default: off). */
static const AVOption options[] = {
{ "skip_cursor", "skip the cursor", offsetof(RASCContext, skip_cursor), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
{ NULL },
};
800 
/* AVClass exposing the private options above through the AVOption API. */
static const AVClass rasc_decoder_class = {
    .class_name = "rasc decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
807 
809  .name = "rasc",
810  .long_name = NULL_IF_CONFIG_SMALL("RemotelyAnywhere Screen Capture"),
811  .type = AVMEDIA_TYPE_VIDEO,
812  .id = AV_CODEC_ID_RASC,
813  .priv_data_size = sizeof(RASCContext),
814  .init = decode_init,
815  .close = decode_close,
816  .decode = decode_frame,
817  .flush = decode_flush,
818  .capabilities = AV_CODEC_CAP_DR1,
819  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
821  .priv_class = &rasc_decoder_class,
822 };
RASCContext
Definition: rasc.c:47
AVCodec
AVCodec.
Definition: codec.h:202
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:42
init_frames
static int init_frames(AVCodecContext *avctx)
Definition: rasc.c:94
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
BNDL
#define BNDL
Definition: rasc.c:39
opt.h
AV_OPT_FLAG_VIDEO_PARAM
#define AV_OPT_FLAG_VIDEO_PARAM
Definition: opt.h:280
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:215
GetByteContext
Definition: bytestream.h:33
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
FINT
#define FINT
Definition: rasc.c:37
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
index
fg index
Definition: ffmpeg_filter.c:168
RASCContext::delta_size
int delta_size
Definition: rasc.c:52
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:373
AVOption
AVOption.
Definition: opt.h:247
data
const char data[16]
Definition: mxf.c:143
RASCContext::frame
AVFrame * frame
Definition: rasc.c:62
options
static const AVOption options[]
Definition: rasc.c:796
EMPT
#define EMPT
Definition: rasc.c:45
decode_fint
static int decode_fint(AVCodecContext *avctx, const AVPacket *avpkt, unsigned size)
Definition: rasc.c:113
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:317
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
decode_dlta
static int decode_dlta(AVCodecContext *avctx, const AVPacket *avpkt, unsigned size)
Definition: rasc.c:330
b1
static double b1(void *priv, double x, double y)
Definition: vf_xfade.c:1703
v0
#define v0
Definition: regdef.h:26
ff_rasc_decoder
const AVCodec ff_rasc_decoder
Definition: rasc.c:808
inflate
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
Definition: vf_neighbor.c:193
NEXT_LINE
#define NEXT_LINE
Definition: rasc.c:321
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
copy_plane
static void copy_plane(AVCodecContext *avctx, AVFrame *src, AVFrame *dst)
Definition: rasc.c:81
RASCContext::frame2
AVFrame * frame2
Definition: rasc.c:64
RASCContext::cursor_x
unsigned cursor_x
Definition: rasc.c:57
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
RASCContext::stride
int stride
Definition: rasc.c:59
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
rasc_decoder_class
static const AVClass rasc_decoder_class
Definition: rasc.c:801
s
#define s(width, name)
Definition: cbs_vp9.c:257
RASCContext::cursor_h
unsigned cursor_h
Definition: rasc.c:56
decode_kfrm
static int decode_kfrm(AVCodecContext *avctx, const AVPacket *avpkt, unsigned size)
Definition: rasc.c:472
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: rasc.c:788
RASCContext::zstream
z_stream zstream
Definition: rasc.c:61
draw_cursor
static void draw_cursor(AVCodecContext *avctx)
Definition: rasc.c:592
decode_move
static int decode_move(AVCodecContext *avctx, const AVPacket *avpkt, unsigned size)
Definition: rasc.c:206
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:65
if
if(ret)
Definition: filter_design.txt:179
decode_frame
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: rasc.c:670
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:593
NULL
#define NULL
Definition: coverity.c:32
INIT
#define INIT
Definition: rasc.c:38
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
decode_mous
static int decode_mous(AVCodecContext *avctx, const AVPacket *avpkt, unsigned size)
Definition: rasc.c:535
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
src
#define src
Definition: vp8dsp.c:255
AV_PIX_FMT_BGR0
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:230
MOVE
#define MOVE
Definition: rasc.c:44
RASCContext::cursor_y
unsigned cursor_y
Definition: rasc.c:58
RASCContext::skip_cursor
int skip_cursor
Definition: rasc.c:49
KBND
#define KBND
Definition: rasc.c:36
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
DLTA
#define DLTA
Definition: rasc.c:41
decode_zlib
static int decode_zlib(AVCodecContext *avctx, const AVPacket *avpkt, unsigned size, unsigned uncompressed_size)
Definition: rasc.c:173
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1652
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
RASCContext::cursor_size
int cursor_size
Definition: rasc.c:54
AVPacket::size
int size
Definition: packet.h:374
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
AV_CODEC_ID_RASC
@ AV_CODEC_ID_RASC
Definition: codec_id.h:290
size
int size
Definition: twinvq_data.h:10344
RASCContext::cursor
uint8_t * cursor
Definition: rasc.c:53
RASCContext::cursor_w
unsigned cursor_w
Definition: rasc.c:55
RASCContext::bpp
int bpp
Definition: rasc.c:60
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:412
b2
static double b2(void *priv, double x, double y)
Definition: vf_xfade.c:1704
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
KFRM
#define KFRM
Definition: rasc.c:40
AV_PIX_FMT_RGB555LE
@ AV_PIX_FMT_RGB555LE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:108
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:50
decode_mpos
static int decode_mpos(AVCodecContext *avctx, const AVPacket *avpkt, unsigned size)
Definition: rasc.c:575
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: rasc.c:751
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:50
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:278
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:437
MPOS
#define MPOS
Definition: rasc.c:43
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:209
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:556
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:593
avcodec.h
decode_close
static av_cold int decode_close(AVCodecContext *avctx)
Definition: rasc.c:773
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
pos
unsigned int pos
Definition: spdifenc.c:412
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
AVCodecContext
main external API structure.
Definition: avcodec.h:383
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
clear_plane
static void clear_plane(AVCodecContext *avctx, AVFrame *frame)
Definition: rasc.c:67
MOUS
#define MOUS
Definition: rasc.c:42
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:86
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVPacket
This structure stores compressed data.
Definition: packet.h:350
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:216
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
RASCContext::gb
GetByteContext gb
Definition: rasc.c:50
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:556
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:334
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
RASCContext::delta
uint8_t * delta
Definition: rasc.c:51
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
h
h
Definition: vp9dsp_template.c:2038
RASCContext::frame1
AVFrame * frame1
Definition: rasc.c:63
mc
#define mc
Definition: vf_colormatrix.c:102