FFmpeg
smcenc.c
/*
 * QuickTime Graphics (SMC) Video Encoder
 * Copyright (c) 2021 The FFmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file smcenc.c
 * QT SMC Video Encoder by Paul B. Mahol
 */

#include "libavutil/common.h"

#include "avcodec.h"
#include "encode.h"
#include "internal.h"
#include "bytestream.h"

#define CPAIR 2
#define CQUAD 4
#define COCTET 8

#define COLORS_PER_TABLE 256

typedef struct SMCContext {
    AVFrame *prev_frame; // buffer for previous source frame

    uint8_t mono_value;
    int nb_distinct;
    int next_nb_distinct;
    uint8_t distinct_values[16];
    uint8_t next_distinct_values[16];

    uint8_t color_pairs[COLORS_PER_TABLE][CPAIR];
    uint8_t color_quads[COLORS_PER_TABLE][CQUAD];
    uint8_t color_octets[COLORS_PER_TABLE][COCTET];

    int key_frame;
} SMCContext;

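/* Advance pixel_ptr by nb_blocks 4x4 blocks, wrapping to the start of the
 * next block row when the end of the current row is reached; width and
 * stride are taken from the enclosing scope. */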
#define ADVANCE_BLOCK(pixel_ptr, row_ptr, nb_blocks) \
{ \
    for (int block = 0; block < nb_blocks && pixel_ptr && row_ptr; block++) { \
        pixel_ptr += 4; \
        if (pixel_ptr - row_ptr >= width) \
        { \
            row_ptr += stride * 4; \
            pixel_ptr = row_ptr; \
        } \
    } \
}

static int smc_cmp_values(const void *a, const void *b)
{
    const uint8_t *aa = a, *bb = b;

    return FFDIFFSIGN(aa[0], bb[0]);
}

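/* count the distinct values in a sorted block of pixels and copy them,
 * in ascending order, into distinct_values */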
static int count_distinct_items(const uint8_t *block_values,
                                uint8_t *distinct_values,
                                int size)
{
    int n = 1;

    distinct_values[0] = block_values[0];
    for (int i = 1; i < size; i++) {
        if (block_values[i] != block_values[i-1]) {
            distinct_values[n] = block_values[i];
            n++;
        }
    }

    return n;
}

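/* The CACHE_* macros test whether distinct value x of the current run is
 * already present in entry i of the corresponding color table, so a cached
 * table index can be reused instead of sending the colors again. */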
#define CACHE_PAIR(x) \
    (s->color_pairs[i][0] == distinct_values[x] || \
     s->color_pairs[i][1] == distinct_values[x])

#define CACHE_QUAD(x) \
    (s->color_quads[i][0] == distinct_values[x] || \
     s->color_quads[i][1] == distinct_values[x] || \
     s->color_quads[i][2] == distinct_values[x] || \
     s->color_quads[i][3] == distinct_values[x])

#define CACHE_OCTET(x) \
    (s->color_octets[i][0] == distinct_values[x] || \
     s->color_octets[i][1] == distinct_values[x] || \
     s->color_octets[i][2] == distinct_values[x] || \
     s->color_octets[i][3] == distinct_values[x] || \
     s->color_octets[i][4] == distinct_values[x] || \
     s->color_octets[i][5] == distinct_values[x] || \
     s->color_octets[i][6] == distinct_values[x] || \
     s->color_octets[i][7] == distinct_values[x])

static void smc_encode_stream(SMCContext *s, const AVFrame *frame,
                              PutByteContext *pb)
{
    const uint8_t *src_pixels = (const uint8_t *)frame->data[0];
    const int stride = frame->linesize[0];
    const uint8_t *prev_pixels = (const uint8_t *)s->prev_frame->data[0];
    uint8_t *distinct_values = s->distinct_values;
    const uint8_t *pixel_ptr, *row_ptr;
    const int width = frame->width;
    uint8_t block_values[16];
    int block_counter = 0;
    int color_pair_index = 0;
    int color_quad_index = 0;
    int color_octet_index = 0;
    int color_table_index; /* indexes to color pair, quad, or octet tables */
    int total_blocks;

    memset(s->color_pairs, 0, sizeof(s->color_pairs));
    memset(s->color_quads, 0, sizeof(s->color_quads));
    memset(s->color_octets, 0, sizeof(s->color_octets));

    /* Number of 4x4 blocks in frame. */
    total_blocks = ((frame->width + 3) / 4) * ((frame->height + 3) / 4);

    pixel_ptr = row_ptr = src_pixels;

    while (block_counter < total_blocks) {
        const uint8_t *xpixel_ptr = pixel_ptr;
        const uint8_t *xrow_ptr = row_ptr;
        int intra_skip_blocks = 0;
        int inter_skip_blocks = 0;
        int coded_distinct = 0;
        int coded_blocks = 0;
        int cache_index;
        int distinct = 0;
        int blocks = 0;

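        /* measure how many consecutive blocks are identical to the same
         * position in the previous frame (inter skip run) */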
        while (prev_pixels && s->key_frame == 0 && block_counter + inter_skip_blocks < total_blocks) {
            int compare = 0;

            for (int y = 0; y < 4; y++) {
                const ptrdiff_t offset = pixel_ptr - src_pixels;
                const uint8_t *prev_pixel_ptr = prev_pixels + offset;

                compare |= memcmp(prev_pixel_ptr + y * stride, pixel_ptr + y * stride, 4);
                if (compare)
                    break;
            }

            if (compare)
                break;

            if (inter_skip_blocks >= 256)
                break;
            inter_skip_blocks++;

            ADVANCE_BLOCK(pixel_ptr, row_ptr, 1)
        }

        pixel_ptr = xpixel_ptr;
        row_ptr = xrow_ptr;

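        /* measure how many consecutive blocks repeat the block that precedes
         * them in scan order (repeat-block run) */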
        while (block_counter > 0 && block_counter + intra_skip_blocks < total_blocks) {
            const ptrdiff_t offset = pixel_ptr - src_pixels;
            const int sy = offset / stride;
            const int sx = offset % stride;
            const int ny = sx < 4 ? sy - 4 : sy;
            const int nx = sx < 4 ? width - 4 : sx - 4;
            const uint8_t *old_pixel_ptr = src_pixels + nx + ny * stride;
            int compare = 0;

            for (int y = 0; y < 4; y++) {
                compare |= memcmp(old_pixel_ptr + y * stride, pixel_ptr + y * stride, 4);
                if (compare)
                    break;
            }

            if (compare)
                break;

            if (intra_skip_blocks >= 256)
                break;
            intra_skip_blocks++;
            ADVANCE_BLOCK(pixel_ptr, row_ptr, 1)
        }

        pixel_ptr = xpixel_ptr;
        row_ptr = xrow_ptr;

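        /* gather a run of blocks that all share the same set of (at most 16)
         * distinct pixel values */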
        while (block_counter + coded_blocks < total_blocks && coded_blocks < 256) {
            for (int y = 0; y < 4; y++)
                memcpy(block_values + y * 4, pixel_ptr + y * stride, 4);

            qsort(block_values, 16, sizeof(block_values[0]), smc_cmp_values);
            s->next_nb_distinct = count_distinct_items(block_values, s->next_distinct_values, 16);
            if (coded_blocks == 0) {
                memcpy(distinct_values, s->next_distinct_values, sizeof(s->distinct_values));
                s->nb_distinct = s->next_nb_distinct;
            } else {
                if (s->next_nb_distinct != s->nb_distinct ||
                    memcmp(distinct_values, s->next_distinct_values, s->nb_distinct)) {
                    break;
                }
            }
            s->mono_value = block_values[0];

            coded_distinct = s->nb_distinct;
            ADVANCE_BLOCK(pixel_ptr, row_ptr, 1)
            coded_blocks++;
            if (coded_distinct > 1 && coded_blocks >= 16)
                break;
        }

        pixel_ptr = xpixel_ptr;
        row_ptr = xrow_ptr;

        blocks = coded_blocks;
        distinct = coded_distinct;

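        /* decide between color coding and the skip/repeat runs; 17-20 are
         * internal markers that select the repeat and skip opcodes in the
         * switch below */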
        if (intra_skip_blocks > 0 && intra_skip_blocks >= inter_skip_blocks &&
            intra_skip_blocks > 0) {
            distinct = 17;
            blocks = intra_skip_blocks;
        }

        if (intra_skip_blocks > 16 && intra_skip_blocks >= inter_skip_blocks &&
            intra_skip_blocks > 0) {
            distinct = 18;
            blocks = intra_skip_blocks;
        }

        if (inter_skip_blocks > 0 && inter_skip_blocks > intra_skip_blocks &&
            inter_skip_blocks > 0) {
            distinct = 19;
            blocks = inter_skip_blocks;
        }

        if (inter_skip_blocks > 16 && inter_skip_blocks > intra_skip_blocks &&
            inter_skip_blocks > 0) {
            distinct = 20;
            blocks = inter_skip_blocks;
        }

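        /* emit one SMC chunk for the chosen run:
         *   1 distinct value   -> 0x60/0x70 single-color blocks
         *   2 distinct values  -> 0x80/0x90 two-color blocks (pair table)
         *   3-4 distinct       -> 0xA0/0xB0 four-color blocks (quad table)
         *   5-8 distinct       -> 0xC0/0xD0 eight-color blocks (octet table)
         *   more than 8        -> 0xE0 raw 16-color blocks
         *   17/18              -> 0x20/0x30 repeat previous block
         *   19/20              -> 0x00/0x10 skip (reuse previous frame) */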
        switch (distinct) {
        case 1:
            if (blocks <= 16) {
                bytestream2_put_byte(pb, 0x60 | (blocks - 1));
            } else {
                bytestream2_put_byte(pb, 0x70);
                bytestream2_put_byte(pb, blocks - 1);
            }
            bytestream2_put_byte(pb, s->mono_value);
            ADVANCE_BLOCK(pixel_ptr, row_ptr, blocks)
            break;
        case 2:
            cache_index = -1;
            for (int i = 0; i < COLORS_PER_TABLE; i++) {
                if (CACHE_PAIR(0) &&
                    CACHE_PAIR(1)) {
                    cache_index = i;
                    break;
                }
            }

            if (cache_index >= 0) {
                bytestream2_put_byte(pb, 0x90 | (blocks - 1));
                bytestream2_put_byte(pb, cache_index);
                color_table_index = cache_index;
            } else {
                bytestream2_put_byte(pb, 0x80 | (blocks - 1));

                color_table_index = color_pair_index;
                for (int i = 0; i < CPAIR; i++) {
                    s->color_pairs[color_table_index][i] = distinct_values[i];
                    bytestream2_put_byte(pb, distinct_values[i]);
                }

                color_pair_index++;
                if (color_pair_index == COLORS_PER_TABLE)
                    color_pair_index = 0;
            }

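            /* one bit per pixel, MSB first: set when the pixel matches the
             * second color of the pair, clear for the first color */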
            for (int i = 0; i < blocks; i++) {
                uint8_t value = s->color_pairs[color_table_index][1];
                uint16_t flags = 0;
                int shift = 15;

                for (int y = 0; y < 4; y++) {
                    for (int x = 0; x < 4; x++) {
                        flags |= (value == pixel_ptr[x + y * stride]) << shift;
                        shift--;
                    }
                }

                bytestream2_put_be16(pb, flags);

                ADVANCE_BLOCK(pixel_ptr, row_ptr, 1)
            }
            break;
        case 3:
        case 4:
            cache_index = -1;
            for (int i = 0; i < COLORS_PER_TABLE; i++) {
                if (CACHE_QUAD(0) &&
                    CACHE_QUAD(1) &&
                    CACHE_QUAD(2) &&
                    CACHE_QUAD(3)) {
                    cache_index = i;
                    break;
                }
            }

            if (cache_index >= 0) {
                bytestream2_put_byte(pb, 0xB0 | (blocks - 1));
                bytestream2_put_byte(pb, cache_index);
                color_table_index = cache_index;
            } else {
                bytestream2_put_byte(pb, 0xA0 | (blocks - 1));

                color_table_index = color_quad_index;
                for (int i = 0; i < CQUAD; i++) {
                    s->color_quads[color_table_index][i] = distinct_values[i];
                    bytestream2_put_byte(pb, distinct_values[i]);
                }

                color_quad_index++;
                if (color_quad_index == COLORS_PER_TABLE)
                    color_quad_index = 0;
            }

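            /* two bits per pixel, packed MSB first, each an index into the
             * four-color table entry */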
            for (int i = 0; i < blocks; i++) {
                uint32_t flags = 0;
                uint8_t quad[4];
                int shift = 30;

                for (int k = 0; k < 4; k++)
                    quad[k] = s->color_quads[color_table_index][k];

                for (int y = 0; y < 4; y++) {
                    for (int x = 0; x < 4; x++) {
                        int pixel = pixel_ptr[x + y * stride];
                        uint32_t idx = 0;

                        for (int w = 0; w < CQUAD; w++) {
                            if (quad[w] == pixel) {
                                idx = w;
                                break;
                            }
                        }

                        flags |= idx << shift;
                        shift -= 2;
                    }
                }

                bytestream2_put_be32(pb, flags);

                ADVANCE_BLOCK(pixel_ptr, row_ptr, 1)
            }
            break;
        case 5:
        case 6:
        case 7:
        case 8:
            cache_index = -1;
            for (int i = 0; i < COLORS_PER_TABLE; i++) {
                if (CACHE_OCTET(0) &&
                    CACHE_OCTET(1) &&
                    CACHE_OCTET(2) &&
                    CACHE_OCTET(3) &&
                    CACHE_OCTET(4) &&
                    CACHE_OCTET(5) &&
                    CACHE_OCTET(6) &&
                    CACHE_OCTET(7)) {
                    cache_index = i;
                    break;
                }
            }

            if (cache_index >= 0) {
                bytestream2_put_byte(pb, 0xD0 | (blocks - 1));
                bytestream2_put_byte(pb, cache_index);
                color_table_index = cache_index;
            } else {
                bytestream2_put_byte(pb, 0xC0 | (blocks - 1));

                color_table_index = color_octet_index;
                for (int i = 0; i < COCTET; i++) {
                    s->color_octets[color_table_index][i] = distinct_values[i];
                    bytestream2_put_byte(pb, distinct_values[i]);
                }

                color_octet_index++;
                if (color_octet_index == COLORS_PER_TABLE)
                    color_octet_index = 0;
            }

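            /* three bits per pixel index into the eight-color table; the 48
             * index bits are repacked into three big-endian 16-bit words in
             * the interleaved order the SMC bitstream expects */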
            for (int i = 0; i < blocks; i++) {
                uint64_t flags = 0;
                uint8_t octet[8];
                int shift = 45;

                for (int k = 0; k < 8; k++)
                    octet[k] = s->color_octets[color_table_index][k];

                for (int y = 0; y < 4; y++) {
                    for (int x = 0; x < 4; x++) {
                        int pixel = pixel_ptr[x + y * stride];
                        uint64_t idx = 0;

                        for (int w = 0; w < COCTET; w++) {
                            if (octet[w] == pixel) {
                                idx = w;
                                break;
                            }
                        }

                        flags |= idx << shift;
                        shift -= 3;
                    }
                }

                bytestream2_put_be16(pb, ((flags >> 32) & 0xFFF0) | ((flags >> 8) & 0xF));
                bytestream2_put_be16(pb, ((flags >> 20) & 0xFFF0) | ((flags >> 4) & 0xF));
                bytestream2_put_be16(pb, ((flags >> 8) & 0xFFF0) | ((flags >> 0) & 0xF));

                ADVANCE_BLOCK(pixel_ptr, row_ptr, 1)
            }
            break;
        default:
            bytestream2_put_byte(pb, 0xE0 | (blocks - 1));
            for (int i = 0; i < blocks; i++) {
                for (int y = 0; y < 4; y++) {
                    for (int x = 0; x < 4; x++)
                        bytestream2_put_byte(pb, pixel_ptr[x + y * stride]);
                }

                ADVANCE_BLOCK(pixel_ptr, row_ptr, 1)
            }
            break;
        case 17:
            bytestream2_put_byte(pb, 0x20 | (blocks - 1));
            ADVANCE_BLOCK(pixel_ptr, row_ptr, blocks)
            break;
        case 18:
            bytestream2_put_byte(pb, 0x30);
            bytestream2_put_byte(pb, blocks - 1);
            ADVANCE_BLOCK(pixel_ptr, row_ptr, blocks)
            break;
        case 19:
            bytestream2_put_byte(pb, 0x00 | (blocks - 1));
            ADVANCE_BLOCK(pixel_ptr, row_ptr, blocks)
            break;
        case 20:
            bytestream2_put_byte(pb, 0x10);
            bytestream2_put_byte(pb, blocks - 1);
            ADVANCE_BLOCK(pixel_ptr, row_ptr, blocks)
            break;
        }

        block_counter += blocks;
    }
}

static int smc_encode_init(AVCodecContext *avctx)
{
    SMCContext *s = avctx->priv_data;

    avctx->bits_per_coded_sample = 8;

    s->prev_frame = av_frame_alloc();
    if (!s->prev_frame)
        return AVERROR(ENOMEM);

    return 0;
}

static int smc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *frame, int *got_packet)
{
    SMCContext *s = avctx->priv_data;
    const AVFrame *pict = frame;
    PutByteContext pb;
    uint8_t *pal;
    int ret;

    ret = ff_alloc_packet(avctx, pkt, 8LL * avctx->height * avctx->width);
    if (ret < 0)
        return ret;

    if (avctx->gop_size == 0 || !s->prev_frame->data[0] ||
        (avctx->frame_number % avctx->gop_size) == 0) {
        s->key_frame = 1;
    } else {
        s->key_frame = 0;
    }

    bytestream2_init_writer(&pb, pkt->data, pkt->size);

    bytestream2_put_be32(&pb, 0x00);
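    /* the zero 32-bit word reserves room for the chunk header (flag byte plus
     * 24-bit chunk length) that is patched in below once the size is known */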

    pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
    if (!pal)
        return AVERROR(ENOMEM);
    memcpy(pal, frame->data[1], AVPALETTE_SIZE);

    smc_encode_stream(s, pict, &pb);

    av_shrink_packet(pkt, bytestream2_tell_p(&pb));

    pkt->data[0] = 0x0;

    // write chunk length
    AV_WB24(pkt->data + 1, pkt->size);

    av_frame_unref(s->prev_frame);
    ret = av_frame_ref(s->prev_frame, frame);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "cannot add reference\n");
        return ret;
    }

    if (s->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;

    *got_packet = 1;

    return 0;
}

static int smc_encode_end(AVCodecContext *avctx)
{
    SMCContext *s = (SMCContext *)avctx->priv_data;

    av_frame_free(&s->prev_frame);

    return 0;
}

const AVCodec ff_smc_encoder = {
    .name           = "smc",
    .long_name      = NULL_IF_CONFIG_SMALL("QuickTime Graphics (SMC)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SMC,
    .priv_data_size = sizeof(SMCContext),
    .init           = smc_encode_init,
    .encode2        = smc_encode_frame,
    .close          = smc_encode_end,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_PAL8,
                                                     AV_PIX_FMT_NONE },
};