a64multienc.c
/*
 * a64 video encoder - multicolor modes
 * Copyright (c) 2009 Tobias Bindhammer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * a64 video encoder - multicolor modes
 */
#include "a64colors.h"
#include "a64tables.h"
#include "elbg.h"
#include "encode.h"
#include "internal.h"
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

#define DITHERSTEPS   8
#define CHARSET_CHARS 256
#define INTERLACED    1
#define CROP_SCREENS  1

#define C64XRES 320
#define C64YRES 200

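/*
 * Geometry implied by the constants above: the 320x200 screen is split into
 * 40x25 = 1000 character cells of 8x8 pixels. In multicolor mode each cell
 * holds 4x8 double-wide pixels, so a cell contributes 32 luma samples and a
 * full frame of ELBG input is 1000 * 32 = 32000 ints (matching the buffer
 * sizes allocated in a64multi_encode_init()). A charset of CHARSET_CHARS
 * characters takes 256 * 8 = 0x800 bytes, doubled to 0x1000 when INTERLACED.
 */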
typedef struct A64Context {
    /* variables for multicolor modes */
    AVLFG randctx;
    int mc_lifetime;
    int mc_use_5col;
    unsigned mc_frame_counter;
    int *mc_meta_charset;
    int *mc_charmap;
    int *mc_best_cb;
    int mc_luma_vals[5];
    uint8_t *mc_colram;
    uint8_t *mc_palette;
    int mc_pal_size;

    /* pts of the next packet that will be output */
    int64_t next_pts;
} A64Context;

/* gray gradient */
static const uint8_t mc_colors[5]={0x0,0xb,0xc,0xf,0x1};
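/*
 * These values are indices into a64_palette; assuming the usual C64 color
 * numbering in a64colors.h, the ramp is 0x0 black, 0xb dark grey, 0xc mid
 * grey, 0xf light grey, 0x1 white, i.e. ordered from darkest to brightest so
 * that mc_luma_vals[] computed in a64multi_encode_init() increases
 * monotonically, as render_charset() relies on.
 */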

/* other possible gradients - to be tested */
//static const uint8_t mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
//static const uint8_t mc_colors[5]={0x0,0x9,0x8,0xa,0x3};

static void to_meta_with_crop(AVCodecContext *avctx,
                              const AVFrame *p, int *dest)
{
    int blockx, blocky, x, y;
    int luma = 0;
    int height = FFMIN(avctx->height, C64YRES);
    int width  = FFMIN(avctx->width , C64XRES);
    uint8_t *src = p->data[0];

    for (blocky = 0; blocky < C64YRES; blocky += 8) {
        for (blockx = 0; blockx < C64XRES; blockx += 8) {
            for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
                for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
                    if(x < width && y < height) {
                        if (x + 1 < width) {
                            /* build average over 2 pixels */
                            luma = (src[(x + 0 + y * p->linesize[0])] +
                                    src[(x + 1 + y * p->linesize[0])]) / 2;
                        } else {
                            luma = src[(x + y * p->linesize[0])];
                        }
                        /* write blocks as linear data now so they are suitable for elbg */
                        dest[0] = luma;
                    }
                    dest++;
                }
            }
        }
    }
}

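/*
 * Layout of the meta buffer filled above: cells are stored left to right,
 * top to bottom, each as 32 consecutive ints (one averaged luma value per
 * pair of pixels, 4 per row, 8 rows). Samples that fall outside the input
 * frame simply keep their zero-initialized value. This is the point format
 * consumed by avpriv_do_elbg() in a64multi_encode_frame(): 1000 points of
 * dimension 32 per buffered frame.
 */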
static void render_charset(AVCodecContext *avctx, uint8_t *charset,
                           uint8_t *colrammap)
{
    A64Context *c = avctx->priv_data;
    uint8_t row1, row2;
    int charpos, x, y;
    int a, b;
    uint8_t pix;
    int lowdiff, highdiff;
    int *best_cb = c->mc_best_cb;
    uint8_t index1[256];
    uint8_t index2[256];
    uint8_t dither[256];
    int i;
    int distance;

    /* Generate lookup-tables for dither and index before looping.
     * This code relies on c->mc_luma_vals[c->mc_pal_size - 1] being
     * the maximum of all the mc_luma_vals values and on the minimum
     * being zero; this ensures that dither is properly initialized. */
    i = 0;
    for (a = 0; a < 256; a++) {
        if (i < c->mc_pal_size - 1 && a == c->mc_luma_vals[i + 1]) {
            distance = c->mc_luma_vals[i + 1] - c->mc_luma_vals[i];
            for (b = 0; b <= distance; b++) {
                dither[c->mc_luma_vals[i] + b] = b * (DITHERSTEPS - 1) / distance;
            }
            i++;
        }
        if (i >= c->mc_pal_size - 1) dither[a] = 0;
        index1[a] = i;
        index2[a] = FFMIN(i + 1, c->mc_pal_size - 1);
    }

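    /*
     * A small worked example of the tables above: for a luma value sitting
     * exactly halfway between c->mc_luma_vals[i] and c->mc_luma_vals[i + 1],
     * index1[] selects the darker bracketing entry i, index2[] the brighter
     * entry i + 1, and dither[] is about (DITHERSTEPS - 1) / 2, so roughly
     * half of the positions in the dither pattern pick the brighter color.
     * Values equal to one of the palette lumas (or above the brightest one)
     * get dither 0 and are rendered with that color alone.
     */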
    /* and render charset */
    for (charpos = 0; charpos < CHARSET_CHARS; charpos++) {
        lowdiff  = 0;
        highdiff = 0;
        for (y = 0; y < 8; y++) {
            row1 = 0; row2 = 0;
            for (x = 0; x < 4; x++) {
                pix = best_cb[y * 4 + x];

                /* accumulate error for brightest/darkest color */
                if (index1[pix] >= 3)
                    highdiff += pix - c->mc_luma_vals[3];
                if (index1[pix] < 1)
                    lowdiff += c->mc_luma_vals[1] - pix;

                row1 <<= 2;

                if (INTERLACED) {
                    row2 <<= 2;
                    if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 0][x & 3])
                        row1 |= 3 - (index2[pix] & 3);
                    else
                        row1 |= 3 - (index1[pix] & 3);

                    if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 1][x & 3])
                        row2 |= 3 - (index2[pix] & 3);
                    else
                        row2 |= 3 - (index1[pix] & 3);
                } else {
                    if (multi_dither_patterns[dither[pix]][(y & 3)][x & 3])
                        row1 |= 3 - (index2[pix] & 3);
                    else
                        row1 |= 3 - (index1[pix] & 3);
                }
            }
            charset[y + 0x000] = row1;
            if (INTERLACED) charset[y + 0x800] = row2;
        }
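        /*
         * In 5-color mode a character can only use one of the two extreme
         * gray levels: index 4 (brightest) produces the same 2-bit pattern
         * as index 0 (darkest), and the colram flag recorded below tells
         * them apart per character. If a char ended up needing both
         * extremes (highdiff and lowdiff both nonzero), the extreme with
         * the smaller accumulated error is clamped away and the char is
         * rendered again.
         */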
        /* do we need to adjust pixels? */
        if (highdiff > 0 && lowdiff > 0 && c->mc_use_5col) {
            if (lowdiff > highdiff) {
                for (x = 0; x < 32; x++)
                    best_cb[x] = FFMIN(c->mc_luma_vals[3], best_cb[x]);
            } else {
                for (x = 0; x < 32; x++)
                    best_cb[x] = FFMAX(c->mc_luma_vals[1], best_cb[x]);
            }
            charpos--;          /* redo now adjusted char */
        /* no adjustment needed, all fine */
        } else {
            /* advance pointers */
            best_cb += 32;
            charset += 8;

            /* remember colorram value */
            colrammap[charpos] = (highdiff > 0);
        }
    }
}

static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;
    av_freep(&c->mc_meta_charset);
    av_freep(&c->mc_best_cb);
    av_freep(&c->mc_charmap);
    av_freep(&c->mc_colram);
    return 0;
}

static av_cold int a64multi_encode_init(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;
    int a;
    av_lfg_init(&c->randctx, 1);

    if (avctx->global_quality < 1) {
        c->mc_lifetime = 4;
    } else {
        c->mc_lifetime = avctx->global_quality / FF_QP2LAMBDA;
    }

    av_log(avctx, AV_LOG_INFO, "charset lifetime set to %d frame(s)\n", c->mc_lifetime);

    c->mc_frame_counter = 0;
    c->mc_use_5col      = avctx->codec->id == AV_CODEC_ID_A64_MULTI5;
    c->mc_pal_size      = 4 + c->mc_use_5col;

    /* precalc luma values for later use */
    for (a = 0; a < c->mc_pal_size; a++) {
        c->mc_luma_vals[a] = a64_palette[mc_colors[a]][0] * 0.30 +
                             a64_palette[mc_colors[a]][1] * 0.59 +
                             a64_palette[mc_colors[a]][2] * 0.11;
    }

    if (!(c->mc_meta_charset = av_mallocz_array(c->mc_lifetime, 32000 * sizeof(int))) ||
        !(c->mc_best_cb      = av_malloc(CHARSET_CHARS * 32 * sizeof(int)))           ||
        !(c->mc_charmap      = av_mallocz_array(c->mc_lifetime, 1000 * sizeof(int)))  ||
        !(c->mc_colram       = av_mallocz(CHARSET_CHARS * sizeof(uint8_t)))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate buffer memory.\n");
        return AVERROR(ENOMEM);
    }

    /* set up extradata */
    if (!(avctx->extradata = av_mallocz(8 * 4 + AV_INPUT_BUFFER_PADDING_SIZE))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate memory for extradata.\n");
        return AVERROR(ENOMEM);
    }
    avctx->extradata_size = 8 * 4;
    AV_WB32(avctx->extradata, c->mc_lifetime);
    AV_WB32(avctx->extradata + 16, INTERLACED);

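    /*
     * Extradata layout (32 bytes, big-endian 32-bit fields; the remaining
     * offsets are filled in by a64multi_encode_frame()):
     *   +0   charset lifetime in frames
     *   +4   number of frames packed into the most recent packet
     *   +8   charset size in bytes
     *   +12  screen size + colram size in bytes
     *   +16  whether the charset is interlaced
     */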
    if (!avctx->codec_tag)
        avctx->codec_tag = AV_RL32("a64m");

    c->next_pts = AV_NOPTS_VALUE;

    return 0;
}

static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colram)
{
    int a;
    uint8_t temp;
    /* only needs to be done in 5col mode */
    /* XXX could be squeezed to 0x80 bytes */
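    /*
     * Packing scheme used below: for each of the 256 output bytes, four
     * per-character colram flags (looked up through the charmap, i.e. per
     * screen position) are collected from positions a, a + 0x100, a + 0x200
     * and a + 0x300 (the last one only while it stays below the 1000-entry
     * screen, hence the 0xe8 check), then shifted left by 2 so the flags
     * occupy bits 2..5 of the byte.
     */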
    for (a = 0; a < 256; a++) {
        temp  = colram[charmap[a + 0x000]] << 0;
        temp |= colram[charmap[a + 0x100]] << 1;
        temp |= colram[charmap[a + 0x200]] << 2;
        if (a < 0xe8) temp |= colram[charmap[a + 0x300]] << 3;
        buf[a] = temp << 2;
    }
}

static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                 const AVFrame *p, int *got_packet)
{
    A64Context *c = avctx->priv_data;

    int frame;
    int x, y;
    int b_height;
    int b_width;

    int req_size, ret;
    uint8_t *buf = NULL;

    int *charmap    = c->mc_charmap;
    uint8_t *colram = c->mc_colram;
    int *meta       = c->mc_meta_charset;
    int *best_cb    = c->mc_best_cb;

    int charset_size = 0x800 * (INTERLACED + 1);
    int colram_size  = 0x100 * c->mc_use_5col;
    int screen_size;

    if (CROP_SCREENS) {
        b_height = FFMIN(avctx->height, C64YRES) >> 3;
        b_width  = FFMIN(avctx->width , C64XRES) >> 3;
        screen_size = b_width * b_height;
    } else {
        b_height = C64YRES >> 3;
        b_width  = C64XRES >> 3;
        screen_size = 0x400;
    }

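    /*
     * One output packet covers a whole charset lifetime:
     *   [charset: charset_size bytes]
     *   followed by mc_lifetime times
     *   [screen: screen_size bytes][colram: colram_size bytes, 5-color only]
     * which is exactly the alloc_size requested from ff_get_encode_buffer()
     * further down.
     */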
    /* no data means we should finish encoding ASAP */
    if (!p) {
        /* all done, end encoding */
        if (!c->mc_lifetime) return 0;
        /* no more frames in queue, prepare to flush remaining frames */
        if (!c->mc_frame_counter) {
            c->mc_lifetime = 0;
        }
        /* still frames in queue so limit lifetime to remaining frames */
        else c->mc_lifetime = c->mc_frame_counter;
        /* still new data available */
    } else {
        /* fill up mc_meta_charset with data until the lifetime is reached */
        if (c->mc_frame_counter < c->mc_lifetime) {
            to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
            c->mc_frame_counter++;
            if (c->next_pts == AV_NOPTS_VALUE)
                c->next_pts = p->pts;
            /* lifetime not reached yet, so wait for the next frame first */
            return 0;
        }
    }

    /* lifetime reached so now convert X frames at once */
    if (c->mc_frame_counter == c->mc_lifetime) {
        req_size = 0;
        /* any frames to encode? */
        if (c->mc_lifetime) {
            int alloc_size = charset_size + c->mc_lifetime * (screen_size + colram_size);
            if ((ret = ff_get_encode_buffer(avctx, pkt, alloc_size, 0)) < 0)
                return ret;
            buf = pkt->data;

            /* calc optimal new charset + charmaps */
            ret = avpriv_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb,
                                   CHARSET_CHARS, 50, charmap, &c->randctx);
            if (ret < 0)
                return ret;
            ret = avpriv_do_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb,
                                 CHARSET_CHARS, 50, charmap, &c->randctx);
            if (ret < 0)
                return ret;

            /* create colorram map and a c64 readable charset */
            render_charset(avctx, buf, colram);

            /* advance pointers */
            buf      += charset_size;
            req_size += charset_size;
        }

        /* write x frames to buf */
        for (frame = 0; frame < c->mc_lifetime; frame++) {
            /* copy charmap to buf. buf is uchar*, charmap is int*, so no memcpy here, sorry */
            for (y = 0; y < b_height; y++) {
                for (x = 0; x < b_width; x++) {
                    buf[y * b_width + x] = charmap[y * b_width + x];
                }
            }
            /* advance pointers */
            buf      += screen_size;
            req_size += screen_size;

            /* compress and copy colram to buf */
            if (c->mc_use_5col) {
                a64_compress_colram(buf, charmap, colram);
                /* advance pointers */
                buf      += colram_size;
                req_size += colram_size;
            }

            /* advance to next charmap */
            charmap += 1000;
        }

        AV_WB32(avctx->extradata + 4, c->mc_frame_counter);
        AV_WB32(avctx->extradata + 8, charset_size);
        AV_WB32(avctx->extradata + 12, screen_size + colram_size);

        /* reset counter */
        c->mc_frame_counter = 0;

        pkt->pts = pkt->dts = c->next_pts;
        c->next_pts         = AV_NOPTS_VALUE;

        av_assert0(pkt->size == req_size);
        pkt->flags |= AV_PKT_FLAG_KEY;
        *got_packet = !!req_size;
    }
    return 0;
}

#if CONFIG_A64MULTI_ENCODER
const AVCodec ff_a64multi_encoder = {
    .name           = "a64multi",
    .long_name      = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_A64_MULTI,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
    .priv_data_size = sizeof(A64Context),
    .init           = a64multi_encode_init,
    .encode2        = a64multi_encode_frame,
    .close          = a64multi_close_encoder,
    .pix_fmts       = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
#endif
#if CONFIG_A64MULTI5_ENCODER
const AVCodec ff_a64multi5_encoder = {
    .name           = "a64multi5",
    .long_name      = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_A64_MULTI5,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
    .priv_data_size = sizeof(A64Context),
    .init           = a64multi_encode_init,
    .encode2        = a64multi_encode_frame,
    .close          = a64multi_close_encoder,
    .pix_fmts       = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
#endif