Go to the documentation of this file.
25 #define BITSTREAM_READER_LE
102 static const int8_t
luma_adjust[] = { -4, -3, -2, -1, 1, 2, 3, 4 };
105 { 1, 1, 0, -1, -1, -1, 0, 1 },
106 { 0, 1, 1, 1, 0, -1, -1, -1 }
110 20, 28, 36, 44, 52, 60, 68, 76,
111 84, 92, 100, 106, 112, 116, 120, 124,
112 128, 132, 136, 140, 144, 150, 156, 164,
113 172, 180, 188, 196, 204, 212, 220, 228
123 "Dimensions should be a multiple of two.\n");
130 if (!
s->old_y_avg || !
s->buf1 || !
s->buf2) {
138 s->linesize[0] = avctx->
width;
140 s->linesize[2] = avctx->
width / 2;
149 memset(
s->old_u, 0x10, avctx->
width * avctx->
height / 4);
150 memset(
s->old_v, 0x10, avctx->
width * avctx->
height / 4);
195 int buf_size = avpkt->
size;
201 uint8_t *old_y, *old_cb, *old_cr,
202 *new_y, *new_cb, *new_cr;
204 unsigned old_y_stride, old_cb_stride, old_cr_stride,
205 new_y_stride, new_cb_stride, new_cr_stride;
206 unsigned total_blocks = avctx->
width * avctx->
height / 4,
207 block_index, block_x = 0;
208 unsigned y[4] = { 0 },
cb = 0x10,
cr = 0x10;
209 int skip = -1, y_avg = 0,
i, j;
213 if (buf_size <= 16) {
228 new_y_stride =
s->linesize[0];
229 new_cb_stride =
s->linesize[1];
230 new_cr_stride =
s->linesize[2];
234 old_y_stride =
s->linesize[0];
235 old_cb_stride =
s->linesize[1];
236 old_cr_stride =
s->linesize[2];
238 for (block_index = 0; block_index < total_blocks; block_index++) {
251 y[2] = old_y[old_y_stride];
252 y[3] = old_y[old_y_stride + 1];
258 unsigned sign_selector =
get_bits(&gb, 6);
259 unsigned difference_selector =
get_bits(&gb, 2);
261 for (
i = 0;
i < 4;
i++) {
269 unsigned adjust_index =
get_bits(&gb, 3);
272 for (
i = 0;
i < 4;
i++)
281 unsigned adjust_index =
get_bits(&gb, 3);
291 new_y[new_y_stride] = y[2];
292 new_y[new_y_stride + 1] = y[3];
303 if (block_x * 2 == avctx->
width) {
305 old_y += old_y_stride * 2 - avctx->
width;
306 old_cb += old_cb_stride - avctx->
width / 2;
307 old_cr += old_cr_stride - avctx->
width / 2;
308 new_y += new_y_stride * 2 - avctx->
width;
309 new_cb += new_cb_stride - avctx->
width / 2;
310 new_cr += new_cr_stride - avctx->
width / 2;
322 for (j = 0; j < avctx->
height; j++) {
324 dstY[
i] = new_y[
i] << 2;
326 new_y += new_y_stride;
328 for (j = 0; j < avctx->
height / 2; j++) {
329 for (
i = 0;
i < avctx->
width / 2;
i++) {
335 new_cb += new_cb_stride;
336 new_cr += new_cr_stride;
339 ff_dlog(avctx,
"Frame data: provided %d bytes, used %d bytes\n",
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
static av_cold int init(AVCodecContext *avctx)
static int get_bits_left(GetBitContext *gb)
Filters: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats — for video that means pixel format, for audio that means channel layout and sample format. Because they are references to shared objects, when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions.
#define FFSWAP(type, a, b)
static double cb(void *priv, double x, double y)
static int get_bits_count(const GetBitContext *s)
This structure describes decoded (raw) audio or video data.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static const int8_t chroma_adjust[2][8]
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static av_cold int escape130_decode_close(AVCodecContext *avctx)
static unsigned int get_bits1(GetBitContext *s)
AVCodec ff_escape130_decoder
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static int decode_skip_count(GetBitContext *gb)
static const uint8_t offset_table[]
#define i(width, name, range_min, range_max)
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just leave it at its default value.
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static av_cold int escape130_decode_init(AVCodecContext *avctx)
static const int8_t sign_table[64][4]
main external API structure.
static const uint8_t chroma_vals[]
static const int8_t luma_adjust[]
This structure stores compressed data.
static double cr(void *priv, double x, double y)
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int escape130_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)