FFmpeg
ljpegenc.c
/*
 * lossless JPEG encoder
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2003 Alex Beregszaszi
 * Copyright (c) 2003-2004 Michael Niedermayer
 *
 * Support for external huffman table, various fixes (AVID workaround),
 * aspecting, new decode_frame mechanism and apple mjpeg-b support
 * by Alex Beregszaszi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * lossless JPEG encoder.
 */

#include "libavutil/frame.h"
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"

#include "avcodec.h"
#include "idctdsp.h"
#include "internal.h"
#include "jpegtables.h"
#include "mjpegenc_common.h"
#include "mjpeg.h"

typedef struct LJpegEncContext {
    AVClass *class;
    IDCTDSPContext idsp;
    ScanTable scantable;
    uint16_t matrix[64];

    int vsample[4];
    int hsample[4];

    uint16_t huff_code_dc_luminance[12];
    uint16_t huff_code_dc_chrominance[12];
    uint8_t  huff_size_dc_luminance[12];
    uint8_t  huff_size_dc_chrominance[12];

    uint16_t (*scratch)[4];
    int pred;
} LJpegEncContext;

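/* Encode a packed BGR/BGR0/BGRA frame: each row is converted into a
 * green-difference representation (a pseudo-luma plus B-G and R-G offsets
 * biased by 0x100) and the components are DPCM-coded with the selected
 * predictor. */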
static int ljpeg_encode_bgr(AVCodecContext *avctx, PutBitContext *pb,
                            const AVFrame *frame)
{
    LJpegEncContext *s = avctx->priv_data;
    const int width    = frame->width;
    const int height   = frame->height;
    const int linesize = frame->linesize[0];
    uint16_t (*buffer)[4] = s->scratch;
    int left[4], top[4], topleft[4];
    int x, y, i;

#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->prediction_method)
        s->pred = avctx->prediction_method + 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    for (i = 0; i < 4; i++)
        buffer[0][i] = 1 << (9 - 1);

    for (y = 0; y < height; y++) {
        const int modified_predictor = y ? s->pred : 1;
        uint8_t *ptr = frame->data[0] + (linesize * y);

        if (pb->buf_end - pb->buf - (put_bits_count(pb) >> 3) < width * 4 * 4) {
            av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return -1;
        }

        for (i = 0; i < 4; i++)
            top[i] = left[i] = topleft[i] = buffer[0][i];

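        /* Convert each packed pixel into the scratch row: component 0 is a
         * pseudo-luma (B + 2*G + R) >> 2, components 1 and 2 are the green
         * differences B - G and R - G biased by 0x100; for BGRA the alpha
         * value is kept as component 3. */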
        for (x = 0; x < width; x++) {
            if (avctx->pix_fmt == AV_PIX_FMT_BGR24) {
                buffer[x][1] =  ptr[3 * x + 0] -     ptr[3 * x + 1] + 0x100;
                buffer[x][2] =  ptr[3 * x + 2] -     ptr[3 * x + 1] + 0x100;
                buffer[x][0] = (ptr[3 * x + 0] + 2 * ptr[3 * x + 1] + ptr[3 * x + 2]) >> 2;
            } else {
                buffer[x][1] =  ptr[4 * x + 0] -     ptr[4 * x + 1] + 0x100;
                buffer[x][2] =  ptr[4 * x + 2] -     ptr[4 * x + 1] + 0x100;
                buffer[x][0] = (ptr[4 * x + 0] + 2 * ptr[4 * x + 1] + ptr[4 * x + 2]) >> 2;
                if (avctx->pix_fmt == AV_PIX_FMT_BGRA)
                    buffer[x][3] = ptr[4 * x + 3];
            }

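            /* Predict each component from its left, top and top-left
             * neighbours, wrap the residual into [-256, 255] and entropy-code
             * it with the JPEG DC Huffman tables (the luma table for
             * components 0 and 3, the chroma table for 1 and 2). */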
            for (i = 0; i < 3 + (avctx->pix_fmt == AV_PIX_FMT_BGRA); i++) {
                int pred, diff;

                PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);

                topleft[i] = top[i];
                top[i]     = buffer[x + 1][i];

                left[i] = buffer[x][i];

                diff = ((left[i] - pred + 0x100) & 0x1FF) - 0x100;

                if (i == 0 || i == 3)
                    ff_mjpeg_encode_dc(pb, diff, s->huff_size_dc_luminance,
                                       s->huff_code_dc_luminance); //FIXME ugly
                else
                    ff_mjpeg_encode_dc(pb, diff, s->huff_size_dc_chrominance,
                                       s->huff_code_dc_chrominance);
            }
        }
    }

    return 0;
}

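/* DPCM-encode one macroblock of a planar YUV frame. Macroblocks in the first
 * row or column of the picture take the slower path that handles missing
 * neighbours: the very first sample is predicted as 128, the rest of the top
 * picture row from the left neighbour and the left picture column from the
 * sample above. */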
static void ljpeg_encode_yuv_mb(LJpegEncContext *s, PutBitContext *pb,
                                const AVFrame *frame, int predictor,
                                int mb_x, int mb_y)
{
    int i;

    if (mb_x == 0 || mb_y == 0) {
        for (i = 0; i < 3; i++) {
            uint8_t *ptr;
            int x, y, h, v, linesize;
            h = s->hsample[i];
            v = s->vsample[i];
            linesize = frame->linesize[i];

            for (y = 0; y < v; y++) {
                for (x = 0; x < h; x++) {
                    int pred;

                    ptr = frame->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                    if (y == 0 && mb_y == 0) {
                        if (x == 0 && mb_x == 0)
                            pred = 128;
                        else
                            pred = ptr[-1];
                    } else {
                        if (x == 0 && mb_x == 0) {
                            pred = ptr[-linesize];
                        } else {
                            PREDICT(pred, ptr[-linesize - 1], ptr[-linesize],
                                    ptr[-1], predictor);
                        }
                    }

                    if (i == 0)
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
                    else
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
                }
            }
        }
    } else {
        for (i = 0; i < 3; i++) {
            uint8_t *ptr;
            int x, y, h, v, linesize;
            h = s->hsample[i];
            v = s->vsample[i];
            linesize = frame->linesize[i];

            for (y = 0; y < v; y++) {
                for (x = 0; x < h; x++) {
                    int pred;

                    ptr = frame->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                    PREDICT(pred, ptr[-linesize - 1], ptr[-linesize], ptr[-1], predictor);

                    if (i == 0)
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
                    else
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
                }
            }
        }
    }
}

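/* Encode a planar YUV frame macroblock by macroblock; each macroblock covers
 * hsample[0] x vsample[0] luma samples, i.e. its size follows the chroma
 * subsampling of the input pixel format. */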
static int ljpeg_encode_yuv(AVCodecContext *avctx, PutBitContext *pb,
                            const AVFrame *frame)
{
    LJpegEncContext *s = avctx->priv_data;
    const int mb_width  = (avctx->width  + s->hsample[0] - 1) / s->hsample[0];
    const int mb_height = (avctx->height + s->vsample[0] - 1) / s->vsample[0];
    int mb_x, mb_y;

#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->prediction_method)
        s->pred = avctx->prediction_method + 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    for (mb_y = 0; mb_y < mb_height; mb_y++) {
        if (pb->buf_end - pb->buf - (put_bits_count(pb) >> 3) <
            mb_width * 4 * 3 * s->hsample[0] * s->vsample[0]) {
            av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return -1;
        }

        for (mb_x = 0; mb_x < mb_width; mb_x++)
            ljpeg_encode_yuv_mb(s, pb, frame, s->pred, mb_x, mb_y);
    }

    return 0;
}

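/* Top-level encode2 callback: size the packet for the worst case, write the
 * JPEG picture header, run the BGR or YUV scan encoder, escape 0xFF bytes in
 * the entropy-coded data and append the picture trailer. */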
static int ljpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
{
    LJpegEncContext *s = avctx->priv_data;
    PutBitContext pb;
    const int width  = avctx->width;
    const int height = avctx->height;
    const int mb_width  = (width  + s->hsample[0] - 1) / s->hsample[0];
    const int mb_height = (height + s->vsample[0] - 1) / s->vsample[0];
    int max_pkt_size = AV_INPUT_BUFFER_MIN_SIZE;
    int ret, header_bits;

    if (   avctx->pix_fmt == AV_PIX_FMT_BGR0
        || avctx->pix_fmt == AV_PIX_FMT_BGR24)
        max_pkt_size += width * height * 3 * 4;
    else if (avctx->pix_fmt == AV_PIX_FMT_BGRA)
        max_pkt_size += width * height * 4 * 4;
    else {
        max_pkt_size += mb_width * mb_height * 3 * 4
                        * s->hsample[0] * s->vsample[0];
    }

    if ((ret = ff_alloc_packet2(avctx, pkt, max_pkt_size, 0)) < 0)
        return ret;

    init_put_bits(&pb, pkt->data, pkt->size);

    ff_mjpeg_encode_picture_header(avctx, &pb, &s->scantable,
                                   s->pred, s->matrix, s->matrix);

    header_bits = put_bits_count(&pb);

    if (   avctx->pix_fmt == AV_PIX_FMT_BGR0
        || avctx->pix_fmt == AV_PIX_FMT_BGRA
        || avctx->pix_fmt == AV_PIX_FMT_BGR24)
        ret = ljpeg_encode_bgr(avctx, &pb, pict);
    else
        ret = ljpeg_encode_yuv(avctx, &pb, pict);
    if (ret < 0)
        return ret;

    emms_c();

    ff_mjpeg_escape_FF(&pb, header_bits >> 3);
    ff_mjpeg_encode_picture_trailer(&pb, header_bits);

    flush_put_bits(&pb);
    pkt->size   = put_bits_ptr(&pb) - pb.buf;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

static av_cold int ljpeg_encode_close(AVCodecContext *avctx)
{
    LJpegEncContext *s = avctx->priv_data;

    av_freep(&s->scratch);

    return 0;
}

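/* Encoder init: reject limited-range YUV unless strict_std_compliance allows
 * unofficial extensions, allocate the scratch row used by the packed BGR
 * path and build the standard JPEG DC Huffman code tables. */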
static av_cold int ljpeg_encode_init(AVCodecContext *avctx)
{
    LJpegEncContext *s = avctx->priv_data;

    if ((avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
         avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
         avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
         avctx->color_range == AVCOL_RANGE_MPEG) &&
        avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
        av_log(avctx, AV_LOG_ERROR,
               "Limited range YUV is non-standard, set strict_std_compliance to "
               "at least unofficial to use it.\n");
        return AVERROR(EINVAL);
    }

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

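    /* One scratch row of width + 1 four-component entries is enough for the
     * packed BGR path; the extra entry keeps the buffer[x + 1] read used for
     * the "top" predictor in bounds at the right edge. */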
    s->scratch = av_malloc_array(avctx->width + 1, sizeof(*s->scratch));
    if (!s->scratch)
        goto fail;

    ff_idctdsp_init(&s->idsp, avctx);
    ff_init_scantable(s->idsp.idct_permutation, &s->scantable, ff_zigzag_direct);

    ff_mjpeg_init_hvsample(avctx, s->hsample, s->vsample);

    ff_mjpeg_build_huffman_codes(s->huff_size_dc_luminance,
                                 s->huff_code_dc_luminance,
                                 avpriv_mjpeg_bits_dc_luminance,
                                 avpriv_mjpeg_val_dc);
    ff_mjpeg_build_huffman_codes(s->huff_size_dc_chrominance,
                                 s->huff_code_dc_chrominance,
                                 avpriv_mjpeg_bits_dc_chrominance,
                                 avpriv_mjpeg_val_dc);

    return 0;
fail:
    ljpeg_encode_close(avctx);
    return AVERROR(ENOMEM);
}

#define OFFSET(x) offsetof(LJpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "pred", "Prediction method", OFFSET(pred), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 3, VE, "pred" },
    { "left",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "pred" },
    { "plane",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, INT_MIN, INT_MAX, VE, "pred" },
    { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 3 }, INT_MIN, INT_MAX, VE, "pred" },

    { NULL },
};

static const AVClass ljpeg_class = {
    .class_name = "ljpeg",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_ljpeg_encoder = {
    .name           = "ljpeg",
    .long_name      = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_LJPEG,
    .priv_data_size = sizeof(LJpegEncContext),
    .priv_class     = &ljpeg_class,
    .init           = ljpeg_encode_init,
    .encode2        = ljpeg_encode_frame,
    .close          = ljpeg_encode_close,
    .capabilities   = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0,
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_NONE},
};