ljpegenc.c
/*
 * lossless JPEG encoder
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2003 Alex Beregszaszi
 * Copyright (c) 2003-2004 Michael Niedermayer
 *
 * Support for external huffman table, various fixes (AVID workaround),
 * aspecting, new decode_frame mechanism and apple mjpeg-b support
 * by Alex Beregszaszi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * lossless JPEG encoder.
 */

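/*
 * Overview:
 *
 * Packed BGR24/BGR0/BGRA input goes through ljpeg_encode_bgr(): every pixel
 * is first converted to a green-difference triple
 *
 *     C1 = B - G + 0x100
 *     C2 = R - G + 0x100
 *     C0 = (B + 2*G + R) >> 2
 *
 * (for B=10, G=20, R=30 this gives C1=246, C2=266, C0=20), then each
 * component is predicted from its left/top neighbours with the PREDICT()
 * macro from mjpeg.h and the prediction error is written by
 * ff_mjpeg_encode_dc() using the standard JPEG DC Huffman tables.  Planar
 * YUV input goes through ljpeg_encode_yuv()/ljpeg_encode_yuv_mb(), which
 * predict each plane sample directly from its already coded neighbours.
 * There is no DCT and no quantization, only prediction and Huffman coding
 * of the residuals.
 */
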
#include "libavutil/frame.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "codec_internal.h"
#include "encode.h"
#include "jpegtables.h"
#include "mjpegenc_common.h"
#include "mjpeg.h"

typedef struct LJpegEncContext {
    AVClass *class;

    int vsample[4];
    int hsample[4];

    uint16_t huff_code_dc_luminance[12];
    uint16_t huff_code_dc_chrominance[12];
    uint8_t  huff_size_dc_luminance[12];
    uint8_t  huff_size_dc_chrominance[12];

    uint16_t (*scratch)[4];
    int pred;
} LJpegEncContext;

/* Encode a packed BGR24/BGR0/BGRA frame: pixels are converted to a
 * green-difference representation, then every component is predicted from
 * its left/top neighbours and the prediction error is Huffman-coded. */
static int ljpeg_encode_bgr(AVCodecContext *avctx, PutBitContext *pb,
                            const AVFrame *frame)
{
    LJpegEncContext *s    = avctx->priv_data;
    const int width       = frame->width;
    const int height      = frame->height;
    const int linesize    = frame->linesize[0];
    uint16_t (*buffer)[4] = s->scratch;
    int left[4], top[4], topleft[4];
    int x, y, i;

    for (i = 0; i < 4; i++)
        buffer[0][i] = 1 << (9 - 1);

    for (y = 0; y < height; y++) {
        const int modified_predictor = y ? s->pred : 1;
        const uint8_t *ptr = frame->data[0] + (linesize * y);

        if (put_bytes_left(pb, 0) < width * 4 * 4) {
            av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return -1;
        }

        for (i = 0; i < 4; i++)
            top[i] = left[i] = topleft[i] = buffer[0][i];

        for (x = 0; x < width; x++) {
            if (avctx->pix_fmt == AV_PIX_FMT_BGR24) {
                buffer[x][1] =  ptr[3 * x + 0] - ptr[3 * x + 1] + 0x100;
                buffer[x][2] =  ptr[3 * x + 2] - ptr[3 * x + 1] + 0x100;
                buffer[x][0] = (ptr[3 * x + 0] + 2 * ptr[3 * x + 1] + ptr[3 * x + 2]) >> 2;
            } else {
                buffer[x][1] =  ptr[4 * x + 0] - ptr[4 * x + 1] + 0x100;
                buffer[x][2] =  ptr[4 * x + 2] - ptr[4 * x + 1] + 0x100;
                buffer[x][0] = (ptr[4 * x + 0] + 2 * ptr[4 * x + 1] + ptr[4 * x + 2]) >> 2;
                if (avctx->pix_fmt == AV_PIX_FMT_BGRA)
                    buffer[x][3] = ptr[4 * x + 3];
            }

            for (i = 0; i < 3 + (avctx->pix_fmt == AV_PIX_FMT_BGRA); i++) {
                int pred, diff;

                PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);

                topleft[i] = top[i];
                top[i]     = buffer[x + 1][i];

                left[i]    = buffer[x][i];

                diff = ((left[i] - pred + 0x100) & 0x1FF) - 0x100;

                if (i == 0 || i == 3)
                    ff_mjpeg_encode_dc(pb, diff, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
                else
                    ff_mjpeg_encode_dc(pb, diff, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
            }
        }
    }

    return 0;
}

/* Encode one macroblock of a planar YUV frame: h x v samples from each
 * plane (as set up by ff_mjpeg_init_hvsample()), each predicted from its
 * already coded left/top neighbours; the first row and column use simpler
 * fallback predictors. */
static void ljpeg_encode_yuv_mb(LJpegEncContext *s, PutBitContext *pb,
                                const AVFrame *frame, int predictor,
                                int mb_x, int mb_y)
{
    int i;

    if (mb_x == 0 || mb_y == 0) {
        for (i = 0; i < 3; i++) {
            const uint8_t *ptr;
            int x, y, h, v, linesize;
            h = s->hsample[i];
            v = s->vsample[i];
            linesize = frame->linesize[i];

            for (y = 0; y < v; y++) {
                for (x = 0; x < h; x++) {
                    int pred;

                    ptr = frame->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                    if (y == 0 && mb_y == 0) {
                        if (x == 0 && mb_x == 0)
                            pred = 128;
                        else
                            pred = ptr[-1];
                    } else {
                        if (x == 0 && mb_x == 0) {
                            pred = ptr[-linesize];
                        } else {
                            PREDICT(pred, ptr[-linesize - 1], ptr[-linesize],
                                    ptr[-1], predictor);
                        }
                    }

                    if (i == 0)
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
                    else
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
                }
            }
        }
    } else {
        for (i = 0; i < 3; i++) {
            const uint8_t *ptr;
            int x, y, h, v, linesize;
            h = s->hsample[i];
            v = s->vsample[i];
            linesize = frame->linesize[i];

            for (y = 0; y < v; y++) {
                for (x = 0; x < h; x++) {
                    int pred;

                    ptr = frame->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                    PREDICT(pred, ptr[-linesize - 1], ptr[-linesize], ptr[-1], predictor);

                    if (i == 0)
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
                    else
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
                }
            }
        }
    }
}

/* Encode a planar YUV frame macroblock by macroblock, checking before each
 * row of macroblocks that enough space is left in the output packet. */
static int ljpeg_encode_yuv(AVCodecContext *avctx, PutBitContext *pb,
                            const AVFrame *frame)
{
    LJpegEncContext *s  = avctx->priv_data;
    const int mb_width  = (avctx->width  + s->hsample[0] - 1) / s->hsample[0];
    const int mb_height = (avctx->height + s->vsample[0] - 1) / s->vsample[0];
    int mb_x, mb_y;

    for (mb_y = 0; mb_y < mb_height; mb_y++) {
        if (put_bytes_left(pb, 0) <
            mb_width * 4 * 3 * s->hsample[0] * s->vsample[0]) {
            av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return -1;
        }

        for (mb_x = 0; mb_x < mb_width; mb_x++)
            ljpeg_encode_yuv_mb(s, pb, frame, s->pred, mb_x, mb_y);
    }

    return 0;
}

/* Encode callback: size and allocate the output packet, write the picture
 * header, dispatch to the BGR or YUV path, then escape 0xFF bytes and write
 * the trailer. */
static int ljpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
{
    LJpegEncContext *s  = avctx->priv_data;
    PutBitContext pb;
    const int width     = avctx->width;
    const int height    = avctx->height;
    const int mb_width  = (width  + s->hsample[0] - 1) / s->hsample[0];
    const int mb_height = (height + s->vsample[0] - 1) / s->vsample[0];
    size_t max_pkt_size = AV_INPUT_BUFFER_MIN_SIZE;
    int ret, header_bits;

    if (avctx->pix_fmt == AV_PIX_FMT_BGR0
        || avctx->pix_fmt == AV_PIX_FMT_BGR24)
        max_pkt_size += width * height * 3 * 4;
    else if (avctx->pix_fmt == AV_PIX_FMT_BGRA)
        max_pkt_size += width * height * 4 * 4;
    else {
        max_pkt_size += mb_width * mb_height * 3 * 4
                        * s->hsample[0] * s->vsample[0];
    }

    if ((ret = ff_mjpeg_add_icc_profile_size(avctx, pict, &max_pkt_size)) < 0)
        return ret;
    if ((ret = ff_alloc_packet(avctx, pkt, max_pkt_size)) < 0)
        return ret;

    init_put_bits(&pb, pkt->data, pkt->size);

    ff_mjpeg_encode_picture_header(avctx, &pb, pict, NULL, NULL,
                                   s->pred, NULL, NULL, 0);

    header_bits = put_bits_count(&pb);

    if (avctx->pix_fmt == AV_PIX_FMT_BGR0
        || avctx->pix_fmt == AV_PIX_FMT_BGRA
        || avctx->pix_fmt == AV_PIX_FMT_BGR24)
        ret = ljpeg_encode_bgr(avctx, &pb, pict);
    else
        ret = ljpeg_encode_yuv(avctx, &pb, pict);
    if (ret < 0)
        return ret;

    ff_mjpeg_escape_FF(&pb, header_bits >> 3);
    ff_mjpeg_encode_picture_trailer(&pb, header_bits);

    flush_put_bits(&pb);
    pkt->size   = put_bits_ptr(&pb) - pb.buf;
    *got_packet = 1;

    return 0;
}

static av_cold int ljpeg_encode_close(AVCodecContext *avctx)
{
    LJpegEncContext *s = avctx->priv_data;

    av_freep(&s->scratch);

    return 0;
}

static av_cold int ljpeg_encode_init(AVCodecContext *avctx)
{
    int ret = ff_mjpeg_encode_check_pix_fmt(avctx);
    LJpegEncContext *s = avctx->priv_data;

    if (ret < 0)
        return ret;

    s->scratch = av_malloc_array(avctx->width + 1, sizeof(*s->scratch));
    if (!s->scratch)
        return AVERROR(ENOMEM);

    ff_mjpeg_init_hvsample(avctx, s->hsample, s->vsample);

    ff_mjpeg_build_huffman_codes(s->huff_size_dc_luminance,
                                 s->huff_code_dc_luminance,
                                 ff_mjpeg_bits_dc_luminance,
                                 ff_mjpeg_val_dc);
    ff_mjpeg_build_huffman_codes(s->huff_size_dc_chrominance,
                                 s->huff_code_dc_chrominance,
                                 ff_mjpeg_bits_dc_chrominance,
                                 ff_mjpeg_val_dc);

    return 0;
}

#define OFFSET(x) offsetof(LJpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
{ "pred", "Prediction method", OFFSET(pred), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 3, VE, "pred" },
    { "left",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "pred" },
    { "plane",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, INT_MIN, INT_MAX, VE, "pred" },
    { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 3 }, INT_MIN, INT_MAX, VE, "pred" },

    { NULL},
};
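/*
 * The chosen predictor value is used as the "predictor" argument of the
 * PREDICT() macro for every sample and is also passed as the pred argument
 * of ff_mjpeg_encode_picture_header().  Illustrative use from the ffmpeg
 * CLI (assuming a build with this encoder and a container that accepts
 * lossless JPEG, e.g. AVI):
 *
 *     ffmpeg -i input.png -c:v ljpeg -pred median output.avi
 */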

static const AVClass ljpeg_class = {
    .class_name = "ljpeg",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

const FFCodec ff_ljpeg_encoder = {
    .p.name         = "ljpeg",
    CODEC_LONG_NAME("Lossless JPEG"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_LJPEG,
    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
                      AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
    .priv_data_size = sizeof(LJpegEncContext),
    .p.priv_class   = &ljpeg_class,
    .init           = ljpeg_encode_init,
    FF_CODEC_ENCODE_CB(ljpeg_encode_frame),
    .close          = ljpeg_encode_close,
    .p.pix_fmts     = (const enum AVPixelFormat[]){
        AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0,
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_NONE},
};