#ifndef AVCODEC_MJPEGDEC_H
#define AVCODEC_MJPEGDEC_H

#define MAX_COMPONENTS 4

int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table,
                       const uint8_t *val_table, int is_ac,
                       void *logctx);

int ff_mjpeg_decode_sos(MJpegDecodeContext *s,
                        const uint8_t *mb_bitmask,
                        int mb_bitmask_size,
                        const AVFrame *reference);

int ff_mjpeg_find_marker(MJpegDecodeContext *s,
                         const uint8_t **buf_ptr, const uint8_t *buf_end,
                         const uint8_t **unescaped_buf_ptr,
                         int *unescaped_buf_size);
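ff_mjpeg_find_marker returns, through unescaped_buf_ptr and unescaped_buf_size, a view of the scan with JPEG byte stuffing removed. As a rough standalone illustration of what that unescaping entails (jpeg_unescape is a hypothetical helper, not FFmpeg's implementation, which additionally deals with restart markers and per-codec quirks): after every 0xFF in entropy-coded data the encoder inserts a 0x00 so the byte cannot be confused with a marker, and the decoder has to drop those stuffed zeros.

#include <stddef.h>
#include <stdint.h>

/* Copy JPEG entropy-coded data while dropping the 0x00 that the encoder
 * stuffs after every 0xFF. Stops at the first real marker (0xFF followed
 * by a non-zero byte). dst must hold at least src_size bytes; returns
 * the number of bytes written. Illustrative only. */
static size_t jpeg_unescape(uint8_t *dst, const uint8_t *src, size_t src_size)
{
    size_t out = 0;

    for (size_t i = 0; i < src_size; i++) {
        if (src[i] == 0xFF && i + 1 < src_size) {
            if (src[i + 1] == 0x00) {  /* stuffed byte: keep 0xFF, drop 0x00 */
                dst[out++] = 0xFF;
                i++;
                continue;
            }
            break;                     /* real marker: end of the scan data */
        }
        dst[out++] = src[i];
    }
    return out;
}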
AVPixelFormat
Pixel format.
int h_scount[MAX_COMPONENTS]
uint16_t quant_matrixes[4][64]
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
int nb_blocks[MAX_COMPONENTS]
int v_count[MAX_COMPONENTS]
int block_stride[MAX_COMPONENTS]
This structure describes decoded (raw) audio or video data.
uint8_t * last_nnz[MAX_COMPONENTS]
struct JLSState * jls_state
const AVPixFmtDescriptor * pix_desc
AVStereo3D * stereo3d
stereoscopic information (cached, since it is read before frame allocation)
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
int h_count[MAX_COMPONENTS]
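h_count and v_count hold each component's horizontal and vertical sampling factors from the SOF header (h_scount/v_scount are the per-scan counterparts). A self-contained back-of-the-envelope example, independent of the decoder, of how those factors determine the number of 8x8 blocks per MCU and per image for a 4:2:0 picture:

#include <stdio.h>

/* 640x480 in 4:2:0: luma sampling factors 2x2, both chroma planes 1x1.
 * An MCU covers (8*Hmax) x (8*Vmax) pixels and component i contributes
 * h[i]*v[i] 8x8 blocks to it. Illustrative arithmetic only. */
int main(void)
{
    const int width = 640, height = 480;
    const int h[3] = { 2, 1, 1 }, v[3] = { 2, 1, 1 };
    const int hmax = 2, vmax = 2;

    const int mcus_x = (width  + 8 * hmax - 1) / (8 * hmax); /* 40 */
    const int mcus_y = (height + 8 * vmax - 1) / (8 * vmax); /* 30 */

    for (int i = 0; i < 3; i++)
        printf("component %d: %d blocks per MCU, %d blocks total\n",
               i, h[i] * v[i], mcus_x * mcus_y * h[i] * v[i]);
    return 0;
}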
int dc_index[MAX_COMPONENTS]
int ff_mjpeg_decode_end(AVCodecContext *avctx)
int ff_sp5x_process_packet(AVCodecContext *avctx, AVPacket *avpkt)
int comp_index[MAX_COMPONENTS]
int component_id[MAX_COMPONENTS]
const uint8_t * raw_image_buffer
uint16_t(* ljpeg_buffer)[4]
int quant_sindex[MAX_COMPONENTS]
unsigned int ljpeg_buffer_size
Describe the class of an AVClass context structure.
enum AVPixelFormat hwaccel_pix_fmt
AVDictionary * exif_metadata
int16_t (*blocks[MAX_COMPONENTS])[64]
intermediate sums (progressive mode)
int last_dc[MAX_COMPONENTS]
int near
near-lossless bound (0 for lossless)
int ff_mjpeg_decode_init(AVCodecContext *avctx)
size_t raw_scan_buffer_size
int ac_index[MAX_COMPONENTS]
int reset
context halving interval (the RESET parameter of JPEG-LS)
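near and reset cache the JPEG-LS NEAR and RESET parameters. A minimal standalone sketch, simplified from ITU-T T.87 and using hypothetical helper names (quantize_error, update_context), of what the two values control: NEAR bounds the per-sample reconstruction error (0 means lossless), and RESET is the occurrence count at which a context's accumulated statistics are halved:

#include <stdlib.h>

/* Near-lossless error quantization (simplified from ITU-T T.87 A.4.4):
 * the reconstructed sample differs from the original by at most NEAR,
 * and NEAR == 0 degenerates to lossless coding. */
static int quantize_error(int err, int near)
{
    if (err > 0)
        return  (err + near) / (2 * near + 1);
    else
        return -((near - err) / (2 * near + 1));
}

/* Context update (simplified from T.87 A.6.1): once the occurrence
 * count N reaches RESET, the accumulated statistics are halved. */
static void update_context(int *A, int *B, int *N,
                           int qerr, int near, int reset)
{
    *B += qerr * (2 * near + 1);
    *A += abs(qerr);
    if (*N == reset) {
        *A >>= 1;
        *B  = *B >= 0 ? *B >> 1 : -((1 - *B) >> 1);
        *N >>= 1;
    }
    (*N)++;
}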
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
#define DECLARE_ALIGNED(n, t, v)
int v_scount[MAX_COMPONENTS]
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
int qscale[4]
quantizer scale calculated from quant_matrixes
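qscale caches a rough per-table quantizer strength derived from quant_matrixes. One plausible way to compute such a figure, shown here only as a sketch (estimate_qscale is hypothetical, and the assumption that the lowest-frequency AC steps sit at indexes 1 and 8 may not match the decoder's internal table order):

#include <stdint.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

/* Rough per-table quantizer strength: look at the lowest-frequency AC
 * steps, assumed here to sit at row-major positions 1 and 8. Sketch
 * only; the exact expression behind qscale[] may differ. */
static int estimate_qscale(const uint16_t quant_matrix[64])
{
    return FFMAX(quant_matrix[1], quant_matrix[8]) >> 1;
}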
uint8_t raw_huffman_values[2][4][256]
const uint8_t * raw_scan_buffer
main external API structure.
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
int got_picture
set once a SOF has been found and the picture is valid.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
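The bits_table and val_table arguments of ff_mjpeg_build_vlc mirror the BITS and HUFFVAL lists of a DHT segment. A sketch of building the VLC for the standard luminance DC table from ITU-T T.81 Annex K, assuming (an assumption, not confirmed by this header) that bits_table is 1-indexed over code lengths 1..16 like FFmpeg's own ff_mjpeg_bits_* tables, and that the code is compiled inside the FFmpeg source tree where VLC and this function are visible:

#include <stdint.h>
#include "mjpegdec.h"   /* assumed to live next to the libavcodec sources */

/* Standard luminance DC table, ITU-T T.81 Annex K (Table K.3):
 * code-length counts for lengths 1..16 (index 0 is padding),
 * followed by the 12 symbol values (magnitude categories 0..11). */
static const uint8_t annexk_dc_lum_bits[17] = {
    0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0
};
static const uint8_t annexk_dc_lum_vals[12] = {
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
};

static int build_dc_lum_vlc(VLC *vlc, void *logctx)
{
    /* is_ac = 0: DC tables code only the magnitude category. */
    return ff_mjpeg_build_vlc(vlc, annexk_dc_lum_bits, annexk_dc_lum_vals,
                              0, logctx);
}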
This structure stores compressed data.
uint8_t raw_huffman_lengths[2][4][16]
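raw_huffman_lengths[2][4][16] and raw_huffman_values[2][4][256] mirror the layout of DHT segments: table class (0 = DC, 1 = AC), destination slot 0..3, then the 16 per-length counts and up to 256 symbol values. A standalone sketch of filling such arrays from one DHT table payload (parse_dht_table is a hypothetical helper; the real decoder also validates the counts and rebuilds its VLCs):

#include <stddef.h>
#include <stdint.h>

/* Parse one table from a DHT marker payload into arrays shaped like
 * raw_huffman_lengths / raw_huffman_values. Returns the number of
 * payload bytes consumed, or -1 on malformed input. Sketch only. */
static int parse_dht_table(const uint8_t *p, size_t size,
                           uint8_t lengths[2][4][16],
                           uint8_t values[2][4][256])
{
    if (size < 17)
        return -1;

    int tclass = p[0] >> 4;   /* 0 = DC, 1 = AC */
    int dest   = p[0] & 0x0F; /* destination slot 0..3 */
    if (tclass > 1 || dest > 3)
        return -1;

    int total = 0;
    for (int i = 0; i < 16; i++) {
        lengths[tclass][dest][i] = p[1 + i];
        total += p[1 + i];
    }
    if (total > 256 || size < (size_t)(17 + total))
        return -1;

    for (int i = 0; i < total; i++)
        values[tclass][dest][i] = p[17 + i];

    return 17 + total;
}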
size_t raw_image_buffer_size
uint64_t coefs_finished[MAX_COMPONENTS]
bitmask of which coefs have been completely decoded (progressive mode)
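coefs_finished records, per component, which of the 64 coefficient positions have been completely decoded across progressive scans (a scan with spectral selection covers coefficients Ss..Se). A small standalone sketch of that bookkeeping with hypothetical helpers; successive-approximation refinement is ignored here:

#include <stdint.h>

/* Mark coefficients ss..se (inclusive, 0..63) of one component as
 * completely decoded, and test whether the whole 8x8 block is done. */
static void mark_coefs_finished(uint64_t *mask, int ss, int se)
{
    for (int i = ss; i <= se; i++)
        *mask |= UINT64_C(1) << i;
}

static int all_coefs_finished(uint64_t mask)
{
    return mask == UINT64_MAX;
}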
Stereo 3D type: this structure describes how two videos are packed within a single video surface, with additional information as needed.
int linesize[MAX_COMPONENTS]
per-component line size, shifted left by the interlaced flag so that block rows within one field skip the other field's lines
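Since linesize stores the frame line size shifted left by the interlaced flag, stepping one 8-line block row within a field automatically skips the lines of the other field. A hypothetical, illustrative helper showing the pointer arithmetic that convention implies:

#include <stdint.h>

/* Return a pointer to the start of 8-line block row 'block_row' of
 * field 'field' (0 = top, 1 = bottom). With interlaced content the
 * step is frame_linesize << 1, i.e. the cached "linesize << interlaced"
 * value; progressive content uses field = 0 and interlaced = 0. */
static uint8_t *field_block_row(uint8_t *data, int frame_linesize,
                                int interlaced, int field, int block_row)
{
    const int step = frame_linesize << interlaced;
    return data + field * frame_linesize + 8 * block_row * step;
}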
enum AVPixelFormat hwaccel_sw_pix_fmt
void * hwaccel_picture_private