/* put_marker_byteu() / put_marker_byte(): a JPEG marker is 0xff followed by the marker code */
    bytestream2_put_byteu(pb, 0xff);
    bytestream2_put_byteu(pb, code);

    bytestream2_put_byte(pb, 0xff);
    bytestream2_put_byte(pb, code);
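JPEG-LS reuses the JPEG marker convention: a marker is the byte 0xff followed by a one-byte code (SOI = 0xd8, EOI = 0xd9, SOS = 0xda, plus the JPEG-LS specific frame marker 0xf7, named SOF48 in FFmpeg, and LSE = 0xf8). A minimal standalone sketch of the same idea without FFmpeg's PutByteContext; the enum and helper below are illustrative, not FFmpeg API:

#include <stdint.h>
#include <stdio.h>

/* marker codes used by this encoder (values from the JPEG / JPEG-LS specs) */
enum {
    MARKER_SOI   = 0xd8,  /* start of image            */
    MARKER_EOI   = 0xd9,  /* end of image              */
    MARKER_SOS   = 0xda,  /* start of scan             */
    MARKER_SOF48 = 0xf7,  /* JPEG-LS frame header      */
    MARKER_LSE   = 0xf8   /* JPEG-LS preset parameters */
};

/* append "0xff <code>" to buf and return the new write position */
static size_t put_marker(uint8_t *buf, size_t pos, uint8_t code)
{
    buf[pos++] = 0xff;
    buf[pos++] = code;
    return pos;
}

int main(void)
{
    uint8_t buf[4];
    size_t n = 0;

    n = put_marker(buf, n, MARKER_SOI);
    n = put_marker(buf, n, MARKER_EOI);
    printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]); /* ff d8 ff d9 */
    return 0;
}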
/* ls_encode_regular(): Golomb parameter selection and error mapping */
    for (k = 0; (state->N[Q] << k) < state->A[Q]; k++)
        ;

    map = !state->near && !k && (2 * state->B[Q] <= -state->N[Q]);

    /* ... fold the error into the range before mapping it ... */
    if (err >= (state->range + 1 >> 1)) {
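The loop picks the smallest Golomb parameter k for which N[Q] << k reaches the accumulated error magnitude A[Q]; the signed error, already reduced modulo the range by the (state->range + 1 >> 1) check, is then folded into a non-negative value before Golomb-Rice coding. A self-contained sketch of that selection and mapping with the per-context counters passed as plain integers (function names are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* smallest k with (N << k) >= A, as in the loop above */
static int golomb_k(int N, int A)
{
    int k;
    for (k = 0; (N << k) < A; k++)
        ;
    return k;
}

/* fold a signed, range-reduced error into the non-negative value that gets
 * Golomb-Rice coded; "map" swaps the two shortest codewords */
static int map_error(int err, int map)
{
    return err >= 0 ? 2 * err + map : 2 * abs(err) - 1 - map;
}

int main(void)
{
    int k = golomb_k(4, 25);                    /* 4 samples seen, |err| sum 25   */
    printf("k = %d\n", k);                      /* k = 3, since 4 << 3 = 32 >= 25 */
    printf("map(-2) = %d\n", map_error(-2, 0)); /* 3 */
    printf("map(+2) = %d\n", map_error(2, 0));  /* 4 */
    return 0;
}

The map flag covers the k == 0 special case of the standard: when the context bias shows that negative errors dominate (the 2 * B[Q] <= -N[Q] test above), the codes assigned to an error of 0 and of -1 are swapped.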
/* ls_encode_runterm(): encode the error that interrupted a run */
static void ls_encode_runterm(JLSState *state, PutBitContext *pb,
                              int RItype, int err, int limit_add)
{
    /* ... */
    if (RItype)
        temp += state->N[Q] >> 1;
    for (k = 0; (state->N[Q] << k) < temp; k++)
        ;
    if (!k && err && (2 * state->B[Q] < state->N[Q]))
        map = 1;           /* otherwise map stays 0 */

    if (err < 0)
        val = -(2 * err) - 1 - RItype + map;
    else
        val = 2 * err - RItype - map;
    /* ... */
    state->A[Q] += (val + 1 - RItype) >> 1;
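The run-interruption path selects k the same way, but biases the accumulator by N[Q] >> 1 when the interrupting sample matched Ra within NEAR (RItype == 1), and folds RItype into the mapped value. A sketch mirroring the statements above, with the context counters as plain integers (the helper name is illustrative):

#include <stdio.h>

/* map a run-interruption error to the value handed to the Golomb coder,
 * mirroring ls_encode_runterm(); A, B, N are the context accumulators */
static int runterm_val(int err, int RItype, int A, int B, int N, int *k_out)
{
    int temp = A, k, map;

    if (RItype)
        temp += N >> 1;
    for (k = 0; (N << k) < temp; k++)
        ;
    map = (!k && err && 2 * B < N) ? 1 : 0;

    *k_out = k;
    return err < 0 ? -(2 * err) - 1 - RItype + map
                   :   2 * err      - RItype - map;
}

int main(void)
{
    int k;
    int val = runterm_val(-1, 1, 8, 0, 4, &k);
    printf("k = %d, val = %d\n", k, val);   /* k = 2 (4 << 2 >= 10), val = 0 */
    return 0;
}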
/* ls_encode_line(): encode one line of the image */
static void ls_encode_line(JLSState *state, PutBitContext *pb,
                           void *tmp, const void *in, int last2, int w,
                           int stride, int comp, int bits)
{
    /* ... */
    int Ra = R(tmp, 0), Rb, Rc = last2, Rd;
    /* ... */
        /* Rd is the neighbour above and to the right of x, clamped at the line end */
        Rd = (x >= w - stride) ? R(tmp, x) : R(tmp, x + stride);
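Ra, Rb, Rc and Rd are the reconstructed neighbours of the current sample x: left, above, above-left and above-right, with the above-right sample clamped at the end of the line exactly as in the ternary above. Their differences form the local gradients that decide between run mode and regular mode. A small sketch of the neighbourhood and the gradients on two plain arrays rather than the encoder's scratch line; the struct and function are illustrative, not FFmpeg API:

#include <stdio.h>

/*   Rc Rb Rd     (previous, reconstructed line)
 *   Ra  x        (current line)                  */
struct neighbours { int Ra, Rb, Rc, Rd; };

/* neighbours of an interior sample x (0 < x < w); the first column needs the
 * special cases handled inside ls_encode_line() itself */
static struct neighbours get_neighbours(const int *prev, const int *cur,
                                        int x, int w)
{
    struct neighbours n;
    n.Ra = cur[x - 1];                           /* left                 */
    n.Rb = prev[x];                              /* above                */
    n.Rc = prev[x - 1];                          /* above-left           */
    n.Rd = (x >= w - 1) ? prev[x] : prev[x + 1]; /* above-right, clamped */
    return n;
}

int main(void)
{
    int prev[4] = { 10, 12, 13, 13 };
    int cur[4]  = { 11,  0,  0,  0 };
    struct neighbours n = get_neighbours(prev, cur, 1, 4);
    int D1 = n.Rd - n.Rb, D2 = n.Rb - n.Rc, D3 = n.Rc - n.Ra;

    printf("Ra=%d Rb=%d Rc=%d Rd=%d  gradients=(%d,%d,%d)\n",
           n.Ra, n.Rb, n.Rc, n.Rd, D1, D2, D3);
    return 0;
}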
        /* run mode: extend the run while samples stay within NEAR of RUNval */
            int RUNval, RItype, run;
            /* ... */
            while (x < w && (FFABS(R(in, x) - RUNval) <= state->near)) {
            /* run interruption: predict from Ra or Rb and code the residual */
            pred = RItype ? Ra : Rb;
            err  = R(in, x) - pred;

            if (!RItype && Ra > Rb)
                err = -err;
            /* ... quantize err in the near-lossless case ... */
            if (RItype || (Rb >= Ra))
                /* ... reconstruct Ra from pred plus the dequantized error ... */;
            /* ... */
            if (err >= state->range + 1 >> 1)
                err -= state->range;
        /* regular mode: median (MED) prediction, error sign folded by the context */
            pred = mid_pred(Ra, Ra + Rb - Rc, Rb);
            /* ... negative context: the sign of the error is flipped ... */
                err = pred - R(in, x);
            /* ... positive context ... */
                err = R(in, x) - pred;
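mid_pred(Ra, Ra + Rb - Rc, Rb) is the median of its three arguments, i.e. the median edge detector (MED) of LOCO-I/JPEG-LS: it picks min(Ra, Rb) or max(Ra, Rb) when Rc suggests an edge next to the sample, and the planar estimate Ra + Rb - Rc otherwise. A standalone version using the equivalent branchy formulation (function name illustrative):

#include <stdio.h>

/* median edge detector: the median of Ra, Rb and Ra + Rb - Rc */
static int med_predict(int Ra, int Rb, int Rc)
{
    int mn = Ra < Rb ? Ra : Rb;
    int mx = Ra < Rb ? Rb : Ra;

    if (Rc >= mx)        /* Rc is the largest: take the smaller neighbour  */
        return mn;
    if (Rc <= mn)        /* Rc is the smallest: take the larger neighbour  */
        return mx;
    return Ra + Rb - Rc; /* smooth area: planar prediction                 */
}

int main(void)
{
    printf("%d\n", med_predict(10, 12, 11)); /* 11: smooth, 10 + 12 - 11 */
    printf("%d\n", med_predict(10, 12, 20)); /* 10: Rc above both        */
    printf("%d\n", med_predict(10, 12,  3)); /* 12: Rc below both        */
    return 0;
}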
/* ls_store_lse(): emit an LSE (preset parameters) segment only when the
 * coding parameters differ from the defaults for this bpp/NEAR */
    if (state->T1 == state2.T1 &&
        state->T2 == state2.T2 &&
        state->T3 == state2.T3 &&
        state->reset == state2.reset)
        return;
    /* store LSE type 1 */
    put_marker_byteu(pb, LSE);
    bytestream2_put_be16u(pb, 13);            /* segment length              */
    bytestream2_put_byteu(pb, 1);             /* parameter ID: coding params */
    bytestream2_put_be16u(pb, state->maxval);
    bytestream2_put_be16u(pb, state->T1);
    bytestream2_put_be16u(pb, state->T2);
    bytestream2_put_be16u(pb, state->T3);
    bytestream2_put_be16u(pb, state->reset);
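The segment written here is the JPEG-LS preset coding parameters segment: marker 0xff 0xf8 (LSE), a 13-byte payload with parameter ID 1 carrying MAXVAL, T1, T2, T3 and RESET as big-endian 16-bit values. A standalone sketch laying out the same bytes into a flat buffer; write_lse() and put_be16() are illustrative helpers, not FFmpeg API, and unlike ls_store_lse() the sketch writes the segment unconditionally:

#include <stdint.h>
#include <stdio.h>

static size_t put_be16(uint8_t *buf, size_t pos, unsigned v)
{
    buf[pos++] = v >> 8;
    buf[pos++] = v & 0xff;
    return pos;
}

/* write an LSE preset-parameters segment; returns bytes written (2 + 13) */
static size_t write_lse(uint8_t *buf, unsigned maxval,
                        unsigned T1, unsigned T2, unsigned T3, unsigned reset)
{
    size_t pos = 0;

    buf[pos++] = 0xff;            /* LSE marker                          */
    buf[pos++] = 0xf8;
    pos = put_be16(buf, pos, 13); /* segment length, includes this field */
    buf[pos++] = 1;               /* parameter ID: coding parameters     */
    pos = put_be16(buf, pos, maxval);
    pos = put_be16(buf, pos, T1);
    pos = put_be16(buf, pos, T2);
    pos = put_be16(buf, pos, T3);
    pos = put_be16(buf, pos, reset);
    return pos;
}

int main(void)
{
    uint8_t buf[15];
    size_t n = write_lse(buf, 255, 3, 7, 21, 64); /* 8-bit lossless defaults */

    for (size_t i = 0; i < n; i++)
        printf("%02x ", buf[i]);
    printf("\n");
    return 0;
}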
static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
{
    /* ... */
#if FF_API_PRIVATE_OPT
    /* ... honour the deprecated avctx->prediction_method if it was set ... */
#endif
    /* ... */

    /* SOF48 (JPEG-LS frame header) segment */
    bytestream2_put_be16u(&pb, 8 + comps * 3);       /* header length               */
    /* ... sample precision ... */
    bytestream2_put_be16u(&pb, avctx->height);
    bytestream2_put_be16u(&pb, avctx->width);
    bytestream2_put_byteu(&pb, comps);               /* number of components        */
    for (i = 1; i <= comps; i++) {
        bytestream2_put_byteu(&pb, i);               /* component ID                */
        bytestream2_put_byteu(&pb, 0x11);            /* H/V sampling factors: 1:1   */
        bytestream2_put_byteu(&pb, 0);               /* quantization table: unused  */
    }

    /* SOS (scan header) segment */
    bytestream2_put_be16u(&pb, 6 + comps * 2);
    bytestream2_put_byteu(&pb, comps);
    for (i = 1; i <= comps; i++) {
        bytestream2_put_byteu(&pb, i);               /* component ID                */
        bytestream2_put_byteu(&pb, 0);               /* mapping index: none         */
    }
    bytestream2_put_byteu(&pb, ctx->pred);           /* NEAR (max per-sample error) */
    bytestream2_put_byteu(&pb, (comps > 1) ? 1 : 0); /* interleaving: 0 plane, 1 line */
    bytestream2_put_byteu(&pb, 0);                   /* point transform: none       */
    /* ... encode the picture data, one line at a time ... */

    /* AV_PIX_FMT_GRAY8: a single component */
    for (i = 0; i < avctx->height; i++) {
        /* ... */
    }

    /* AV_PIX_FMT_GRAY16: as above, but the carried sample is 16 bit */
    for (i = 0; i < avctx->height; i++) {
        int last0 = *((uint16_t *)last);
        /* ... */
    }

    /* packed 24 bpp RGB/BGR: three interleaved components, stride 3; the two
     * formats differ only in the order in which the components are visited */
    int Rc[3] = { 0, 0, 0 };

    width = avctx->width * 3;
    for (i = 0; i < avctx->height; i++) {
        for (j = 0; j < 3; j++) {
            /* ... */
        }
    }

    int Rc[3] = { 0, 0, 0 };

    width = avctx->width * 3;
    for (i = 0; i < avctx->height; i++) {
        for (j = 2; j >= 0; j--) {
            /* ... */
        }
    }
        /* marker escaping: only seven data bits may follow an 0xff byte */
        bytestream2_put_byte(&pb, v);
        if (v == 0xFF) { /* ... re-read just 7 bits for the next byte ... */
            bytestream2_put_byte(&pb, v);
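JPEG-LS keeps entropy-coded data from imitating markers by bit stuffing: whenever an 0xff byte ends up in the data, only seven payload bits may follow, so the byte after an 0xff always has its most significant bit clear. The encoder implements this by re-reading its buffered bitstream eight bits at a time and only seven bits after each 0xff, as in the two writes above. A standalone sketch of the same repacking on a plain bit buffer; the tiny MSB-first bit reader is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct bitreader { const uint8_t *buf; size_t bitpos; };

static unsigned read_bits(struct bitreader *br, int n)
{
    unsigned v = 0;
    while (n--) {
        v = (v << 1) | ((br->buf[br->bitpos >> 3] >> (7 - (br->bitpos & 7))) & 1);
        br->bitpos++;
    }
    return v;
}

/* repack nbits of raw entropy-coded data so every byte after an 0xff carries
 * only 7 payload bits (MSB = 0); returns the number of bytes written.
 * Trailing bits that do not fill a whole read are dropped here; the real
 * encoder pads the stream with zero bits instead. */
static size_t escape_ff(const uint8_t *in, size_t nbits, uint8_t *out)
{
    struct bitreader br = { in, 0 };
    size_t n = 0;

    while (br.bitpos + 8 <= nbits) {
        unsigned v = read_bits(&br, 8);
        out[n++] = v;
        if (v == 0xff && br.bitpos + 7 <= nbits)
            out[n++] = read_bits(&br, 7);   /* MSB of this byte stays 0 */
    }
    return n;
}

int main(void)
{
    const uint8_t raw[2] = { 0xff, 0xaa };
    uint8_t out[4];
    size_t n = escape_ff(raw, 16, out);

    for (size_t i = 0; i < n; i++)
        printf("%02x ", out[i]);            /* ff 55 */
    printf("\n");
    return 0;
}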
#if FF_API_CODED_FRAME
    /* ... mark the deprecated coded_frame as an intra keyframe ... */
#endif

#define OFFSET(x) offsetof(JPEGLSContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

    .priv_class     = &jpegls_class,

Referenced definitions:
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
const uint8_t ff_log2_run[41]
static void put_marker_byte(PutByteContext *pb, enum JpegMarker code)
AVFrame
This structure describes decoded (raw) audio or video data.
AVCodec ff_jpegls_encoder
static void ls_store_lse(JLSState *state, PutByteContext *pb)
static void put_bits(PutBitContext *s, int n, unsigned int value)
Write up to 31 bits into a bitstream.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
#define LIBAVUTIL_VERSION_INT
AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
static void ls_encode_regular(JLSState *state, PutBitContext *pb, int Q, int err)
Encode error from regular symbol.
static av_cold int init(AVCodecContext *avctx)
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
const char * av_default_item_name(void *ptr)
Return the context name.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
MJPEG encoder and decoder.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init function without locking any global mutexes.
static const AVOption options[]
static int get_bits_count(const GetBitContext *s)
bitstream reader API header.
#define AV_INPUT_BUFFER_MIN_SIZE
Minimum encoding buffer size; used to avoid some checks during header writing.
static void ls_encode_run(JLSState *state, PutBitContext *pb, int run, int comp, int trail)
Encode run value as specified by JPEG-LS standard.
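Run lengths are coded with an adaptive block scheme: while the remaining run is at least 2^J[RUNindex] samples, a single 1 bit stands for a whole block of that size and RUNindex grows; a run cut short by a dissimilar sample is terminated by a 0 bit followed by the remainder in J[RUNindex] bits, while a run that simply hits the end of the line only emits a final 1 if samples are left over. A sketch of that scheme writing bits as '0'/'1' characters; the 32-entry J table is the one from ITU-T T.87 (ff_log2_run above extends it to 41 entries), and the function name is illustrative:

#include <stdio.h>
#include <string.h>

/* J[] table from ITU-T T.87 */
static const int J[32] = {
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3,
    4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

/* append the code for a run of `run` samples; `interrupted` is nonzero when a
 * dissimilar sample (not the end of the line) ended the run; returns the
 * updated run index */
static int encode_run(char *out, int run, int run_index, int interrupted)
{
    while (run >= (1 << J[run_index])) {
        strcat(out, "1");                /* one bit covers a whole block     */
        run -= 1 << J[run_index];
        if (run_index < 31)
            run_index++;
    }
    if (interrupted) {
        strcat(out, "0");                /* partial block: remainder follows */
        for (int b = J[run_index] - 1; b >= 0; b--)
            strcat(out, (run >> b) & 1 ? "1" : "0");
    } else if (run) {
        strcat(out, "1");                /* line ended inside a block        */
    }
    return run_index;
}

int main(void)
{
    char bits[64] = "";
    int idx = encode_run(bits, 13, 0, 1);   /* run of 13, starting at index 0 */

    printf("bits=%s next_run_index=%d\n", bits, idx); /* 11111111001, 8 */
    return 0;
}

After an interrupted run the encoder still has to code the sample that broke it (ls_encode_runterm() above) and then lets the run index decay again.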
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
static void ls_encode_line(JLSState *state, PutBitContext *pb, void *tmp, const void *in, int last2, int w, int stride, int comp, int bits)
Encode one line of image.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
void ff_jpegls_reset_coding_parameters(JLSState *s, int reset_all)
Calculate JPEG-LS codec values.
const char * name
Name of the codec implementation.
static int ff_jpegls_update_state_regular(JLSState *state, int Q, int err)
static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
static av_always_inline int bytestream2_tell_p(PutByteContext *p)
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
int flags
A combination of AV_PKT_FLAG values.
static int put_bits_count(PutBitContext *s)
enum AVPictureType pict_type
Picture type of the frame.
#define AV_PIX_FMT_GRAY16
int width
picture width / height.
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable as absolute values of their type.
static void ls_encode_runterm(JLSState *state, PutBitContext *pb, int RItype, int err, int limit_add)
Encode error from run termination.
AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
static void set_ur_golomb_jpegls(PutBitContext *pb, int i, int k, int limit, int esc_len)
write unsigned golomb rice code (jpegls).
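The mapped error values are written with a limited-length Golomb-Rice code: normally a unary prefix of val >> k zeros, a terminating 1 and the k low-order bits of val; if that prefix would reach the limit, an escape of LIMIT - qbpp - 1 zeros and a 1 is written instead, followed by val - 1 in qbpp bits. The sketch below follows the description in ITU-T T.87 and emits bits as characters; FFmpeg's set_ur_golomb_jpegls() packs the same bits through its put_bits API with slightly different parameter bookkeeping, so names and parameters here are illustrative:

#include <stdio.h>

/* limited-length Golomb-Rice code: val >= 0, k = Golomb parameter,
 * glimit = LIMIT - qbpp - 1, qbpp = bits needed for the mapped error range;
 * bits are written MSB-first as characters into `out` */
static void golomb_jpegls(char *out, unsigned val, int k, int glimit, int qbpp)
{
    int q = val >> k, n = 0;

    if (q < glimit) {
        for (int i = 0; i < q; i++)
            out[n++] = '0';                  /* unary prefix            */
        out[n++] = '1';
        for (int b = k - 1; b >= 0; b--)     /* k low-order bits of val */
            out[n++] = (val >> b) & 1 ? '1' : '0';
    } else {                                 /* escape: code val - 1    */
        for (int i = 0; i < glimit; i++)
            out[n++] = '0';
        out[n++] = '1';
        for (int b = qbpp - 1; b >= 0; b--)
            out[n++] = ((val - 1) >> b) & 1 ? '1' : '0';
    }
    out[n] = '\0';
}

int main(void)
{
    char bits[80];

    golomb_jpegls(bits, 10, 2, 23, 8);  /* 8-bit lossless: LIMIT=32, qbpp=8  */
    printf("%s\n", bits);               /* 00110: "00", "1", low bits "10"   */

    golomb_jpegls(bits, 200, 0, 23, 8); /* q = 200 >= 23: escape path        */
    printf("%s\n", bits);               /* 23 zeros, "1", then 199 in 8 bits */
    return 0;
}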
static void put_marker_byteu(PutByteContext *pb, enum JpegMarker code)
Libavcodec external API header.
attribute_deprecated int prediction_method
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
AVCodecContext
main external API structure.
AVClass
Describe the class of an AVClass context structure.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static const AVClass jpegls_class
static av_cold int encode_init_ls(AVCodecContext *ctx)
static enum AVPixelFormat pix_fmts[]
LSE
JPEG-LS extension parameters.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
#define FF_DISABLE_DEPRECATION_WARNINGS
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
#define FF_ENABLE_DEPRECATION_WARNINGS
static int ff_jpegls_quantize(JLSState *s, int v)
Calculate quantized gradient value, used for context determination.
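Each of the three local gradients is quantized into one of nine regions using the thresholds T1, T2, T3 and the near-lossless tolerance NEAR; the three results form the context index Q = 81*Q1 + 9*Q2 + Q3, and the sign of Q is folded out so that 365 regular contexts remain. A sketch of the quantizer following the rule in ITU-T T.87, using the 8-bit lossless defaults T1 = 3, T2 = 7, T3 = 21 in the example (function name illustrative):

#include <stdio.h>

/* quantize a local gradient D into {-4..4} with thresholds T1 < T2 < T3 and
 * the near-lossless parameter NEAR (0 for lossless coding) */
static int quantize_gradient(int D, int T1, int T2, int T3, int NEAR)
{
    if (D <= -T3)   return -4;
    if (D <= -T2)   return -3;
    if (D <= -T1)   return -2;
    if (D <  -NEAR) return -1;
    if (D <=  NEAR) return  0;
    if (D <   T1)   return  1;
    if (D <   T2)   return  2;
    if (D <   T3)   return  3;
    return 4;
}

int main(void)
{
    for (int D = -25; D <= 25; D += 5)
        printf("D=%3d -> Q=%2d\n", D, quantize_gradient(D, 3, 7, 21, 0));
    return 0;
}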
int key_frame
1 -> keyframe, 0-> not
void ff_jpegls_init_state(JLSState *state)
Calculate initial JPEG-LS parameters.
AVPixelFormat
Pixel format.
AVPacket
This structure stores compressed data.
static void ff_jpegls_downscale_state(JLSState *state, int Q)