40 #define ALPHA_COMPAND_DC_OFFSET 256 41 #define ALPHA_COMPAND_GAIN 9400 49 for (
int i = 0;
i < 64;
i++) {
67 for (
int i = 0;
i < 256;
i++)
68 s->
lut[1][
i] =
i + ((768LL *
i *
i *
i) / (256 * 256 * 256));
111 if (codebook == 0 || codebook == 1) {
114 return level * quantisation;
121 for (i = 0; i <
height; i++) {
122 for (j = 1; j <
width; j++) {
123 band[j] += band[j-1];
132 for (i = 0; i <
length; i++)
134 band[
i] = bytestream2_get_le16(&peak->
base);
140 for (i = 0; i <
width; i++) {
153 const int linesize = frame->
linesize[0];
154 uint16_t *
r = (uint16_t *)frame->
data[0];
155 uint16_t *g1 = (uint16_t *)(frame->
data[0] + 2);
156 uint16_t *g2 = (uint16_t *)(frame->
data[0] + frame->
linesize[0]);
157 uint16_t *
b = (uint16_t *)(frame->
data[0] + frame->
linesize[0] + 2);
158 const int mid = 1 << (bpc - 1);
159 const int factor = 1 << (16 - bpc);
161 for (
int y = 0; y < frame->
height >> 1; y++) {
162 for (
int x = 0; x < frame->
width; x += 2) {
172 R = (rg - mid) * 2 + g;
175 B = (bg - mid) * 2 + g;
196 int width,
int linesize,
int plane)
200 for (i = 0; i <
width; i++) {
201 even = (low[
i] - high[
i])/2;
202 odd = (low[
i] + high[
i])/2;
211 int even = (low[
i] - high[
i]) / 2;
212 int odd = (low[
i] + high[
i]) / 2;
232 for (j = 0; j < 10; j++)
250 int chroma_x_shift, chroma_y_shift;
261 &chroma_y_shift)) < 0)
271 for (i = 0; i <
planes; i++) {
272 int w8, h8, w4, h4, w2, h2;
277 if (chroma_y_shift && !bayer)
341 int t = j < 1 ? 0 : (j < 3 ? 1 : 2);
363 s->
plane[
i].
l_h[9] = frame2 + 2 * w2 * h2;
383 int ret = 0,
i, j, plane, got_buffer = 0;
393 uint16_t tagu = bytestream2_get_be16(&gb);
394 int16_t
tag = (int16_t)tagu;
395 int8_t tag8 = (int8_t)(tagu >> 8);
396 uint16_t abstag =
abs(tag);
397 int8_t abs_tag8 =
abs(tag8);
398 uint16_t data = bytestream2_get_be16(&gb);
399 if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
415 }
else if (abstag ==
Version) {
481 for (
i = 0;
i < 8;
i++)
485 if (!data || data > 5) {
507 }
else if (data == 1) {
518 }
else if (abstag >= 0x4000 && abstag <= 0x40ff) {
519 if (abstag == 0x4001)
521 av_log(avctx,
AV_LOG_DEBUG,
"Small chunk length %d %s\n", data * 4, tag < 0 ?
"optional" :
"required");
534 uint32_t
offset = bytestream2_get_be32(&gb);
575 if (data >= 100 && data <= 105) {
577 }
else if (data >= 122 && data <= 128) {
579 }
else if (data == 30) {
592 if (!(data == 10 || data == 12)) {
602 }
else if (data == 2) {
604 }
else if (data == 3) {
606 }
else if (data == 4) {
680 if (avctx->
height < height)
712 int lowpass_height, lowpass_width, lowpass_a_height, lowpass_a_width;
724 if (lowpass_width < 3 ||
725 lowpass_width > lowpass_a_width) {
731 if (lowpass_height < 3 ||
732 lowpass_height > lowpass_a_height) {
744 if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
752 for (
i = 0;
i < lowpass_height;
i++) {
753 for (j = 0; j < lowpass_width; j++)
754 coeff_data[j] = bytestream2_get_be16u(&gb);
756 coeff_data += lowpass_width;
763 if (lowpass_height & 1) {
764 memcpy(&coeff_data[lowpass_height * lowpass_width],
765 &coeff_data[(lowpass_height - 1) * lowpass_width],
766 lowpass_width *
sizeof(*coeff_data));
771 av_log(avctx,
AV_LOG_DEBUG,
"Lowpass coefficients %d\n", lowpass_width * lowpass_height);
776 int highpass_height, highpass_width, highpass_a_width, highpass_a_height, highpass_stride, a_expected;
779 int count = 0, bytes;
791 a_expected = highpass_a_height * highpass_a_width;
799 if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < highpass_height * (uint64_t)highpass_stride) {
804 expected = highpass_height * highpass_stride;
830 if (count > expected)
840 for (
i = 0;
i <
run;
i++) {
841 *coeff_data |= coeff * 256;
846 *coeff_data++ = coeff;
856 if (level == 255 && run == 2)
861 if (count > expected)
871 for (
i = 0;
i <
run;
i++) {
872 *coeff_data |= coeff * 256;
877 *coeff_data++ = coeff;
884 if (count > expected) {
902 av_log(avctx,
AV_LOG_DEBUG,
"End subband coeffs %i extra %i\n", count, count - expected);
932 for (plane = 0; plane < s->
planes; plane++) {
937 if (level == 2 || level == 5)
939 for (o = !!level; o < 4 ; o++) {
949 for (plane = 0; plane < s->
planes && !
ret; plane++) {
955 int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
956 ptrdiff_t dst_linesize;
957 int16_t *low, *high, *
output, *dst;
961 dst_linesize = pic->
linesize[act_plane];
963 dst_linesize = pic->
linesize[act_plane] / 2;
968 lowpass_width < 3 || lowpass_height < 3) {
974 av_log(avctx,
AV_LOG_DEBUG,
"Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
979 dsp->
vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
985 dsp->
vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
990 dsp->
horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
993 for (
i = 0;
i < lowpass_height * 2;
i++) {
994 for (j = 0; j < lowpass_width * 2; j++)
997 output += output_stride * 2;
1009 lowpass_width < 3 || lowpass_height < 3) {
1015 av_log(avctx,
AV_LOG_DEBUG,
"Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1020 dsp->
vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1025 dsp->
vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1030 dsp->
horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1033 for (
i = 0;
i < lowpass_height * 2;
i++) {
1034 for (j = 0; j < lowpass_width * 2; j++)
1037 output += output_stride * 2;
1048 lowpass_height < 3 || lowpass_width < 3 || lowpass_width * 2 > s->
plane[plane].
width) {
1054 av_log(avctx,
AV_LOG_DEBUG,
"Level 3 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1059 dsp->
vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1064 dsp->
vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1066 dst = (int16_t *)pic->
data[act_plane];
1071 dst += pic->
linesize[act_plane] >> 1;
1088 low += output_stride;
1089 high += output_stride;
1090 dst += dst_linesize;
1098 dsp->
horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1103 dsp->
horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1105 dst = (int16_t *)pic->
data[act_plane];
1110 low += output_stride * 2;
1111 high += output_stride * 2;
1117 for (plane = 0; plane < s->
planes && !
ret; plane++) {
1122 int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1123 int16_t *low, *high, *
output, *dst;
1124 ptrdiff_t dst_linesize;
1128 dst_linesize = pic->
linesize[act_plane];
1130 dst_linesize = pic->
linesize[act_plane] / 2;
1135 lowpass_width < 3 || lowpass_height < 3) {
1141 av_log(avctx,
AV_LOG_DEBUG,
"Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1146 dsp->
vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
1151 dsp->
vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1156 dsp->
horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1159 for (
i = 0;
i < lowpass_height * 2;
i++) {
1160 for (j = 0; j < lowpass_width * 2; j++)
1163 output += output_stride * 2;
1174 lowpass_width < 3 || lowpass_height < 3) {
1180 av_log(avctx,
AV_LOG_DEBUG,
"Level 2 lowpass plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1185 dsp->
vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1190 dsp->
vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1195 dsp->
horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1198 for (
i = 0;
i < lowpass_height * 2;
i++) {
1199 for (j = 0; j < lowpass_width * 2; j++)
1201 output += output_stride * 2;
1207 dsp->
vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1212 dsp->
vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1217 dsp->
horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1223 av_log(avctx,
AV_LOG_DEBUG,
"temporal level %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1227 lowpass_width < 3 || lowpass_height < 3) {
1236 for (
i = 0;
i < lowpass_height;
i++) {
1238 low += output_stride;
1239 high += output_stride;
1245 dsp->
vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1250 dsp->
vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1255 dsp->
vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1260 dsp->
vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1265 dst = (int16_t *)pic->
data[act_plane];
1270 dst += pic->
linesize[act_plane] >> 1;
1285 low += output_stride;
1286 high += output_stride;
1287 dst += dst_linesize;
1294 dsp->
horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1299 dsp->
horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1304 dsp->
horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1309 dsp->
horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1314 dst = (int16_t *)pic->
data[act_plane];
1319 low += output_stride * 2;
1320 high += output_stride * 2;
1328 int16_t *low, *high, *dst;
1329 int output_stride, lowpass_height, lowpass_width;
1330 ptrdiff_t dst_linesize;
1332 for (plane = 0; plane < s->
planes; plane++) {
1333 int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1337 dst_linesize = pic->
linesize[act_plane];
1339 dst_linesize = pic->
linesize[act_plane] / 2;
1348 lowpass_width < 3 || lowpass_height < 3) {
1355 dst = (int16_t *)pic->
data[act_plane];
1363 dst += pic->
linesize[act_plane] >> 1;
1376 low += output_stride;
1377 high += output_stride;
1378 dst += dst_linesize;
1381 dst = (int16_t *)pic->
data[act_plane];
1386 low += output_stride * 2;
1387 high += output_stride * 2;
1451 for (
int plane = 0; plane < pdst->
planes; plane++) {
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
static const unsigned codebook[256][2]
This structure describes decoded (raw) audio or video data.
ptrdiff_t const GLvoid * data
int coded_width
Bitstream width / height, may be different from width/height e.g.
static void peak_table(int16_t *band, Peak *peak, int length)
#define AV_LOG_WARNING
Something somehow does not look correct.
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
static av_cold int init(AVCodecContext *avctx)
#define ALPHA_COMPAND_GAIN
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define FF_ARRAY_ELEMS(a)
void(* vert_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
static void inverse_temporal_filter(int16_t *low, int16_t *high, int width)
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
static const struct @322 planes[]
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high, int width, int linesize, int plane)
Macro definitions for various function/variable attributes.
int16_t * subband[SUBBAND_COUNT_3D]
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init function without locking any global mutexes.
void(* horiz_filter_clip)(int16_t *output, const int16_t *low, const int16_t *high, int width, int bpc)
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Multithreading support functions.
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
static int alloc_buffers(AVCodecContext *avctx)
static int get_bits_count(const GetBitContext *s)
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
bitstream reader API header.
int interlaced_frame
The content of the picture is interlaced.
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
CFHD_RL_VLC_ELEM table_18_rl_vlc[4572]
CFHD_RL_VLC_ELEM table_9_rl_vlc[2088]
FrameType
G723.1 frame types.
#define UPDATE_CACHE(name, gb)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
SubBand band[DWT_LEVELS_3D][4]
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_PIX_FMT_GBRAP12
const char * name
Name of the codec implementation.
#define CLOSE_READER(name, gb)
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
#define GET_RL_VLC(level, run, name, gb, table, bits,max_depth, need_update)
static av_cold int cfhd_close(AVCodecContext *avctx)
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
static void init_frame_defaults(CFHDContext *s)
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames.The frames must then be freed with ff_thread_release_buffer().Otherwise decode directly into the user-supplied frames.Call ff_thread_report_progress() after some part of the current picture has decoded.A good place to put this is where draw_horiz_band() is called-add this if it isn't called anywhere
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards.If some code can't be moved
static void init_plane_defaults(CFHDContext *s)
uint8_t prescale_table[8]
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
static av_cold int cfhd_init(AVCodecContext *avctx)
static void process_bayer(AVFrame *frame, int bpc)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static av_always_inline int bytestream2_tell(GetByteContext *g)
Libavcodec external API header.
#define ALPHA_COMPAND_DC_OFFSET
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static const int16_t alpha[]
main external API structure.
#define OPEN_READER(name, gb)
av_cold void ff_cfhddsp_init(CFHDDSPContext *c, int depth, int bayer)
static void difference_coding(int16_t *band, int width, int height)
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread.Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities.There will be very little speed gain at this point but it should work.If there are inter-frame dependencies
enum AVPixelFormat coded_format
refcounted data buffer API
static const int factor[16]
static int dequant_and_decompand(CFHDContext *s, int level, int quantisation, int codebook)
#define AV_PIX_FMT_GBRP12
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
#define AV_PIX_FMT_YUV422P10
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
#define AV_PIX_FMT_BAYER_RGGB16
void(* horiz_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
GLint GLenum GLboolean GLsizei stride
common internal api header.
common internal and external API header
static void process_alpha(int16_t *alpha, int width)
channel
Use these values when setting the channel map with ebur128_set_channel().
struct AVCodecInternal * internal
Private context used for internal data.
static void init_peak_table_defaults(CFHDContext *s)
static const double coeff[2][5]
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
static void free_buffers(CFHDContext *s)
int ff_cfhd_init_vlcs(CFHDContext *s)
#define av_malloc_array(a, b)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint8_t lowpass_precision
static double val(void *priv, double ch)
This structure stores compressed data.
void ff_free_vlc(VLC *vlc)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define AV_CEIL_RSHIFT(a, b)
void * av_mallocz_array(size_t nmemb, size_t size)