52 const uint8_t *val_table, int nb_codes,
53 int use_static, int is_ac)
56 uint16_t huff_code[256];
57 uint16_t huff_sym[256];
64 for (i = 0; i < 256; i++)
65 huff_sym[i] = i + 16 * is_ac;
68 huff_sym[0] = 16 * 256;
71 huff_code, 2, 2, huff_sym, 2, 2, use_static);
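build_vlc() converts a DHT table into a VLC for get_vlc2(): the per-length symbol counts and the symbol values are first expanded into explicit code lengths and code words, which ff_init_vlc_sparse() then turns into a lookup table. A rough sketch of that canonical code assignment, written per ITU-T T.81 Annex C rather than copied from the FFmpeg helper:

/* Minimal sketch (not the FFmpeg implementation) of canonical Huffman code
 * assignment: bits_table[1..16] holds the number of codes of each length;
 * the k-th generated code belongs to symbol val_table[k].  Codes of equal
 * length are consecutive integers, and the running code value doubles
 * whenever the length grows, so the table is fully determined by the counts. */
static int build_huff_codes(const uint8_t *bits_table,
                            uint8_t *huff_size, uint16_t *huff_code)
{
    int i, j, k = 0;
    uint16_t code = 0;

    for (i = 1; i <= 16; i++) {
        for (j = 0; j < bits_table[i]; j++) {
            huff_size[k] = i;          /* length of the k-th code */
            huff_code[k] = code++;     /* next code of this length */
            k++;
        }
        code <<= 1;                    /* move on to the next code length */
    }
    return k;                          /* total number of codes */
}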
93 if (len > 14 && buf[12] == 1)
95 if (len > 14 && buf[12] == 2)
134 "error using external huffman table, switching back to internal\n");
182 for (i = 0; i < 64; i++) {
192 len -= 1 + 64 * (1+pr);
222 for (i = 1; i <= 16; i++) {
227 if (len < n || n > 256)
231 for (i = 0; i < n; i++) {
242 class, index, code_max + 1);
243 if ((ret = build_vlc(&s->vlcs[class][index], bits_table, val_table,
244 code_max + 1, 0, class > 0)) < 0)
249 if ((ret = build_vlc(&s->vlcs[2][index], bits_table, val_table,
250 code_max + 1, 0, 0)) < 0)
273 if (bits > 16 || bits < 1) {
303 if (nb_components <= 0 ||
309 "nb_components changing in interlaced picture\n");
313 if (s->ls && !(bits <= 8 || nb_components == 1)) {
315 "JPEG-LS that is not <= 8 "
316 "bits/component or 16-bit gray");
322 for (i = 0; i < nb_components; i++) {
328 if (h_count[i] > s->h_max)
329 s->h_max = h_count[i];
330 if (v_count[i] > s->v_max)
331 s->v_max = v_count[i];
337 if (!h_count[i] || !v_count[i]) {
339 "Invalid sampling factor in component %d %d:%d\n",
340 i, h_count[i], v_count[i]);
345 i, h_count[i], v_count[i],
357 memcmp(s->h_count, h_count, sizeof(h_count)) ||
358 memcmp(s->v_count, v_count, sizeof(v_count))) {
363 memcpy(s->h_count, h_count, sizeof(h_count));
364 memcpy(s->v_count, v_count, sizeof(v_count));
392 if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
397 pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
404 if (!(pix_fmt_id & 0xD0D0D0D0))
405 pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
406 if (!(pix_fmt_id & 0x0D0D0D0D))
407 pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
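Each component's horizontal and vertical sampling factors occupy one nibble of pix_fmt_id, component 0 in the most significant byte, which is why the switch further down can compare against literal constants such as the 0x42111100 and 0x24111100 cases handled below. A hypothetical helper showing the same packing (not part of the decoder, for illustration only):

/* Pack per-component sampling factors into the 32-bit id the decoder
 * switches on.  Component 0 uses the two most significant nibbles (h, then v),
 * so Y 2x2 + Cb/Cr 1x1 (4:2:0) yields 0x22111100 and Y 2x1 + Cb/Cr 1x1
 * (4:2:2) yields 0x21111100. */
static unsigned pack_pix_fmt_id(const int *h_count, const int *v_count,
                                int nb_components)
{
    unsigned id = 0;
    for (int i = 0; i < nb_components; i++)
        id |= ((unsigned)h_count[i] << (28 - 8 * i)) |
              ((unsigned)v_count[i] << (24 - 8 * i));
    return id;
}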
409 for (i = 0; i < 8; i++) {
410 int j = 6 + (i&1) - (i&6);
411 int is = (pix_fmt_id >> (4*i)) & 0xF;
412 int js = (pix_fmt_id >> (4*j)) & 0xF;
414 if (is == 1 && js != 2 && (i < 2 || i > 5))
415 js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
416 if (is == 1 && js != 2 && (i < 2 || i > 5))
417 js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
419 if (is == 1 && js == 2) {
425 switch (pix_fmt_id) {
513 if (pix_fmt_id == 0x14111100)
553 if (pix_fmt_id == 0x42111100) {
557 } else if (pix_fmt_id == 0x24111100) {
590 else if (s->bits <= 8)
609 for (i = 0; i < 4; i++)
616 if (len != (8 + (3 * nb_components)))
627 int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
628 int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
648 if (code < 0 || code > 16) {
650 "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
651 0, dc_index, &s->vlcs[0][dc_index]);
663 int dc_index, int ac_index, int16_t *quant_matrix)
669 if (val == 0xfffff) {
673 val = val * quant_matrix[0] + s->last_dc[component];
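mjpeg_decode_dc() returns the DPCM difference for the block: the Huffman symbol gives the category (the number of magnitude bits), those bits are sign-extended with JPEG's EXTEND rule, and the result is accumulated into the per-component DC predictor; the line above folds the quantizer in at the same point. A simplified sketch of the plain T.81 procedure, with huff_decode() as a hypothetical stand-in for the get_vlc2() call:

/* Sketch of baseline DC decoding (GetBitContext and get_bits() are the real
 * FFmpeg bit reader; huff_decode() is hypothetical). */
static int decode_dc_sketch(GetBitContext *gb, VLC *dc_vlc, int *last_dc)
{
    int t    = huff_decode(gb, dc_vlc);   /* category, 0..16 */
    int diff = 0;

    if (t > 0) {
        diff = get_bits(gb, t);           /* raw magnitude bits */
        if (diff < (1 << (t - 1)))        /* top bit 0: value is negative */
            diff -= (1 << t) - 1;         /* EXTEND() */
    }
    *last_dc += diff;                     /* DPCM against the previous DC */
    return *last_dc;
}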
683 i += ((unsigned)code) >> 4;
691 int sign = (~cache) >> 31;
692 level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
702 block[j] = level * quant_matrix[j];
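The sign/magnitude expression above is a branchless equivalent of reading code magnitude bits and applying the same EXTEND rule: the cache's top bit selects the sign and NEG_USR32() extracts the magnitude in one step. Stripped of the bit-reader cache tricks, the AC part of decode_block() is roughly the following (a sketch with a hypothetical huff_decode(), not the FFmpeg loop):

/* Each Huffman symbol packs a zero run (high nibble) and a coefficient size
 * (low nibble); 0x00 is EOB and 0xF0 (ZRL) skips sixteen zero coefficients. */
static void decode_ac_sketch(GetBitContext *gb, VLC *ac_vlc, int16_t block[64],
                             const uint8_t zigzag[64], const int16_t quant[64])
{
    int k = 1;                               /* coefficient 0 is the DC term */

    while (k < 64) {
        int rs   = huff_decode(gb, ac_vlc);
        int run  = rs >> 4;
        int size = rs & 15;

        if (size == 0) {
            if (run != 15)
                break;                       /* EOB: rest of the block is zero */
            k += 16;                         /* ZRL */
        } else {
            int level = get_bits(gb, size);
            if (level < (1 << (size - 1)))   /* EXTEND, as in the DC sketch */
                level -= (1 << size) - 1;
            k += run;
            if (k > 63)
                break;                       /* corrupt stream */
            block[zigzag[k]] = level * quant[zigzag[k]];
            k++;
        }
    }
}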
711 int component, int dc_index,
712 int16_t *quant_matrix, int Al)
717 if (val == 0xfffff) {
721 val = (val * quant_matrix[0] << Al) + s->last_dc[component];
729 uint8_t *last_nnz, int ac_index,
730 int16_t *quant_matrix,
731 int ss, int se, int Al, int *EOBRUN)
742 for (i = ss; ; i++) {
746 run = ((unsigned) code) >> 4;
755 int sign = (~cache) >> 31;
756 level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
764 block[j] = level * quant_matrix[j] << Al;
771 block[j] = level * quant_matrix[j] << Al;
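In a progressive AC scan the symbol classes change: a symbol with size 0 and run < 15 is EOBn, and run extra bits encode how many following blocks also have no coefficients left in the ss..se band, which the decoder caches in *EOBRUN. A minimal sketch of that bookkeeping (a hypothetical helper on top of the real GetBitContext):

/* Decode the end-of-band run length from an EOBn symbol: the return value is
 * the number of *additional* blocks, after the current one, that contain no
 * further coefficients in this spectral band. */
static int decode_eob_run(GetBitContext *gb, int run)
{
    int eobrun = (1 << run) - 1;      /* EOB0 = 0, EOB1 = 1..2, EOB2 = 3..6, ... */
    if (run)
        eobrun += get_bits(gb, run);  /* 'run' extra bits refine the length */
    return eobrun;
}

While *EOBRUN is nonzero, subsequent blocks of the scan are skipped entirely and only the counter is decremented.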
800 #define REFINE_BIT(j) { \
801 UPDATE_CACHE(re, &s->gb); \
802 sign = block[j] >> 15; \
803 block[j] += SHOW_UBITS(re, &s->gb, 1) * \
804 ((quant_matrix[j] ^ sign) - sign) << Al; \
805 LAST_SKIP_BITS(re, &s->gb, 1); \
813 av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
818 j = s->scantable.permutated[i]; \
821 else if (run-- == 0) \
828 int ac_index, int16_t *quant_matrix,
829 int ss, int se, int Al, int *EOBRUN)
831 int code, i = ss, j, sign, val, run;
832 int last = FFMIN(se, *last_nnz);
843 run = ((unsigned) code) >> 4;
850 block[j] = ((quant_matrix[j]^val) - val) << Al;
858 run = ((unsigned) code) >> 4;
879 for (; i <= last; i++) {
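The REFINE_BIT() macro and decode_block_refinement() implement the successive-approximation refinement pass: every coefficient that is already nonzero receives one correction bit, and if that bit is set the coefficient moves one quantized step (scaled by 1 << Al) further away from zero. An equivalent plain-function sketch for a single coefficient:

/* Refinement of an already-nonzero, already-dequantized coefficient
 * (GetBitContext/get_bits1() are the real FFmpeg primitives). */
static void refine_coef(GetBitContext *gb, int16_t *coef, int quant, int Al)
{
    if (get_bits1(gb)) {              /* correction bit set */
        if (*coef > 0)
            *coef += quant << Al;     /* step away from zero, same sign */
        else
            *coef -= quant << Al;
    }
}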
900 for (i = 0; i < nb_components; i++)
914 for (i = 0; i < nb_components; i++)
929 int left[4], top[4], topleft[4];
930 const int linesize = s->linesize[0];
931 const int mask = ((1 << s->bits) - 1) << point_transform;
947 for (i = 0; i < 4; i++)
950 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
954 ptr += linesize >> 1;
956 for (i = 0; i < 4; i++)
957 top[i] = left[i] = topleft[i] = buffer[0][i];
959 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
967 top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
969 if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
970 modified_predictor = 1;
972 for (i=0;i<nb_components;i++) {
978 PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);
984 left[i] = buffer[mb_x][i] =
985 mask & (pred + (dc << point_transform));
994 for(i=0; i<nb_components; i++) {
997 for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
998 ptr[4*mb_x+3-c] = buffer[mb_x][i];
1000 } else if(s->bits == 9) {
1003 for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1004 ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1008 } else if (s->rct) {
1009 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1010 ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1011 ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1012 ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1015 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1017 ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1018 ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1021 for(i=0; i<nb_components; i++) {
1024 for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1025 ptr[3*mb_x+2-c] = buffer[mb_x][i];
1027 } else if(s->bits == 9) {
1030 for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1031 ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
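The s->rct branch above undoes a reversible colour transform: one base channel is recovered from the transformed sample minus a rounded average of the other two (offset by 0x200 because those two are stored with a +512 bias), and the remaining channels are stored as differences against that base. A standalone sketch of the same arithmetic; the channel naming here is illustrative, not taken from the decoder:

/* Inverse reversible colour transform, matching the three assignments in the
 * rct branch above. */
static void inverse_rct(int t0, int t1, int t2, int *c0, int *c1, int *c2)
{
    *c1 = t0 - ((t1 + t2 - 0x200) >> 2);   /* base channel */
    *c0 = t1 + *c1;                        /* stored as difference vs. base */
    *c2 = t2 + *c1;
}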
1041 int point_transform, int nb_components)
1043 int i, mb_x, mb_y, mask;
1045 int resync_mb_y = 0;
1046 int resync_mb_x = 0;
1048 point_transform += bits - s->bits;
1049 mask = ((1 << s->bits) - 1) << point_transform;
1051 av_assert0(nb_components>=1 && nb_components<=4);
1053 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1054 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1061 if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1062 int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1063 int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1064 for (i = 0; i < nb_components; i++) {
1067 int n, h, v, x, y, c, j, linesize;
1076 if(bits>8) linesize /= 2;
1078 for(j=0; j<n; j++) {
1087 if(x==0 && leftcol){
1088 pred= 1 << (bits - 1);
1093 if(x==0 && leftcol){
1094 pred= ptr[-linesize];
1096 PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1101 ptr += linesize >> 1;
1103 *ptr= pred + (dc << point_transform);
1105 ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x));
1107 if(x==0 && leftcol){
1108 pred= 1 << (bits - 1);
1113 if(x==0 && leftcol){
1114 pred= ptr16[-linesize];
1116 PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1121 ptr16 += linesize >> 1;
1123 *ptr16= pred + (dc << point_transform);
1132 for (i = 0; i < nb_components; i++) {
1135 int n, h, v, x, y, c, j, linesize, dc;
1144 if(bits>8) linesize /= 2;
1146 for (j = 0; j < n; j++) {
1154 (linesize * (v * mb_y + y)) +
1156 PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1159 *ptr = pred + (dc << point_transform);
1161 ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x));
1162 PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1165 *ptr16= pred + (dc << point_transform);
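PREDICT() selects one of the seven spatial predictors defined for lossless JPEG (ITU-T T.81, Table H.1) from the left, top and top-left neighbours; the special cases above fall back to a mid-range constant or to a single neighbour at picture and restart boundaries. A plain-function sketch of the predictor selection:

/* a = left neighbour, b = above, c = above-left. */
static int lossless_predict(int a, int b, int c, int predictor)
{
    switch (predictor) {
    case 1:  return a;
    case 2:  return b;
    case 3:  return c;
    case 4:  return a + b - c;
    case 5:  return a + ((b - c) >> 1);
    case 6:  return b + ((a - c) >> 1);
    case 7:  return (a + b) >> 1;
    default: return a;   /* predictor 0 is only meaningful in hierarchical mode */
    }
}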
1186 int linesize, int lowres)
1191 case 1: copy_block4(dst, src, linesize, linesize, 4);
1193 case 2: copy_block2(dst, src, linesize, linesize, 2);
1195 case 3: *dst = *src;
1202 int block_x, block_y;
1205 for (block_y=0; block_y<size; block_y++)
1206 for (block_x=0; block_x<size; block_x++)
1207 *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1209 for (block_y=0; block_y<size; block_y++)
1210 for (block_x=0; block_x<size; block_x++)
1211 *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1216 int Al, const uint8_t *mb_bitmask,
1217 int mb_bitmask_size,
1225 int bytes_per_pixel = 1 + (s->bits > 8);
1237 for (i = 0; i < nb_components; i++) {
1240 reference_data[c] = reference ? reference->data[c] : NULL;
1245 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1246 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1257 for (i = 0; i < nb_components; i++) {
1259 int n, h, v, x, y, c, j;
1267 for (j = 0; j < n; j++) {
1268 block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1269 (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1272 block_offset += linesize[c] >> 1;
1273 if ( 8*(h * mb_x + x) < s->width
1274 && 8*(v * mb_y + y) < s->height) {
1275 ptr = data[c] + block_offset;
1290 "error y=%d x=%d\n", mb_y, mb_x);
1310 "error y=%d x=%d\n", mb_y, mb_x);
1314 ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1317 (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1332 int se, int Ah, int Al)
1341 int bytes_per_pixel = 1 + (s->bits > 8);
1344 if (se < ss || se > 63) {
1357 data += linesize >> 1;
1361 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1366 for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1373 quant_matrix, ss, se, Al, &EOBRUN);
1376 quant_matrix, ss, se, Al, &EOBRUN);
1379 "error y=%d x=%d\n", mb_y, mb_x);
1397 int mb_bitmask_size, const AVFrame *reference)
1401 const int block_size = s->lossless ? 1 : 8;
1402 int ilv, prev_shift;
1406 "Can not process SOS before SOF, skipping\n");
1416 "decode_sos: nb_components (%d) unsupported\n", nb_components);
1419 if (len != 6 + 2 * nb_components) {
1423 for (i = 0; i < nb_components; i++) {
1432 "decode_sos: index(%d) out of components\n", index);
1448 index = (index+2)%3;
1468 prev_shift = point_transform = 0;
1470 if (nb_components > 1) {
1474 } else if (!s->ls) {
1477 s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
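The MCU grid is the image size divided by the MCU footprint, rounded up: one MCU covers h * block_size by v * block_size pixels, with block_size 8 for DCT scans and 1 for lossless, as in the expression above. A hypothetical helper with the same rounding, plus a worked example:

/* Ceiling division of a dimension by the MCU size along that axis. */
static int mcu_count(int len, int sampling, int block_size)
{
    return (len + sampling * block_size - 1) / (sampling * block_size);
}

/* e.g. a 1920x1080 4:2:0 frame: mcu_count(1920, 2, 8) = 120 across,
 * mcu_count(1080, 2, 8) = 68 down (the last MCU row is only partly covered). */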
1486 s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1496 for (i = 0; i < nb_components; i++)
1501 if (CONFIG_JPEGLS_DECODER && s->ls) {
1506 point_transform, ilv)) < 0)
1515 nb_components)) < 0)
1524 point_transform)) < 0)
1528 prev_shift, point_transform,
1529 mb_bitmask, mb_bitmask_size, reference)) < 0)
1613 int t_w, t_h, v1, v2;
1625 "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1634 if (len -10 - (t_w * t_h * 3) > 0)
1635 len -= t_w * t_h * 3;
1657 "Pegasus lossless jpeg header found\n");
1687 if (id == AV_RL32("colr") && len > 0) {
1694 if (id == AV_RL32("xfrm") && len > 0) {
1722 } else if (type == 1) {
1734 if (!(flags & 0x04)) {
1744 int ret, le, ifd_offset, bytes_read;
1801 "mjpeg: error, decode_app parser read over the end\n");
1815 for (i = 0; i < len - 2; i++)
1817 if (i > 0 && cbuf[i - 1] == '\n')
1826 if (!strncmp(cbuf, "AVID", 4)) {
1828 } else if (!strcmp(cbuf, "CS=ITU601"))
1830 else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
1831 (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
1850 buf_ptr = *pbuf_ptr;
1851 while (buf_end - buf_ptr > 1) {
1854 if ((v == 0xff) && (v2 >= 0xc0) && (v2 <= 0xfe) && buf_ptr < buf_end) {
1863 ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
1864 *pbuf_ptr = buf_ptr;
1870 const uint8_t **unescaped_buf_ptr,
1871 int *unescaped_buf_size)
1881 if (start_code == SOS && !s->ls) {
1885 while (src < buf_end) {
1891 while (src < buf_end && x == 0xff)
1894 if (x >= 0xd0 && x <= 0xd7)
1901 *unescaped_buf_ptr = s->buffer;
1902 *unescaped_buf_size = dst - s->buffer;
1903 memset(s->buffer + *unescaped_buf_size, 0,
1907 (buf_end - *buf_ptr) - (dst - s->buffer));
1908 } else if (start_code == SOS && s->ls) {
1916 while (src + t < buf_end) {
1919 while ((src + t < buf_end) && x == 0xff)
1946 *unescaped_buf_ptr = dst;
1947 *unescaped_buf_size = (bit_count + 7) >> 3;
1948 memset(s->buffer + *unescaped_buf_size, 0,
1951 *unescaped_buf_ptr = *buf_ptr;
1952 *unescaped_buf_size = buf_end - *buf_ptr;
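ff_mjpeg_find_marker() copies the entropy-coded segment into s->buffer while removing JPEG byte stuffing: inside scan data a 0xFF byte is always followed either by a stuffed 0x00 (dropped), by a restart marker 0xD0..0xD7 (kept, so the decoder can resynchronise at restart intervals), or by a real marker that terminates the scan. A simplified, hypothetical version of that unescaping loop, not the FFmpeg implementation:

/* Copy src to dst, dropping stuffed zeros and stopping at the next real
 * marker; returns the number of unescaped bytes written. */
static int unescape_scan(const uint8_t *src, int size, uint8_t *dst)
{
    int i = 0, o = 0;

    while (i < size) {
        uint8_t b = src[i++];
        dst[o++] = b;
        if (b == 0xFF && i < size) {
            uint8_t m = src[i];
            if (m == 0x00) {
                i++;                           /* stuffed zero: skip it */
            } else if (m >= 0xD0 && m <= 0xD7) {
                dst[o++] = src[i++];           /* restart marker stays in stream */
            } else {
                o--;                           /* real marker: 0xFF belongs to it */
                break;
            }
        }
    }
    return o;
}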
1963 int buf_size = avpkt->size;
1965 const uint8_t *buf_end, *buf_ptr;
1966 const uint8_t *unescaped_buf_ptr;
1968 int unescaped_buf_size;
1979 buf_end = buf + buf_size;
1980 while (buf_ptr < buf_end) {
1984 &unescaped_buf_size);
1986 if (start_code < 0) {
1988 } else if (unescaped_buf_size > INT_MAX / 8) {
1990 "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
1991 start_code, unescaped_buf_size, buf_size);
1995 start_code, buf_end - buf_ptr);
2009 if (start_code >= 0xd0 && start_code <= 0xd7)
2011 "restart marker: %d\n", start_code & 0x0f);
2013 else if (start_code >= APP0 && start_code <= APP15)
2016 else if (start_code == COM)
2021 if (!CONFIG_JPEGLS_DECODER &&
2022 (start_code == SOF48 || start_code == LSE)) {
2027 switch (start_code) {
2072 if (!CONFIG_JPEGLS_DECODER ||
2081 "Found EOI before any SOF, ignoring\n");
2099 int qpw = (s->width + 15) / 16;
2102 memset(qp_table_buf->data, qp, qpw);
2131 "mjpeg: unsupported coding type (%x)\n", start_code);
2138 "marker parser used %d bytes (%d bits)\n",
2170 for (p = 0; p<4; p++) {
2183 for (i = 0; i < h; i++) {
2185 if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2186 else line[w - 1] = line[(w - 1) / 2];
2187 for (index = w - 2; index > 0; index--) {
2189 ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2191 line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2195 ((uint16_t*)line)[w - 1] =
2196 ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[(w - 1) / 3];
2199 line[w - 2] = line[(w - 1) / 3];
2201 for (index = w - 3; index > 0; index--) {
2202 line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
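These loops stretch a subsampled plane to full width in place: the decoded samples occupy the left part of each row, and the row is rewritten back-to-front so every destination pixel averages its two (or, in the 3:1 case, three) nearest source samples. A sketch of the 2:1 case for 8-bit samples, matching the arithmetic above:

/* In-place 2:1 horizontal upsampling of one row of width w. */
static void upsample_2to1_inplace(uint8_t *line, int w)
{
    line[w - 1] = line[(w - 1) / 2];
    for (int i = w - 2; i > 0; i--)
        line[i] = (line[i / 2] + line[(i + 1) / 2]) >> 1;
    /* line[0] already holds the first source sample */
}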
2226 for (p = 0; p < 4; p++) {
2237 for (i = h - 1; i; i--) {
2240 if (src1 == src2 || i == h - 1) {
2241 memcpy(dst, src1, w);
2243 for (index = 0; index < w; index++)
2244 dst[index] = (src1[index] + src2[index]) >> 1;
2253 for (index=0; index<4; index++) {
2257 if(index && index<3){
2263 for (i=0; i<h/2; i++) {
2265 FFSWAP(int, dst[j], dst2[j]);
2275 for (i=0; i<h; i++) {
2278 for (index=0; index<4; index++) {
2282 for (j=0; j<w; j++) {
2284 int r = dst[0][j] * k;
2285 int g = dst[1][j] * k;
2286 int b = dst[2][j] * k;
2287 dst[0][j] = g*257 >> 16;
2288 dst[1][j] = b*257 >> 16;
2289 dst[2][j] = r*257 >> 16;
2297 for (i=0; i<h; i++) {
2300 for (index=0; index<4; index++) {
2304 for (j=0; j<w; j++) {
2306 int r = (255 - dst[0][j]) * k;
2307 int g = (128 - dst[1][j]) * k;
2308 int b = (128 - dst[2][j]) * k;
2309 dst[0][j] = r*257 >> 16;
2310 dst[1][j] = (g*257 >> 16) + 128;
2311 dst[2][j] = (b*257 >> 16) + 128;
2332 return buf_ptr - buf;
2355 for (i = 0; i < 3; i++) {
2356 for (j = 0; j < 4; j++)
2373 #if CONFIG_MJPEG_DECODER
2374 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2375 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2377 { "extern_huff", "Use external huffman table.",
2382 static const AVClass mjpegdec_class = {
2401 .priv_class = &mjpegdec_class,
2405 #if CONFIG_THP_DECODER